Unverified · Commit 2981ca73 · authored by cheng cheng, committed by GitHub

Reduce unittest cost (#4253)

* reduce unittest cost

* format

* refactor unittest for reduce cost

* refine code for review

* moments test double

* rollback top k shape dim

* fix axis error in test_prelu
Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
Parent 2c598060
......@@ -49,7 +49,6 @@ def compare_with_tensorflow(device_type, activation_type, shape, data_type):
"relu": tf.nn.relu,
"sigmoid": tf.math.sigmoid,
"tanh": tf.math.tanh,
# "gelu": tfa.activations.gelu,
}
@flow.global_function(type="train", function_config=func_config)
......@@ -82,8 +81,8 @@ def compare_with_tensorflow(device_type, activation_type, shape, data_type):
loss_diff = test_global_storage.Get("loss_diff")
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
rtol = 1e-3 if activation_type is "gelu" else 1e-5
atol = 1e-3 if activation_type is "gelu" else 1e-5
rtol = 1e-5
atol = 1e-5
assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol, atol)
assert np.allclose(test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol, atol)
......@@ -93,16 +92,15 @@ class TestActivations(flow.unittest.TestCase):
def test_activations(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
# arg_dict["activation_type"] = ["relu", "sigmoid", "tanh", "gelu"]
arg_dict["activation_type"] = ["relu", "sigmoid", "tanh"]
arg_dict["shape"] = [(1024, 1024)]
arg_dict["shape"] = [(64, 64)]
arg_dict["data_type"] = [flow.float, flow.double]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if os.getenv("ONEFLOW_TEST_CPU_ONLY") is None:
for act_type in arg_dict["activation_type"]:
compare_with_tensorflow("gpu", act_type, (1024, 1024), flow.float16)
compare_with_tensorflow("gpu", act_type, (64, 64), flow.float16)
if __name__ == "__main__":
......
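For context on the pattern these tests share: `GenArgList` (imported from `test_util` throughout this diff) expands an `OrderedDict` of value lists into one positional-argument tuple per combination. A minimal sketch of that expansion, assuming the cartesian-product behavior the call sites imply (the real helper lives in OneFlow's test_util and is not shown here):

    # Assumed sketch of GenArgList-style expansion, inferred from call sites.
    import itertools
    from collections import OrderedDict

    def gen_arg_list(arg_dict):
        # Cartesian product over the ordered value lists: one positional
        # argument list per test configuration.
        return [list(args) for args in itertools.product(*arg_dict.values())]

    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu", "cpu"]
    arg_dict["shape"] = [(64, 64)]
    arg_dict["data_type"] = ["float", "double"]
    for arg in gen_arg_list(arg_dict):
        print(arg)  # e.g. ['gpu', (64, 64), 'float']

Shrinking any one of those lists cuts the case count multiplicatively, which is why this commit trims shapes, dtypes, and axes rather than deleting tests outright.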
......@@ -21,6 +21,7 @@ import oneflow as flow
import tensorflow as tf
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as oft
import os
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
......@@ -60,23 +61,21 @@ def gen_arg_list():
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["in_shape"] = [
(100,),
(100, 100),
(1000, 1000),
(10, 10, 2000),
(10, 10000),
(10, 10, 20),
(10, 1000),
]
arg_dict["axis"] = [-1]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["data_type"] = ["double", "int64"]
return GenArgList(arg_dict)
def gen_arg_list_for_test_axis():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["device_type"] = ["gpu"]
arg_dict["in_shape"] = [(10, 10, 20, 30)]
arg_dict["axis"] = [-2, -1, 0, 1, 2]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["axis"] = [-2, 0, 1, 2]
arg_dict["data_type"] = ["float32", "int32"]
return GenArgList(arg_dict)
......@@ -86,6 +85,9 @@ class TestArgmax(flow.unittest.TestCase):
def test_argmax(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argmax_gpu(test_case):
for arg in gen_arg_list_for_test_axis():
compare_with_tensorflow(*arg)
......
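The `@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), ...)` guard added throughout this commit uses the standard `unittest` API: `os.getenv` returns `None` when the variable is unset, so GPU-dependent cases run by default and are skipped on CPU-only CI. A self-contained illustration:

    import os
    import unittest

    class Demo(unittest.TestCase):
        @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
        def test_gpu_path(self):
            # Skipped whenever ONEFLOW_TEST_CPU_ONLY is set to any
            # non-empty value; runs otherwise.
            self.assertTrue(True)

    if __name__ == "__main__":
        unittest.main()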
......@@ -21,6 +21,7 @@ import oneflow as flow
import tensorflow as tf
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as oft
import os
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
......@@ -57,30 +58,34 @@ def compare_with_tensorflow(device_type, in_shape, axis, direction, data_type):
def gen_arg_list():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(100,), (100, 100), (10, 10, 200)]
arg_dict["in_shape"] = [(10,), (10, 10, 20)]
arg_dict["axis"] = [-1]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["data_type"] = ["double", "int32"]
return GenArgList(arg_dict)
def gen_arg_list_for_test_axis():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["device_type"] = ["gpu"]
arg_dict["in_shape"] = [(10, 10, 20)]
arg_dict["axis"] = [-2, -1, 0, 1, 2]
arg_dict["axis"] = [-2, 0, 2]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["data_type"] = ["float32", "int64"]
return GenArgList(arg_dict)
@flow.unittest.skip_unless_1n1d()
class TestArgsort(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argsort(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argsort_gpu(test_case):
for arg in gen_arg_list_for_test_axis():
compare_with_tensorflow(*arg)
......
......@@ -16,6 +16,7 @@ limitations under the License.
import numpy as np
import unittest
from collections import OrderedDict
import os
import oneflow as flow
......@@ -158,6 +159,7 @@ def _dynamic_multi_iter_compare(
@flow.unittest.skip_unless_1n1d()
class TestArgwhere(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argwhere(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(10), (30, 4), (8, 256, 20)]
......@@ -169,6 +171,7 @@ class TestArgwhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argwhere_multi_iter(test_case):
arg_dict = OrderedDict()
arg_dict["iter_num"] = [2]
......@@ -183,6 +186,7 @@ class TestArgwhere(flow.unittest.TestCase):
@flow.unittest.skip_unless_1n4d()
class TestArgwhere4D(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argwhere(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(10, 5)]
......
......@@ -545,6 +545,7 @@ class TestBatchNormalization(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
CompareBnWithTensorFlow(test_case, **arg, training=False, trainable=False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_layer_batchnorm_trainable_without_training(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
......@@ -562,6 +563,7 @@ class TestBatchNormalization(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
CompareBnWithTensorFlow(test_case, **arg, training=False, trainable=True)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_nn_batchnorm(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
......@@ -572,6 +574,7 @@ class TestBatchNormalization(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
CompareNnBnWithTensorFlow(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_batchnorm_fp16(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
......@@ -620,7 +623,7 @@ class TestBatchNormalization(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_batchnorm_add_relu(test_case):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [(12, 16, 24, 32), (5, 7, 9, 11)]
arg_dict["input_shape"] = [(5, 7, 9, 11)]
arg_dict["axis"] = [0, 1, 2, 3]
arg_dict["data_type"] = [flow.float32, flow.float16]
for arg in GenArgDict(arg_dict):
......@@ -629,7 +632,7 @@ class TestBatchNormalization(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_batchnorm_relu(test_case):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [(12, 16, 24, 32), (5, 7, 9, 11)]
arg_dict["input_shape"] = [(12, 16, 24, 32)]
arg_dict["axis"] = [0, 1, 2, 3]
arg_dict["data_type"] = [flow.float32, flow.float16]
for arg in GenArgDict(arg_dict):
......
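These batch-norm tests drive `CompareBnWithTensorFlow(test_case, **arg, ...)` through `GenArgDict`, the keyword-style sibling of `GenArgList`. A sketch under the same cartesian-product assumption, yielding dicts instead of lists:

    # Assumed sketch of GenArgDict: each configuration comes back as a
    # keyword dict suitable for **arg expansion.
    import itertools
    from collections import OrderedDict

    def gen_arg_dict(arg_dict):
        keys = list(arg_dict.keys())
        return [dict(zip(keys, vals)) for vals in itertools.product(*arg_dict.values())]

    arg_dict = OrderedDict()
    arg_dict["input_shape"] = [(5, 7, 9, 11)]
    arg_dict["axis"] = [0, 1, 2, 3]
    for arg in gen_arg_dict(arg_dict):
        print(arg)  # e.g. {'input_shape': (5, 7, 9, 11), 'axis': 0}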
......@@ -20,16 +20,11 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList
import oneflow.typing as oft
import os
def _test_split_to_split(
test_case,
src_device_type,
dst_device_type,
src_device_num,
dst_device_num,
src_axis,
dst_axis,
test_case, src_device_type, dst_device_type, src_axis, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
......@@ -37,27 +32,31 @@ def _test_split_to_split(
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def split_to_split_job(x: oft.Numpy.Placeholder((96, 96))):
def build_s2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def split_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_s2s(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
y = split_to_split_job(x).get().numpy()
test_case.assertTrue(np.array_equal(x, y))
result_tuple = split_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_split_to_split_enable_all_to_all(
test_case,
src_device_type,
dst_device_type,
src_device_num,
dst_device_num,
src_axis,
dst_axis,
test_case, src_device_type, dst_device_type, src_device_num, dst_device_num,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
......@@ -66,26 +65,33 @@ def _test_split_to_split_enable_all_to_all(
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def split_to_split_job(x: oft.Numpy.Placeholder((32, 16, 64, 48))):
def build_s2s_all2all(input_blob, src_axis, dst_axis):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def split_to_split_all2all_job(input_blob: oft.Numpy.Placeholder((32, 16, 64, 48))):
result_list = []
for i in (0, 1, 2, 3):
for j in (0, 1, 2, 3):
if i == j:
continue
result_list.append(build_s2s_all2all(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(32, 16, 64, 48).astype(np.float32)
y = split_to_split_job(x).get().numpy()
test_case.assertTrue(np.array_equal(x, y))
result_tuple = split_to_split_all2all_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_split_to_broadcast(
test_case,
src_device_type,
dst_device_type,
src_device_num,
dst_device_num,
src_axis,
test_case, src_device_type, dst_device_type, src_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
......@@ -93,26 +99,31 @@ def _test_split_to_broadcast(
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def split_to_broadcast_job(x: oft.Numpy.Placeholder((96, 96))):
def build_s2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
@flow.global_function(function_config=func_config)
def split_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_s2b(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
y = split_to_broadcast_job(x).get().numpy()
test_case.assertTrue(np.array_equal(x, y))
result_tuple = split_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_broadcast_to_split(
test_case,
src_device_type,
dst_device_type,
src_device_num,
dst_device_num,
dst_axis,
test_case, src_device_type, dst_device_type, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
......@@ -120,26 +131,29 @@ def _test_broadcast_to_split(
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def broadcast_to_split_job(x: oft.Numpy.Placeholder((96, 96))):
def build_b2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.broadcast()))
src = flow.identity(input_blob.with_distribute(flow.distribute.broadcast()))
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def broadcast_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_b2s(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
y = broadcast_to_split_job(x).get().numpy()
test_case.assertTrue(np.array_equal(x, y))
result_tuple = broadcast_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_partial_sum_to_split(
test_case,
src_device_type,
dst_device_type,
src_device_num,
dst_device_num,
dst_axis,
test_case, src_device_type, dst_device_type, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
......@@ -147,63 +161,83 @@ def _test_partial_sum_to_split(
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def partial_sum_to_split_job(x: oft.Numpy.Placeholder((96, 96, 96))):
def build_p2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.split(0)))
src = flow.identity(input_blob.with_distribute(flow.distribute.split(0)))
src = flow.math.reduce_sum(src, axis=0)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def partial_sum_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96, 96))):
result_list = []
for i in (2, 3):
for j in (1, 2, 3):
result_list.append(build_p2s(input_blob, i, j))
return tuple(result_list)
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
y = partial_sum_to_split_job(x).get().numpy()
test_case.assertTrue(np.allclose(np.sum(x, axis=0), y))
result_tuple = partial_sum_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.allclose(np.sum(x, axis=0), out.numpy()))
def _test_partial_sum_to_broadcast(
test_case, src_device_type, dst_device_type, src_device_num, dst_device_num
):
def _test_partial_sum_to_broadcast(test_case, src_device_type, dst_device_type):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def partial_sum_to_broadcast_job(x: oft.Numpy.Placeholder((96, 96, 96))):
def build_p2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.split(0)))
src = flow.identity(input_blob.with_distribute(flow.distribute.split(0)))
src = flow.math.reduce_sum(src, axis=0)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
@flow.global_function(function_config=func_config)
def partial_sum_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96, 96))):
result_list = []
for i in (2, 3):
for j in (1, 2, 3):
result_list.append(build_p2b(input_blob, i, j))
return tuple(result_list)
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
y = partial_sum_to_broadcast_job(x).get().numpy()
test_case.assertTrue(np.allclose(np.sum(x, axis=0), y))
result_tuple = partial_sum_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.allclose(np.sum(x, axis=0), out.numpy()))
def _test_broadcast_to_broadcast(
test_case, src_device_type, dst_device_type, src_device_num, dst_device_num
):
def _test_broadcast_to_broadcast(test_case, src_device_type, dst_device_type):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def broadcast_to_broadcast_job(x: oft.Numpy.Placeholder((96, 96, 96))):
def build_b2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(x.with_distribute(flow.distribute.broadcast()))
src = flow.identity(input_blob.with_distribute(flow.distribute.broadcast()))
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
y = broadcast_to_broadcast_job(x).get().numpy()
test_case.assertTrue(np.array_equal(x, y))
@flow.global_function(function_config=func_config)
def broadcast_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_b2b(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
result_tuple = broadcast_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_multi_lbi(
......@@ -240,80 +274,70 @@ def _test_multi_lbi(
@flow.unittest.skip_unless_1n4d()
class TestBoxingV2(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [1, 2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
arg_dict["src_axis"] = [0, 1]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_split_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_split_all_to_all(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["gpu"]
arg_dict["dst_device_type"] = ["gpu"]
arg_dict["src_device_num"] = [4]
arg_dict["dst_device_num"] = [4]
arg_dict["src_axis"] = [0, 1, 2, 3]
arg_dict["dst_axis"] = [0, 1, 2, 3]
for arg in GenArgList(arg_dict):
(_, _, _, _, src_axis, dst_axis) = arg
if src_axis == dst_axis:
continue
_test_split_to_split_enable_all_to_all(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [1, 2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
arg_dict["src_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_split_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_broadcast_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [1, 2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_broadcast_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_partial_sum_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_partial_sum_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_partial_sum_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
for arg in GenArgList(arg_dict):
_test_partial_sum_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_broadcast_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [1, 2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
for arg in GenArgList(arg_dict):
_test_broadcast_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_multi_lbi(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
......
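The recurring refactor in this boxing file replaces one compiled `@flow.global_function` per (src_device_num, dst_device_num) pair with a single job that builds every variant and returns a tuple, so the graph compiles once per test instead of once per configuration. Condensed from the split-to-broadcast case above as a sketch of the pattern, not a drop-in test; every API shown is one the diff itself uses:

    import numpy as np
    import oneflow as flow
    import oneflow.typing as oft

    flow.config.gpu_device_num(4)
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=func_config)
    def merged_s2b_job(x: oft.Numpy.Placeholder((96, 96))):
        outs = []
        for i in (1, 2, 3):      # source device count
            for j in (1, 2, 3):  # destination device count
                with flow.scope.placement("gpu", "0:0-" + str(i - 1)):
                    src = flow.identity(x.with_distribute(flow.distribute.split(0)))
                with flow.scope.placement("gpu", "0:0-" + str(j - 1)):
                    outs.append(
                        flow.identity(src.with_distribute(flow.distribute.broadcast()))
                    )
        return tuple(outs)

    x = np.random.rand(96, 96).astype(np.float32)
    for out in merged_s2b_job(x).get():
        assert np.array_equal(x, out.numpy())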
......@@ -20,6 +20,7 @@ import numpy as np
import oneflow as flow
from test_util import GenArgList, type_name_to_flow_type
import oneflow.typing as oft
import os
def func_equal(a, b):
......@@ -46,10 +47,6 @@ def func_less_equal(a, b):
return a <= b
# def func_logical_and(a, b):
# return a & b
def np_array(dtype, shape):
if dtype == flow.int8:
return np.random.randint(0, 127, shape).astype(np.int8)
......@@ -65,82 +62,80 @@ def np_array(dtype, shape):
assert False
def GenerateTest(
test_case, func, a_shape, b_shape, dtype=flow.int32, device_type="cpu"
):
def GenerateTest(test_case, a_shape, b_shape, dtype=flow.int32, device_type="gpu"):
func_config = flow.FunctionConfig()
func_config.default_data_type(dtype)
@flow.global_function(function_config=func_config)
def ModJob1(a: oft.Numpy.Placeholder(a_shape, dtype=dtype)):
with flow.scope.placement(device_type, "0:0"):
return func(a, a)
@flow.global_function(function_config=func_config)
def ModJob2(
def MyTestJob(
a: oft.Numpy.Placeholder(a_shape, dtype=dtype),
b: oft.Numpy.Placeholder(b_shape, dtype=dtype),
):
with flow.scope.placement(device_type, "0:0"):
return func(a, b)
equal_out = func_equal(a, b)
not_equal_out = func_not_equal(a, b)
greater_than_out = func_greater_than(a, b)
greater_equal_out = func_greater_equal(a, b)
less_than_out = func_less_than(a, b)
less_equal_out = func_less_equal(a, b)
return (
equal_out,
not_equal_out,
greater_than_out,
greater_equal_out,
less_than_out,
less_equal_out,
)
a = np_array(dtype, a_shape)
b = np_array(dtype, b_shape)
y = ModJob1(a).get().numpy()
test_case.assertTrue(np.array_equal(y, func(a, a)))
y = ModJob2(a, b).get().numpy()
test_case.assertTrue(np.array_equal(y, func(a, b)))
(
equal_out,
not_equal_out,
greater_than_out,
greater_equal_out,
less_than_out,
less_equal_out,
) = MyTestJob(a, b).get()
test_case.assertTrue(np.array_equal(equal_out.numpy(), func_equal(a, b)))
test_case.assertTrue(np.array_equal(not_equal_out.numpy(), func_not_equal(a, b)))
test_case.assertTrue(
np.array_equal(greater_than_out.numpy(), func_greater_than(a, b))
)
test_case.assertTrue(
np.array_equal(greater_equal_out.numpy(), func_greater_equal(a, b))
)
test_case.assertTrue(np.array_equal(less_than_out.numpy(), func_less_than(a, b)))
test_case.assertTrue(np.array_equal(less_equal_out.numpy(), func_less_equal(a, b)))
flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBroadcastLogicalOps(flow.unittest.TestCase):
def test_naive(test_case):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(function_config=func_config)
def ModJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((5, 2))):
return a == b
x = np.random.rand(5, 2).astype(np.float32)
y = np.random.rand(5, 2).astype(np.float32)
z = ModJob(x, y).get().numpy()
r = func_equal(x, y)
test_case.assertTrue(np.array_equal(z, x == y))
flow.clear_default_session()
def test_broadcast(test_case):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(function_config=func_config)
def ModJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((1, 2))):
return a == b
x = np.random.rand(5, 2).astype(np.float32)
y = np.random.rand(1, 2).astype(np.float32)
z = None
z = ModJob(x, y).get().numpy()
test_case.assertTrue(np.array_equal(z, x == y))
flow.clear_default_session()
def test_broadcast_logical(test_case):
def test_broadcast_logical_cpu(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["func"] = [
func_equal,
func_not_equal,
func_greater_than,
func_greater_equal,
func_less_than,
func_less_than,
arg_dict["a_shape"] = [(64, 64)]
arg_dict["b_shape"] = [(1, 64)]
arg_dict["data_type"] = [
flow.int32,
flow.float,
]
arg_dict["device_type"] = ["cpu"]
for arg in GenArgList(arg_dict):
if len(arg[1]) < len(arg[2]):
continue
GenerateTest(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_broadcast_logical_gpu(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["a_shape"] = [(64, 64), (64, 64, 64)]
arg_dict["b_shape"] = [(1, 64), (64, 1), (64, 1, 64), (1, 64, 1)]
arg_dict["b_shape"] = [(1, 64), (1, 64, 1)]
arg_dict["data_type"] = [
flow.int8,
flow.int32,
......@@ -148,27 +143,13 @@ class TestBroadcastLogicalOps(flow.unittest.TestCase):
flow.float,
flow.double,
]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["device_type"] = ["gpu"]
for arg in GenArgList(arg_dict):
if arg[5] == "cpu" and arg[4] == "float16":
continue
if len(arg[2]) < len(arg[3]):
if len(arg[1]) < len(arg[2]):
continue
GenerateTest(*arg)
def test_xy_mod_x1(test_case):
GenerateTest(test_case, func_less_than, (64, 64), (64, 1), flow.int8)
def test_xy_mod_1y(test_case):
GenerateTest(test_case, func_greater_than, (64, 64), (1, 64))
def test_xyz_mod_x1z(test_case):
GenerateTest(test_case, func_equal, (64, 64, 64), (64, 1, 64))
def test_xyz_mod_1y1(test_case):
GenerateTest(test_case, func_not_equal, (64, 64, 64), (1, 64, 1))
if __name__ == "__main__":
unittest.main()
......@@ -24,6 +24,7 @@ from test_util import (
type_name_to_np_type,
)
import oneflow.typing as oft
import os
def _test_fused_scale_tril_fw_bw(
......@@ -99,6 +100,7 @@ def _test_fused_scale_tril_fw_bw(
@flow.unittest.skip_unless_1n1d()
class TestFusedScaleTril(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_fused_scale_tril_fw_bw(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["gpu"]
......@@ -109,13 +111,11 @@ class TestFusedScaleTril(flow.unittest.TestCase):
"int32",
"int64",
]
arg_dict["shape"] = [(6, 6), (3, 6, 8)]
arg_dict["diagonal"] = [-8, -1, 0, 1, 8]
arg_dict["shape"] = [(3, 6, 8)]
arg_dict["diagonal"] = [-8, -1, 0, 8]
arg_dict["fill_value"] = [1.0, 0]
arg_dict["scale"] = [5.0, 3]
for arg in GenArgDict(arg_dict):
if arg["device"] == "cpu" and arg["type_name"] == "float16":
continue
if isinstance(arg["fill_value"], float) and arg_dict["type_name"] not in [
"float32",
"float16",
......
......@@ -15,6 +15,7 @@ limitations under the License.
"""
from collections import OrderedDict
import os
import unittest
import numpy as np
import oneflow as flow
......@@ -73,13 +74,14 @@ def gen_arg_list():
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["target_dtype"] = ["int32", "int64"]
arg_dict["predictions_shape"] = [(10, 5)]
arg_dict["k"] = [1, 2, 3, 4, 5]
arg_dict["k"] = [1, 2, 5]
arg_dict["with_finite"] = [False, True]
return GenArgList(arg_dict)
@flow.unittest.skip_unless_1n1d()
class TestInTopk(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_in_top_K(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
......
......@@ -126,7 +126,7 @@ def _compare_instance_norm_nd_with_np(
class TestInstanceNormND1n1d(flow.unittest.TestCase):
def test_instance_norm(test_case):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [(4, 2, 32), (4, 2, 32, 32), (4, 2, 32, 32, 32)]
arg_dict["input_shape"] = [(4, 2, 32), (4, 2, 32, 32, 32)]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["machine_ids"] = ["0:0"]
arg_dict["device_counts"] = [1]
......@@ -138,10 +138,11 @@ class TestInstanceNormND1n1d(flow.unittest.TestCase):
@flow.unittest.skip_unless_1n2d()
class TestInstanceNormND1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_instance_norm(test_case):
arg_dict = OrderedDict()
arg_dict["input_shape"] = [(4, 2, 32), (4, 2, 32, 32), (4, 2, 32, 32, 32)]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["input_shape"] = [(4, 2, 32), (4, 2, 32, 32)]
arg_dict["device_type"] = ["gpu"]
arg_dict["machine_ids"] = ["0:0-1"]
arg_dict["device_counts"] = [2]
arg_dict["eps"] = [1e-3]
......
......@@ -72,12 +72,13 @@ def compare_with_tensorflow(device_type, x_shape, data_type, alpha):
@flow.unittest.skip_unless_1n1d()
class TestLeakyRelu(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_leaky_relu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["x_shape"] = [(10, 10, 20, 30), (10, 20, 30), (10, 20), (20,)]
arg_dict["x_shape"] = [(10, 10, 20, 30), (10, 20)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["alpha"] = [0.1, 0.2, -0.2, 2]
arg_dict["alpha"] = [0.1, -0.2, 2]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
......
......@@ -17,6 +17,7 @@ import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import os
from test_util import (
GenArgDict,
......@@ -123,6 +124,7 @@ def _test_masked_fill_fw_bw(test_case, device, x_shape, mask_shape, type_name, v
@flow.unittest.skip_unless_1n1d()
class TestMaskedFill(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_masked_fill_fw_bw(test_case):
arg_dict = OrderedDict()
arg_dict["type_name"] = [
......@@ -135,15 +137,12 @@ class TestMaskedFill(flow.unittest.TestCase):
]
arg_dict["device"] = ["gpu", "cpu"]
arg_dict["x_shape"] = [
(2, 4),
(1, 4),
(2, 2, 4),
(2, 1, 4),
(2, 3, 2, 4),
(2, 2, 3, 2, 4),
]
arg_dict["mask_shape"] = [(2, 1, 2, 4)]
arg_dict["value"] = [2.5, 3.3, -5.5]
arg_dict["value"] = [2.5, -5.5]
for arg in GenArgDict(arg_dict):
if arg["device"] == "cpu" and arg["type_name"] == "float16":
continue
......
......@@ -78,10 +78,11 @@ def compare_with_tensorflow(device_type, x_shape, data_type, axes):
@flow.unittest.skip_unless_1n1d()
class TestMoments(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_moments(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["x_shape"] = [(10, 10, 20, 30), (10, 20, 30), (10, 20), (20,)]
arg_dict["x_shape"] = [(10, 20, 30), (20,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["axes"] = [[0], [0, 2], [0, 1]]
for arg in GenArgList(arg_dict):
......
......@@ -99,13 +99,14 @@ def _run_test(test_case, device_type, dtype, x_shape, shared_axes):
@flow.unittest.skip_unless_1n1d()
class TestPrelu(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_prelu(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["dtype"] = ["float32", "double"]
arg_dict["dtype"] = ["float32"]
arg_dict["x_shape"] = [(10, 32, 20, 20)]
arg_dict["shared_axes"] = [(2,), (2, 3), (1,), (1, 2), (1, 2, 3)]
arg_dict["shared_axes"] = [(2,), (1, 2), (1, 3), (1, 2, 3)]
for arg in GenArgList(arg_dict):
_run_test(*arg)
......
......@@ -17,6 +17,7 @@ import unittest
import uuid
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
import oneflow.typing as oft
......@@ -56,6 +57,7 @@ def gen_numpy_data(prediction, label, beta=1.0):
@flow.unittest.skip_unless_1n1d()
class TestSmoothL1Loss(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_smooth_l1_loss(_):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
......
......@@ -95,6 +95,7 @@ def compare_with_tensorflow(device_type, x_shape, data_type, axis):
@flow.unittest.skip_unless_1n1d()
class TestSoftmax(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_softmax_shape(test_case):
if flow.eager_execution_enabled():
print("\nSkip under eager mode!")
......
......@@ -16,6 +16,7 @@ limitations under the License.
import unittest
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
import tensorflow as tf
......@@ -57,10 +58,10 @@ def compare_with_tensorflow(device_type, in_shape, axis, direction, data_type):
def gen_arg_list():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(100,), (100, 100), (10, 10, 200)]
arg_dict["in_shape"] = [(10,), (10, 10, 20)]
arg_dict["axis"] = [-1]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["data_type"] = ["float32", "double"]
return GenArgList(arg_dict)
......@@ -69,15 +70,16 @@ def gen_arg_list_for_test_axis():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(10, 10, 20)]
arg_dict["axis"] = [-2, -1, 0, 1, 2]
arg_dict["axis"] = [-2, 0, 2]
arg_dict["direction"] = ["ASCENDING", "DESCENDING"]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["data_type"] = ["int32", "int64"]
return GenArgList(arg_dict)
@flow.unittest.skip_unless_1n1d()
class TestSort(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_sort(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
......
......@@ -16,6 +16,7 @@ limitations under the License.
import unittest
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
import oneflow.typing as oft
......@@ -24,14 +25,13 @@ from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
@flow.unittest.skip_unless_1n1d()
class TestSyncDynamicResize(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_sync_dynamic_resize(_):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["x_shape"] = [
(100,),
(100, 1),
(1000, 10),
(10, 10, 200),
]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["size_type"] = ["int32", "int64"]
......
......@@ -21,6 +21,7 @@ import oneflow as flow
import tensorflow as tf
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as oft
import os
from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim
from oneflow.python.ops.transpose_util import get_inversed_perm
......@@ -66,9 +67,9 @@ def compare_with_tensorflow(device_type, in_shape, axis, k, data_type, sorted):
def gen_arg_list():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(100,), (100, 100), (10, 500), (10, 10, 500)]
arg_dict["in_shape"] = [(100,), (10, 10, 50)]
arg_dict["axis"] = [-1]
arg_dict["k"] = [1, 50, 200]
arg_dict["k"] = [1, 50]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["sorted"] = [True]
......@@ -79,7 +80,7 @@ def gen_arg_list_for_test_axis():
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["in_shape"] = [(10, 10, 500)]
arg_dict["axis"] = [-2, -1, 0, 1, 2]
arg_dict["axis"] = [-2, 0, 2]
arg_dict["k"] = [1, 50, 200]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
arg_dict["sorted"] = [True]
......@@ -89,6 +90,7 @@ def gen_arg_list_for_test_axis():
@flow.unittest.skip_unless_1n1d()
class TestTopK(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_top_k(test_case):
for arg in gen_arg_list():
compare_with_tensorflow(*arg)
......
......@@ -15,6 +15,8 @@ limitations under the License.
"""
import unittest
from collections import OrderedDict
import os
import numpy as np
import oneflow as flow
from test_util import (
......@@ -92,18 +94,13 @@ def _test_tril_fw_bw(test_case, device, shape, type_name, diagonal, fill_value):
@flow.unittest.skip_unless_1n1d()
class TestTril(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_tril_fw_bw(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "gpu"]
arg_dict["type_name"] = [
"float32",
"float16",
"double",
"int32",
"int64",
]
arg_dict["shape"] = [(6, 6), (3, 6, 8)]
arg_dict["diagonal"] = [-8, -1, 0, 1, 8]
arg_dict["type_name"] = ["float32", "float16", "double", "int32", "int64"]
arg_dict["shape"] = [(3, 6, 8)]
arg_dict["diagonal"] = [-8, -1, 0, 8]
arg_dict["fill_value"] = [1.0, 0]
for arg in GenArgDict(arg_dict):
if arg["device"] == "cpu" and arg["type_name"] == "float16":
......
......@@ -17,6 +17,7 @@ import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import os
from collections import OrderedDict
......@@ -78,6 +79,7 @@ def _test_two_stage_reduce(
@flow.unittest.skip_unless_1n4d()
class TestTwoStageReduce(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_two_stage_reduce_max(test_case):
arg_dict = OrderedDict()
arg_dict["flow_func"] = [flow.math.two_stage_reduce_max]
......@@ -89,6 +91,7 @@ class TestTwoStageReduce(flow.unittest.TestCase):
for arg in GenArgList(arg_dict):
_test_two_stage_reduce(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_two_stage_reduce_min(test_case):
arg_dict = OrderedDict()
arg_dict["flow_func"] = [flow.math.two_stage_reduce_min]
......
......@@ -18,6 +18,7 @@ import numpy as np
import oneflow as flow
from scipy.special import erf, erfc, gammaln
import oneflow.typing as oft
import os
@flow.unittest.skip_unless_1n2d()
......@@ -35,6 +36,7 @@ class TestUnaryElementwiseOps(flow.unittest.TestCase):
y = AbsJob(x).get().numpy()
test_case.assertTrue(np.array_equal(y, np.absolute(x)))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_1n2c_mirror_dynamic_abs(test_case):
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
......@@ -64,6 +66,7 @@ class TestUnaryElementwiseOps(flow.unittest.TestCase):
y = AcosJob(x).get().numpy()
test_case.assertTrue(np.allclose(y, np.arccos(x)))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_acos_consistent_1n2c(test_case):
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
......@@ -105,6 +108,7 @@ class TestUnaryElementwiseOps(flow.unittest.TestCase):
y = AcosJob(x).get().numpy()
test_case.assertTrue(np.allclose(y, np.arccos(x)))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_1n2c_mirror_dynamic_acos(test_case):
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
......@@ -557,6 +561,7 @@ class TestUnaryElementwiseOps(flow.unittest.TestCase):
y = SignJob(x).get().numpy()
test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_sign_double_consistent_1n2c(test_case):
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
......
......@@ -103,6 +103,7 @@ def _test_unsorted_segment_sum_model_parallel_fw(
@flow.unittest.skip_unless_1n4d()
class TestUnsortedSegmentSumModelParallel(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_unsorted_segment_sum_model_parallel_fw(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
......
......@@ -17,6 +17,7 @@ import unittest
import numpy as np
import oneflow as flow
import tensorflow as tf
import os
from collections import OrderedDict
from test_util import GenArgDict
......@@ -218,6 +219,7 @@ def _of_where_with_x_and_y_are_none(input, input_shape=None):
@flow.unittest.skip_unless_1n4d()
class TestWhere(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[5, 10]]
......@@ -228,6 +230,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_case_1(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[4, 5, 8]]
......@@ -238,6 +241,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_case_2(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[10, 7, 9]]
......@@ -248,6 +252,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_case_3(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[12, 25, 6]]
......@@ -258,6 +263,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_grad(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[10]]
......@@ -269,6 +275,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_tf(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_grad_case_1(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[3, 7, 10]]
......@@ -279,6 +286,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_tf(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_grad_case_2(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[16, 1]]
......@@ -289,6 +297,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_tf(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_grad_4card(test_case):
arg_dict = OrderedDict()
arg_dict["cond_shape"] = [[10]]
......@@ -300,6 +309,7 @@ class TestWhere(flow.unittest.TestCase):
for arg in GenArgDict(arg_dict):
_compare_with_tf(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_where_argwhere(test_case):
rand_input = np.random.random_sample((11, 3, 5)).astype(np.float32)
rand_input[np.nonzero(rand_input < 0.5)] = 0.0
......