Commit a8772528 authored by Anna R and committed by TensorFlower Gardener

Disable tests that flakily time out in the tensorflow.cuda_asan project.

PiperOrigin-RevId: 339922525
Change-Id: I88c0d616154484ddd353d262f2fe7408cfc058c4
Parent c31a3d0c
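For context on the pattern repeated throughout the diff below: each affected test target gets a "no_cuda_asan" tag in its Bazel BUILD file. A tag listed under tags excludes the test target itself when a build configuration filters on -no_cuda_asan, while a tag listed under xla_tags is, as the TensorFlow test macros appear to use it, applied only to the XLA-jit variant that macros such as cuda_py_test generate. The following sketch is illustrative only; the target name and deps are placeholders and are not taken from this commit.

cuda_py_test(
    name = "example_op_test",  # hypothetical target, for illustration only
    size = "small",
    srcs = ["example_op_test.py"],
    tags = [
        "no_cuda_asan",  # excludes the plain GPU test under cuda_asan (times out)
    ],
    xla_tags = [
        "no_cuda_asan",  # excludes only the XLA-jit variant under cuda_asan
    ],
    deps = [
        "//tensorflow/python:client_testlib",
    ],
)

A CI job can then skip these tests with Bazel's standard tag filtering, e.g. --test_tag_filters=-no_cuda_asan; the exact flags used by the tensorflow.cuda_asan project are not shown in this commit.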
@@ -126,6 +126,7 @@ tf_xla_py_test(
srcs = ["adagrad_da_test.py"],
python_version = "PY3",
tags = [
"no_cuda_asan", # times out
"no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
],
deps = [
@@ -143,6 +144,7 @@ tf_xla_py_test(
srcs = ["adam_test.py"],
python_version = "PY3",
tags = [
"no_cuda_asan", # times out
"no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
],
deps = [
@@ -793,6 +795,7 @@ tf_xla_py_test(
enable_mlir_bridge = True,
python_version = "PY3",
tags = [
"no_cuda_asan", # times out
"no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
],
deps = [
@@ -984,6 +984,7 @@ distribute_py_test(
shard_count = 10,
tags = [
"multi_and_single_gpu",
"no_cuda_asan", # times out
],
deps = [
":collective_all_reduce_strategy",
@@ -1754,6 +1755,7 @@ distribute_py_test(
shard_count = 4,
tags = [
"multi_and_single_gpu",
"no_cuda_asan", # times out
"notsan", # TODO(b/160006974)
],
xla_enable_strict_auto_jit = True,
@@ -378,6 +378,9 @@ cuda_py_test(
"no_windows", #TODO(b/139745667)
"notsan", #TODO(b/139745667)
],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
":backprop",
":context",
@@ -222,6 +222,7 @@ distribute_py_test(
shard_count = 10,
tags = [
"multi_and_single_gpu",
"no_cuda_asan", # times out
],
deps = [
":optimizer_combinations",
@@ -820,6 +821,7 @@ distribute_py_test(
shard_count = 7,
tags = [
"multi_and_single_gpu",
"no_cuda_asan", # times out
"no_rocm",
],
xla_tags = [
@@ -842,6 +844,9 @@ distribute_py_test(
"multi_and_single_gpu",
"no_rocm",
],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
":saved_model_test_base",
"//tensorflow/python/distribute:combinations",
@@ -548,6 +548,9 @@ cuda_py_test(
tags = [
"no_windows_gpu",
],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python/keras",
@@ -1373,6 +1373,9 @@ cuda_py_test(
name = "topk_op_test",
size = "medium",
srcs = ["topk_op_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -1595,6 +1598,9 @@ cuda_py_test(
name = "aggregate_ops_test",
size = "small",
srcs = ["aggregate_ops_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -1608,6 +1614,9 @@ cuda_py_test(
name = "argmax_op_test",
size = "small",
srcs = ["argmax_op_test.py"],
tags = [
"no_cuda_asan", # times out
],
xla_tags = [
"no_cuda_asan", # times out
],
@@ -1767,6 +1776,9 @@ cuda_py_test(
name = "check_ops_test",
size = "small",
srcs = ["check_ops_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:check_ops",
@@ -2205,6 +2217,9 @@ cuda_py_test(
size = "medium",
srcs = ["einsum_op_test.py"],
shard_count = 4,
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -2283,6 +2298,9 @@ cuda_py_test(
size = "small",
srcs = ["one_hot_op_test.py"],
tags = ["no_windows_gpu"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -2295,7 +2313,7 @@ cuda_py_test(
name = "stack_op_test",
size = "small",
srcs = ["stack_op_test.py"],
xla_tags = [
tags = [
"no_cuda_asan", # times out
],
deps = [
@@ -2315,6 +2333,9 @@ cuda_py_test(
grpc_enabled = True,
shard_count = 2,
tags = ["no_windows"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -2334,6 +2355,9 @@ cuda_py_test(
name = "pad_op_test",
size = "small",
srcs = ["pad_op_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -2441,6 +2465,9 @@ cuda_py_test(
name = "relu_op_test",
size = "small",
srcs = ["relu_op_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_for_generated_wrappers",
@@ -2585,6 +2612,9 @@ cuda_py_test(
name = "spacetobatch_op_test",
size = "small",
srcs = ["spacetobatch_op_test.py"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:array_ops_gen",
@@ -3504,6 +3534,9 @@ cuda_py_test(
"no_oss", # b/117185141.
"nomsan", # TODO(b/117236102): Re-enable in msan build.
],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
@@ -145,6 +145,9 @@ cuda_py_test(
srcs = ["random_gamma_test.py"],
shard_count = 4,
tags = ["nozapfhahn"],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
":util",
"//tensorflow/python:array_ops",
@@ -1136,6 +1136,9 @@ cuda_py_test(
"no_windows", # b/139083295: bfloat16 tests fail on Windows
"notsan",
],
xla_tags = [
"no_cuda_asan", # times out
],
deps = [
":moving_averages",
":saver",