[2.18-rocm-enhanced] Fix test tags and enable some of the tests #2724

Status: Open. Wants to merge 3 commits into base: r2.18-rocm-enhanced.
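The pattern across this pull request is uniform: the blanket no_rocm tag is removed outright where a test now passes on ROCm, and replaced with cuda-only where the test genuinely depends on CUDA-specific behavior. As an illustration only (the target below is hypothetical and not part of this diff), the resulting tagging of a typical GPU test target looks like this:

# Hypothetical target, for illustration only; not a file touched by this PR.
tf_cuda_cc_test(
    name = "example_gpu_test",
    srcs = ["example_gpu_test.cc"],
    tags = [
        "no_pip",
        "cuda-only",  # was "no_rocm"; this test exercises CUDA-only functionality
    ],
    deps = [":example_lib"],
)

Presumably the ROCm CI then excludes such targets with something along the lines of bazel test --test_tag_filters=-cuda-only, while targets that merely lost the no_rocm tag start running again; the exact filter string used by the CI is an assumption here, not something stated in this diff.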
tensorflow/compiler/mlir/lite/debug/BUILD (1 change: 0 additions & 1 deletion)
@@ -44,7 +44,6 @@ cc_library(
tf_cc_test(
    name = "debug_test",
    srcs = ["debug_test.cc"],
-    tags = ["no_rocm"],
    deps = [
        ":debug",
        ":debug_options_proto_cc",
tensorflow/compiler/mlir/lite/tests/BUILD (3 changes: 0 additions & 3 deletions)
@@ -19,9 +19,6 @@ glob_lit_tests(
        "raise-custom-ops.mlir": "medium",
    },
    tags_override = {
-        "legalize-tf.mlir": ["no_rocm"],
-        "optimize.mlir": ["no_rocm"],
-        "prepare-tf.mlir": ["no_rocm"],
        "const-fold.mlir": ["no_mac_arm64"],
    },
    test_file_exts = ["mlir"],
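For the lit-based suites, per-file tags come from the tags_override attribute of glob_lit_tests, so enabling a test file on ROCm amounts to deleting its entry; once the dictionary is empty it can be dropped entirely, as in the end2end BUILD file below. A minimal sketch of the shape of such a rule, assuming glob_lit_tests is loaded as in the BUILD files in this diff (the .mlir file name below is made up):

glob_lit_tests(
    driver = "@llvm-project//mlir:run_lit.sh",
    # A file keeps an entry only while it needs special handling; it is
    # re-enabled on a platform simply by deleting its entry.
    tags_override = {
        "some-cuda-only-case.mlir": ["cuda-only"],
    },
    test_file_exts = ["mlir"],
)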
tensorflow/compiler/mlir/lite/tests/end2end/BUILD (4 changes: 0 additions & 4 deletions)
@@ -15,10 +15,6 @@ glob_lit_tests(
    size_override = {
        "quant_stats.pbtxt": "medium",
    },
-    tags_override = {
-        "add.pbtxt": ["no_rocm"],
-        "fake_quant_per_channel.pbtxt": ["no_rocm"],
-    },
    test_file_exts = [
        "pbtxt",
    ],
tensorflow/compiler/mlir/quantization/stablehlo/BUILD (2 changes: 1 addition & 1 deletion)
@@ -381,7 +381,7 @@ tf_cc_test(
    srcs = [
        "passes/bridge/convert_tf_quant_to_mhlo_int_test.cc",
    ],
-    tags = ["nomac", "no_rocm"], # TODO(b/297362678): re-enable mac test.
+    tags = ["nomac"], # TODO(b/297362678): re-enable mac test.
    deps = [
        ":bridge_passes",
        "//tensorflow/compiler/mlir/quantization/tensorflow/cc:constant_fold",
tensorflow/compiler/mlir/tensorflow/tests/BUILD (2 changes: 0 additions & 2 deletions)
@@ -17,8 +17,6 @@ glob_lit_tests(
        "layout_optimization_to_nhwc.mlir": "medium",
    },
    tags_override = {
-        "optimize.mlir": ["no_rocm"],
-        "tf_optimize.mlir": ["no_rocm"],
        "tf-reduce-identity.mlir": ["no_windows"],
    },
    test_file_exts = ["mlir"],
(file path not captured in this excerpt; 1 addition & 1 deletion)
@@ -296,7 +296,7 @@ glob_lit_tests(
    default_tags = [
        "no_mac", # TODO(b/191167848)
        "no_oss", # TODO(b/190855110)
-        "no_rocm",
+        "cuda-only",
    ],
    driver = "@llvm-project//mlir:run_lit.sh",
    exclude = [
tensorflow/compiler/tests/BUILD (7 changes: 3 additions & 4 deletions)
@@ -907,7 +907,6 @@ tf_xla_py_strict_test(
    shard_count = 12,
    tags = [
        "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
-        "no_rocm",
        "optonly",
    ],
    deps = [
@@ -1791,7 +1790,7 @@ tf_xla_py_strict_test(
    python_version = "PY3",
    shard_count = 20,
    tags = [
-        "no_rocm",
+        "cuda-only",
        "no_aarch64", # TODO(b/348125886)
        "no_cuda_asan", # times out
        "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
@@ -2866,7 +2865,7 @@ tf_cuda_cc_test(
    tags = [
        "config-cuda-only",
        "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
-        "no_rocm", # ROCmSoftwarePlatform #958
+        "cuda-only", # ROCmSoftwarePlatform #958
        "noasan", # TODO(b/201651800)
        "requires-gpu-nvidia",
    ] + tf_cuda_tests_tags(),
@@ -2887,7 +2886,7 @@ tf_cuda_cc_test(
    tags = [
        "config-cuda-only",
        "no_pip", # TODO(b/149738646): fix pip install so these tests run on kokoro pip
-        "no_rocm", # ROCmSoftwarePlatform #958
+        "cuda-only", # ROCmSoftwarePlatform #958
        "noasan", # TODO(b/201651800)
        "requires-gpu-nvidia",
    ] + tf_cuda_tests_tags(),
tensorflow/core/kernels/mkl/BUILD (2 changes: 1 addition & 1 deletion)
@@ -427,7 +427,7 @@ tf_cc_test_mkl(
    size = "small",
    srcs = ["mkl_fused_batch_norm_op_test.cc"],
    linkstatic = 1,
-    tags = ["no_rocm"], # fails on AMD Rome CPUs as of 2021-03-29
+    tags = ["cuda-only"], # fails on AMD Rome CPUs as of 2021-03-29
    deps = [
        ":mkl_conv_op",
        ":mkl_fused_batch_norm_op",
tensorflow/core/kernels/mlir_generated/BUILD (3 changes: 1 addition & 2 deletions)
@@ -455,7 +455,6 @@ tf_cuda_cc_test(
    tags = tf_cuda_tests_tags() + [
        "no_cuda", # TODO(b/196608406): re-enable
        "no_cuda_asan", # TODO(b/171341759): re-enable.
-        "no_rocm", # fail on CI
    ],
    deps = [
        ":base_ops_test",
@@ -545,7 +544,7 @@ tf_cuda_cc_test(
    tags = tf_cuda_tests_tags() + [
        "no_cuda", # TODO(b/196608406): re-enable
        "no_cuda_asan", # TODO(b/171341759): re-enable.
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":base_binary_ops_test",
tensorflow/core/nccl/BUILD (2 changes: 1 addition & 1 deletion)
@@ -62,7 +62,7 @@ tf_cuda_cc_test(
        "multi_gpu",
        "no_oss",
        "notap",
-        "no_rocm", # flaky on CI as of 2022-05-30
+        "cuda-only", # flaky on CI as of 2022-05-30
    ],
    deps = [
        "//tensorflow/core:test",
tensorflow/core/profiler/backends/gpu/BUILD (2 changes: 1 addition & 1 deletion)
@@ -18,7 +18,7 @@ tf_cuda_cc_test(
    tags = tf_cuda_tests_tags() + [
        "gpu_cupti",
        "nomac",
-        "no_rocm", # flaky on CI
+        "cuda-only", # flaky on CI
    ],
    deps = [
        "//tensorflow/cc:cc_ops",
tensorflow/core/util/autotune_maps/BUILD (2 changes: 1 addition & 1 deletion)
@@ -194,7 +194,7 @@ tf_cuda_only_cc_test(
    size = "small",
    srcs = ["autotune_serialize_test.cc"],
    features = ["-layering_check"],
-    tags = ["no_rocm"],
+    tags = ["cuda-only"],
    deps = [
        ":autotune_serialize",
        ":conv_autotune_maps",
tensorflow/dtensor/cc/BUILD (2 changes: 1 addition & 1 deletion)
@@ -224,7 +224,7 @@ tf_kernel_library(
        "dtensor_tpu_kernels.cc",
    ],
    tags = [
-        "no_rocm",
+        "cuda-only",
        "tpu",
    ], # Disable building of TPU kernels on non-TPU platforms.
    deps = [
tensorflow/dtensor/python/tests/BUILD (5 changes: 2 additions & 3 deletions)
@@ -555,7 +555,6 @@ dtensor_test(
        "NCCL_P2P_DISABLE": "1", # FIXME(b/251183104): p2p detection in cuda 10.1+ is broken.
    },
    tags = [
-        "no_rocm",
        "no_windows",
        "nosan", # b/195537906
    ],
@@ -749,7 +748,7 @@ dtensor_test(
        TPU_V3_DONUT_BACKEND: 32,
    },
    tags = [
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":test_util",
@@ -804,7 +803,7 @@ dtensor_test(
    },
    tags = [
        "no_oss_py38", # TODO(b/267017937)
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":test_util",
tensorflow/lite/python/kernel_tests/signal/BUILD (1 change: 0 additions & 1 deletion)
@@ -24,7 +24,6 @@ cuda_py_strict_test(
    python_version = "PY3",
    shard_count = 4,
    tags = [
-        "no_rocm",
        "no_windows_gpu",
    ],
    deps = [
tensorflow/python/client/BUILD (2 changes: 0 additions & 2 deletions)
@@ -437,9 +437,7 @@ tf_py_strict_test(
    python_version = "PY3",
    tags = [
        "no_gpu",
-        "no_rocm",
        "no_windows",
-        "no_rocm",
    ],
    deps = [
        ":session",
tensorflow/python/compiler/tensorrt/test/BUILD (2 changes: 1 addition & 1 deletion)
@@ -75,7 +75,7 @@ filegroup(

base_tags = [
    "no_cuda_on_cpu_tap",
-    "no_rocm",
+    "cuda-only",
    "no_windows",
    "nomac",
    # TODO(b/303453873): Re-enable tests once TensorRT has been updated
tensorflow/python/debug/lib/BUILD (4 changes: 2 additions & 2 deletions)
@@ -358,7 +358,7 @@ cuda_py_strict_test(
    shard_count = 4,
    tags = [
        "no_windows", # TODO(b/142475891): Enable this test on Windows.
-        "no_rocm", #TODO(ROCm) Re-enable after issue is fixed.
+        "cuda-only", #TODO(ROCm) Re-enable after issue is fixed.
    ],
    xla_enable_strict_auto_jit = False, # Node names are different with autojit
    deps = [
@@ -390,7 +390,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    tags = [
        "no_windows_gpu",
-        "no_rocm", #TODO(ROCm) Re-enable after issue is fixed.
+        "cuda-only", #TODO(ROCm) Re-enable after issue is fixed.
    ],
    deps = [
        ":debug_events_reader",
tensorflow/python/distribute/BUILD (4 changes: 2 additions & 2 deletions)
@@ -746,7 +746,7 @@ distribute_py_strict_test(
        "multi_and_single_gpu",
        "no_cuda_asan", # b/213388775
        "no_oss", # b/241013307
-        "no_rocm",
+        "cuda-only",
        "notap", # Flaky; TODO(b/289970206)
    ],
    tpu_tags = [
@@ -2581,7 +2581,7 @@ distribute_py_strict_test(
        "multi_and_single_gpu",
        "nomac", # TODO(b/201788023): Attempt MultiProcessCluster to fix this.
        "notpu",
-        "no_rocm", #times out
+        "cuda-only", #times out
    ],
    deps = [
        ":distribute_lib",
tensorflow/python/feature_column/BUILD (4 changes: 2 additions & 2 deletions)
@@ -169,7 +169,7 @@ tf_py_strict_test(
        "no_cuda_on_cpu_tap",
        "no_oss", # TODO(b/206860622): Broken with numpy 1.20+
        "no_pip",
-        "no_rocm",
+        "cuda-only",
        "no_windows",
    ],
    deps = [
@@ -215,7 +215,7 @@ tf_py_strict_test(
        "no_cuda_on_cpu_tap",
        "no_oss", # TODO(b/206860622): Broken with numpy 1.20+
        "no_pip",
-        "no_rocm",
+        "cuda-only",
        "no_windows",
    ],
    deps = [":feature_column_v2_test_main_lib"],
tensorflow/python/framework/BUILD (2 changes: 1 addition & 1 deletion)
@@ -1462,7 +1462,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    tags = [
        "no_pip", # test_ops are not available in pip
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":config",
tensorflow/python/kernel_tests/image_ops/BUILD (2 changes: 1 addition & 1 deletion)
@@ -140,7 +140,7 @@ cuda_py_strict_test(
    shard_count = 15,
    tags = [
        "no_oss", # b/241024908
-        "no_rocm",
+        "cuda-only",
        "nomac", # b/181799478
        "notap", # b/31080670
    ],
tensorflow/python/kernel_tests/linalg/BUILD (2 changes: 1 addition & 1 deletion)
@@ -262,7 +262,7 @@ cuda_py_strict_test(
    shard_count = 50,
    tags = [
        "no_cuda11", # TODO(b/197522782): reenable test after fixing.
-        "no_rocm", # extremely slow, thousands of subtests, many triggering
+        "cuda-only", # extremely slow, thousands of subtests, many triggering
        # llvm invocations
        "optonly", # times out, b/79171797
    ],
tensorflow/python/kernel_tests/math_ops/BUILD (2 changes: 1 addition & 1 deletion)
@@ -263,7 +263,7 @@ cuda_py_strict_test(
    name = "cwise_ops_binary_test",
    size = "medium",
    srcs = ["cwise_ops_binary_test.py"],
-    tags = ["no_rocm"], #TODO(rocm): weekly sync 240919
+    tags = ["cuda-only"], #TODO(rocm): weekly sync 240919
    shard_count = 50,
    # b/140155647: Error just outside of tolerance
    xla_enable_strict_auto_jit = False,
tensorflow/python/kernel_tests/nn_ops/BUILD (6 changes: 3 additions & 3 deletions)
@@ -294,7 +294,7 @@ cuda_py_strict_test(
    shard_count = 4,
    tags = [
        "no_mac_arm64",
-        "no_rocm",
+        "cuda-only",
        "optonly", # times out
    ],
    deps = [
@@ -405,7 +405,7 @@ cuda_py_strict_test(
    srcs = ["cudnn_d9m_test.py"],
    tags = [
        "no_cuda_asan", # TODO(b/171509035): re-enable.
-        "no_rocm", #This is test is specific to CUDA and enables determinism through a CUDA specific env var.
+        "cuda-only", #This is test is specific to CUDA and enables determinism through a CUDA specific env var.
    ],
    deps = [
        ":cudnn_deterministic_base",
@@ -437,7 +437,7 @@ cuda_py_strict_test(
    size = "medium", # http://b/30603882
    timeout = "long",
    srcs = ["depthwise_conv_op_d9m_test.py"],
-    tags = ["no_rocm"],
+    tags = ["cuda-only"],
    shard_count = 8,
    deps = [
        ":depthwise_conv_op_base",
tensorflow/python/kernel_tests/sparse_ops/BUILD (2 changes: 1 addition & 1 deletion)
@@ -108,7 +108,7 @@ cuda_py_strict_test(
    shard_count = 5,
    tags = [
        "optonly", # b/77589990
-        "no_rocm"
+        "cuda-only"
    ],
    deps = [
        "//tensorflow/python/eager:def_function",
tensorflow/python/ops/BUILD (13 changes: 6 additions & 7 deletions)
@@ -1017,7 +1017,7 @@ tf_py_strict_test(
    srcs = ["collective_ops_test.py"],
    python_version = "PY3",
    tags = [
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":array_ops",
@@ -1048,7 +1048,7 @@ tf_py_strict_test(
    python_version = "PY3",
    tags = [
        "no_pip",
-        "no_rocm",
+        "cuda-only",
        "no_windows",
        "nomac",
    ],
@@ -3630,7 +3630,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    tags = [
        "no_windows_gpu",
-        "no_rocm", #TODO(rocm): weekly sync 240919
+        "cuda-only", #TODO(rocm): weekly sync 240919
    ],
    deps = [
        ":array_ops",
@@ -3715,7 +3715,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    shard_count = 4,
    tags = [
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":nn_grad",
@@ -3740,7 +3740,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    shard_count = 24,
    tags = [
-        "no_rocm",
+        "cuda-only",
    ],
    deps = [
        ":array_ops",
@@ -3891,7 +3891,7 @@ cuda_py_strict_test(
    python_version = "PY3",
    shard_count = 10,
    tags = [
-        "no_rocm",
+        "cuda-only",
        "no_windows_gpu",
    ],
    deps = [
@@ -4702,7 +4702,6 @@ cuda_py_strict_test(
    python_version = "PY3",
    shard_count = 10,
    tags = [
-        "no_rocm",
        "no_windows_gpu",
    ],
    deps = [