diff --git a/tfx/components/distribution_validator/executor_test.py b/tfx/components/distribution_validator/executor_test.py
index 33b378f125..347c4f2077 100644
--- a/tfx/components/distribution_validator/executor_test.py
+++ b/tfx/components/distribution_validator/executor_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.distribution_validator.executor."""
 
-import pytest
 import os
 import tempfile
 
@@ -552,8 +551,6 @@ def testMissBaselineStats(self):
       },
   )
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testStructData(self):
     source_data_dir = FLAGS.test_tmpdir
     stats_artifact = standard_artifacts.ExampleStatistics()
@@ -1014,8 +1011,6 @@ def testStructData(self):
          }
        """
   })
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testEmptyData(self, stats_train, stats_eval, expected_anomalies):
     source_data_dir = FLAGS.test_tmpdir
     stats_artifact = standard_artifacts.ExampleStatistics()
diff --git a/tfx/components/distribution_validator/utils_test.py b/tfx/components/distribution_validator/utils_test.py
index 360ced0ba8..306c8431af 100644
--- a/tfx/components/distribution_validator/utils_test.py
+++ b/tfx/components/distribution_validator/utils_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.components.distribution_validator.utils."""
 
-import pytest
 import os
 
 from absl import flags
@@ -31,8 +30,6 @@
 class UtilsTest(tf.test.TestCase):
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def test_load_config_from_artifact(self):
     expected_config = text_format.Parse(
         """default_slice_config: {
diff --git a/tfx/components/example_gen/csv_example_gen/executor_test.py b/tfx/components/example_gen/csv_example_gen/executor_test.py
index 776926c224..65acf02922 100644
--- a/tfx/components/example_gen/csv_example_gen/executor_test.py
+++ b/tfx/components/example_gen/csv_example_gen/executor_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.components.example_gen.csv_example_gen.executor."""
 
-import pytest
 import os
 
 from absl.testing import absltest
@@ -104,8 +103,6 @@ def check_results(results):
       util.assert_that(examples, check_results)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testDo(self):
     output_data_dir = os.path.join(
         os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.create_tempdir()),
diff --git a/tfx/conftest.py b/tfx/conftest.py
new file mode 100644
index 0000000000..b9cc734eb9
--- /dev/null
+++ b/tfx/conftest.py
@@ -0,0 +1,7 @@
+"""Test configuration."""
+from absl import flags
+
+def pytest_configure(config):
+  # This is needed to avoid
+  # `absl.flags._exceptions.UnparsedFlagAccessError` in some tests.
+  flags.FLAGS.mark_as_parsed()
diff --git a/tfx/dsl/compiler/compiler_test.py b/tfx/dsl/compiler/compiler_test.py
index 8cc56af02b..4a1d5966a2 100644
--- a/tfx/dsl/compiler/compiler_test.py
+++ b/tfx/dsl/compiler/compiler_test.py
@@ -17,7 +17,6 @@
 """
 
-import pytest
 import os
 import threading
 import types
@@ -149,8 +148,6 @@ def _get_pipeline_ir(self, filename: str) -> pipeline_pb2.Pipeline:
          consumer_pipeline_with_tags,
      ])
  )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testCompile(
       self,
       pipeline_module: types.ModuleType,
diff --git a/tfx/dsl/compiler/placeholder_utils_test.py b/tfx/dsl/compiler/placeholder_utils_test.py
index e2b7c32fba..b2187b058b 100644
--- a/tfx/dsl/compiler/placeholder_utils_test.py
+++ b/tfx/dsl/compiler/placeholder_utils_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.dsl.compiler.placeholder_utils."""
 
-import pytest
 import base64
 import itertools
 import re
@@ -411,8 +410,6 @@ def testArtifactUriNoneAccess(self):
         placeholder_utils.resolve_placeholder_expression(
             pb, self._none_resolution_context))
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testArtifactValueOperator(self):
     test_artifact = standard_artifacts.Integer()
     test_artifact.uri = self.create_tempfile().full_path
@@ -449,8 +446,6 @@ def testArtifactValueOperator(self):
             pb, self._resolution_context)
     self.assertEqual(resolved_value, 42)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testJsonValueArtifactWithIndexOperator(self):
     test_artifact = standard_artifacts.JsonValue()
     test_artifact.uri = self.create_tempfile().full_path
@@ -1886,8 +1881,6 @@ def _createResolutionContext(self, input_values_dict):
              False,
          },
      )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testComparisonOperator(self, input_values_dict, comparison_op,
                              expected_result):
     resolution_context = self._createResolutionContext(input_values_dict)
@@ -2088,8 +2081,6 @@ def _createTrueFalsePredsAndResolutionContext(self):
             false_pb, resolution_context), False)
     return true_pb, false_pb, resolution_context
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testNotOperator(self):
     true_pb, false_pb, resolution_context = (
         self._createTrueFalsePredsAndResolutionContext())
@@ -2170,8 +2161,6 @@ def testNotOperator(self):
              "expected_result": False,
          },
      )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testBinaryLogicalOperator(self, lhs_evaluates_to_true,
                                 rhs_evaluates_to_true, op, expected_result):
     true_pb, false_pb, resolution_context = (
@@ -2187,8 +2176,6 @@ def testBinaryLogicalOperator(self, lhs_evaluates_to_true,
         placeholder_utils.resolve_placeholder_expression(
             pb, resolution_context), expected_result)
 
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.", strict=True)
   def testNestedExpression(self):
     true_pb, false_pb, resolution_context = (
         self._createTrueFalsePredsAndResolutionContext())
diff --git a/tfx/dsl/input_resolution/strategies/conditional_strategy_test.py b/tfx/dsl/input_resolution/strategies/conditional_strategy_test.py
index 18f672376c..52169837a6 100644
--- a/tfx/dsl/input_resolution/strategies/conditional_strategy_test.py
+++ b/tfx/dsl/input_resolution/strategies/conditional_strategy_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for tfx.dsl.input_resolution.strategies.conditional_strategy."""
 
-import pytest
 from tfx.dsl.input_resolution.strategies import conditional_strategy
 from tfx.orchestration import data_types
 from tfx.orchestration import metadata
@@ -86,11 +85,6 @@
 """
 
-@pytest.mark.xfail(
-    run=False,
-    reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-    "If all tests pass, please remove this mark.",
-)
 class ConditionalStrategyTest(test_case_utils.TfxTest):
 
   def setUp(self):
     super().setUp()
diff --git a/tfx/orchestration/experimental/core/pipeline_ops_test.py b/tfx/orchestration/experimental/core/pipeline_ops_test.py
index f570ee5386..17cc405865 100644
--- a/tfx/orchestration/experimental/core/pipeline_ops_test.py
+++ b/tfx/orchestration/experimental/core/pipeline_ops_test.py
@@ -14,7 +14,6 @@
 """Tests for tfx.orchestration.experimental.core.pipeline_ops."""
 
-import pytest
 import copy
 import os
 import threading
@@ -93,7 +92,7 @@ def setUp(self):
     super().setUp()
     pipeline_root = os.path.join(
         os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
-        self.id(),
+        str(uuid.uuid1()),
     )
 
     # Makes sure multiple connections within a test always connect to the same
@@ -1582,8 +1581,6 @@ def test_stop_node_wait_for_inactivation_timeout(self):
          expected_run_id='run0',
      ),
  )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_record_orchestration_time(self, pipeline, expected_run_id):
     with self._mlmd_cm as mlmd_connection_manager:
       m = mlmd_connection_manager.primary_mlmd_handle
@@ -1767,8 +1764,6 @@ def test_orchestrate_active_pipelines(
      '_record_orchestration_time',
      wraps=pipeline_ops._record_orchestration_time,
  )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_orchestrate_stop_initiated_pipelines(
       self,
       pipeline,
@@ -2122,8 +2117,6 @@ def recorder(event):
      '_record_orchestration_time',
      wraps=pipeline_ops._record_orchestration_time,
  )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_orchestrate_update_initiated_pipelines(
       self, pipeline, mock_record_orchestration_time
   ):
@@ -2336,8 +2329,6 @@ def test_update_pipeline_wait_for_update_timeout(self):
   @mock.patch.object(
       task_gen_utils, 'generate_cancel_task_from_running_execution'
   )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_orchestrate_update_initiated_pipelines_preempted(
       self,
       pipeline,
@@ -2455,8 +2446,6 @@ def test_orchestrate_update_initiated_pipelines_preempted(
   @mock.patch.object(
       task_gen_utils, 'generate_cancel_task_from_running_execution'
   )
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_active_pipelines_with_stopped_nodes(
       self,
       pipeline,
@@ -2679,8 +2668,6 @@ def fn2():
   )
   @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
   @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_executor_node_stop_then_start_flow(
       self, pipeline, mock_async_task_gen, mock_sync_task_gen
   ):
@@ -2865,8 +2852,6 @@ def test_pure_service_node_stop_then_start_flow(
   )
   @mock.patch.object(sync_pipeline_task_gen, 'SyncPipelineTaskGenerator')
   @mock.patch.object(async_pipeline_task_gen, 'AsyncPipelineTaskGenerator')
-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-"If this test passes, please remove this mark.")
   def test_mixed_service_node_stop_then_start_flow(
       self, pipeline, mock_async_task_gen, mock_sync_task_gen
   ):
diff --git a/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_component_integration_test.py b/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_component_integration_test.py
index 9c828846fd..d2e23f96a3 100644
--- a/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_component_integration_test.py
+++ b/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_component_integration_test.py
@@ -31,8 +31,6 @@
 _PIPELINE_NAME_PREFIX = 'aip-training-component-pipeline-{}'
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.integration
 class AiPlatformTrainingComponentIntegrationTest(
     base_test_case.BaseKubeflowV2Test, parameterized.TestCase
diff --git a/tfx/orchestration/kubeflow/v2/e2e_tests/artifact_value_placeholder_integration_test.py b/tfx/orchestration/kubeflow/v2/e2e_tests/artifact_value_placeholder_integration_test.py
index d29bd06085..f5002c84f0 100644
--- a/tfx/orchestration/kubeflow/v2/e2e_tests/artifact_value_placeholder_integration_test.py
+++ b/tfx/orchestration/kubeflow/v2/e2e_tests/artifact_value_placeholder_integration_test.py
@@ -70,8 +70,6 @@ def _tasks_for_pipeline_with_artifact_value_passing():
   return [producer_task, print_task]
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.integration
 @pytest.mark.e2e
 class ArtifactValuePlaceholderIntegrationTest(
diff --git a/tfx/orchestration/kubeflow/v2/e2e_tests/bigquery_integration_test.py b/tfx/orchestration/kubeflow/v2/e2e_tests/bigquery_integration_test.py
index 8279df343a..e3a4f6ca86 100644
--- a/tfx/orchestration/kubeflow/v2/e2e_tests/bigquery_integration_test.py
+++ b/tfx/orchestration/kubeflow/v2/e2e_tests/bigquery_integration_test.py
@@ -53,8 +53,6 @@
     < 0.0004"""
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.integration
 @pytest.mark.e2e
 class BigqueryIntegrationTest(
diff --git a/tfx/orchestration/kubeflow/v2/e2e_tests/csv_example_gen_integration_test.py b/tfx/orchestration/kubeflow/v2/e2e_tests/csv_example_gen_integration_test.py
index ba88ac8805..d6962afc31 100644
--- a/tfx/orchestration/kubeflow/v2/e2e_tests/csv_example_gen_integration_test.py
+++ b/tfx/orchestration/kubeflow/v2/e2e_tests/csv_example_gen_integration_test.py
@@ -31,8 +31,6 @@
 _TEST_DATA_ROOT = '/opt/conda/lib/python3.10/site-packages/tfx/examples/chicago_taxi_pipeline/data/simple'
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.integration
 @pytest.mark.e2e
 class CsvExampleGenIntegrationTest(
diff --git a/tfx/orchestration/kubeflow/v2/e2e_tests/exit_handler_e2e_test.py b/tfx/orchestration/kubeflow/v2/e2e_tests/exit_handler_e2e_test.py
index f5cca6e694..c2dcf96803 100644
--- a/tfx/orchestration/kubeflow/v2/e2e_tests/exit_handler_e2e_test.py
+++ b/tfx/orchestration/kubeflow/v2/e2e_tests/exit_handler_e2e_test.py
@@ -36,8 +36,6 @@
 _success_file_name = 'success_final_status.txt'
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.e2e
 class ExitHandlerE2ETest(
     base_test_case.BaseKubeflowV2Test, parameterized.TestCase
diff --git a/tfx/orchestration/portable/docker_executor_operator_e2e_test.py b/tfx/orchestration/portable/docker_executor_operator_e2e_test.py
index 580adc1c04..06ac4bec82 100644
--- a/tfx/orchestration/portable/docker_executor_operator_e2e_test.py
+++ b/tfx/orchestration/portable/docker_executor_operator_e2e_test.py
@@ -68,8 +68,6 @@ def _create_pipeline(
   )
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 @pytest.mark.e2e
 class DockerComponentLauncherE2eTest(tf.test.TestCase):
diff --git a/tfx/tools/cli/handler/handler_factory_test.py b/tfx/tools/cli/handler/handler_factory_test.py
index 9c9141de6b..bb8ac91ede 100644
--- a/tfx/tools/cli/handler/handler_factory_test.py
+++ b/tfx/tools/cli/handler/handler_factory_test.py
@@ -36,8 +36,6 @@ def __init__(self, host, client_id, namespace):
     self._output_dir = os.path.join(tempfile.gettempdir(), 'output_dir')
 
 
-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-"If all tests pass, please remove this mark.")
 class HandlerFactoryTest(tf.test.TestCase):
 
   def setUp(self):
@@ -68,6 +66,8 @@ def _MockSubprocessKubeflow(self):
 
   @mock.patch('subprocess.check_output', _MockSubprocessKubeflow)
   @mock.patch('kfp.Client', _MockClientClass)
+  @pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
+"If all tests pass, please remove this mark.")
   def testCreateHandlerKubeflow(self):
     flags_dict = {
         labels.ENGINE_FLAG: 'kubeflow',