
Commit 9e66ed1

Re enable more not e2e xfail test cases (#6947)
* Exclude tests for the deprecated tfx/orchestration/experimental/core module from experimental orchestration.
* Include additional testdata files in the package build.
* Add module-level pytest cleanup functions for proto classes.
* Update doc tests to clean up docs before execution.
1 parent c770a51 commit 9e66ed1

19 files changed: +36 −109 lines
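Most of the diff below simply deletes `@pytest.mark.xfail(run=False, ...)` marks. As background, here is a minimal, self-contained sketch (example names only, not from the TFX code base) of what that mark does:

import pytest


@pytest.mark.xfail(run=False, reason="example only: known-broken test",
                   strict=True)
def test_known_broken():
  # With run=False, pytest reports this test as XFAIL without executing the
  # body at all, so a test that has silently started passing stays hidden.
  # Removing the mark, as this commit does, lets the test run for real again.
  assert 1 + 1 == 3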

MANIFEST.in  (+4 −1)

@@ -14,5 +14,8 @@ include tfx/proto/*.proto
 recursive-include tfx/orchestration/kubeflow/v2/testdata *

 recursive-include tfx/components/testdata *
+recursive-include tfx/orchestration/kubeflow/v2/testdata *

-include tfx/examples/imdb/data/
+include tfx/examples/imdb/data/*
+include tfx/orchestration/beam/testdata/*
+include tfx/orchestration/kubeflow/v2/container/testdata/*
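A quick, illustrative way to confirm that the testdata patterns added above actually land in a built source distribution (the archive path is a placeholder for a locally built sdist, not part of this commit):

import tarfile

SDIST_PATH = "dist/tfx-0.0.0.tar.gz"  # placeholder: path to a locally built sdist

with tarfile.open(SDIST_PATH) as sdist:
  # List every packaged file that lives under a testdata/ directory.
  testdata_members = [name for name in sdist.getnames() if "/testdata/" in name]
  print(f"{len(testdata_members)} testdata files packaged")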

pyproject.toml  (+1 −1)

@@ -40,7 +40,7 @@ Repository = "https://github.com/tensorflow/tfx"
 addopts = "--import-mode=importlib"
 testpaths = "tfx"
 python_files = "*_test.py"
-norecursedirs = ["custom_components", ".*", "*.egg"]
+norecursedirs = ["custom_components", ".*", "*.egg", "tfx/orchestration/experimental/core"]
 markers = [
     "e2e: end-to-end tests which are slow and require more dependencies (deselect with '-m \"not end_to_end\"')",
     "serial: mark tests that should not run in parallel",

tfx/dsl/placeholder/proto_placeholder_test.py  (+9)

@@ -15,7 +15,9 @@

 import base64
 import functools
+import importlib
 import os
+import pytest
 from typing import Any, Optional, TypeVar, Union

 import tensorflow as tf
@@ -34,6 +36,13 @@
 from google.protobuf import text_format
 from ml_metadata.proto import metadata_store_pb2

+
+
+@pytest.fixture(autouse=True,scope="module")
+def cleanup():
+  yield
+  importlib.reload(pipeline_pb2)
+
 _ExecutionInvocation = functools.partial(
     ph.make_proto, execution_invocation_pb2.ExecutionInvocation()
 )
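The fixture added above follows a general pattern: a module-scoped, autouse fixture whose code after `yield` runs once, after the last test in the module has finished. A standalone sketch of the same idea (the proto module below appears in this file and is used purely as a stand-in for whichever generated module the tests mutate):

import importlib

import pytest

from ml_metadata.proto import metadata_store_pb2


@pytest.fixture(autouse=True, scope="module")
def cleanup():
  # autouse=True applies the fixture to every test in the module without an
  # explicit argument; scope="module" means setup/teardown happen once.
  yield
  # Teardown: reload the generated proto module so any descriptor or class
  # state mutated by the tests does not leak into later test modules.
  importlib.reload(metadata_store_pb2)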

tfx/examples/penguin/experimental/sklearn_predict_extractor_test.py  (+4 −4)

@@ -69,8 +69,8 @@ def setUp(self):
         self._makeExample(age=5.0, language=0.0, label=0),
     ]

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
+  @pytest.mark.xfail(run=False, reason="This is based on experimental implementation,"
+                     "and the test fails.", strict=True)
   def testMakeSklearnPredictExtractor(self):
     """Tests that predictions are made from extracts for a single model."""
     feature_extractor = tfma.extractors.FeaturesExtractor(self._eval_config)
@@ -98,8 +98,8 @@ def check_result(actual):

       util.assert_that(predict_extracts, check_result)

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
+  @pytest.mark.xfail(run=False, reason="This is based on experimental implementation,"
+                     "and the test fails.", strict=True)
   def testMakeSklearnPredictExtractorWithMultiModels(self):
     """Tests that predictions are made from extracts for multiple models."""
     eval_config = tfma.EvalConfig(model_specs=[

tfx/examples/ranking/struct2tensor_parsing_utils_test.py  (−5)

@@ -15,7 +15,6 @@



-import pytest
 import itertools
 import unittest

@@ -172,15 +171,11 @@
 ]


-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 @unittest.skipIf(struct2tensor_parsing_utils is None,
                  'Cannot import required modules. This can happen when'
                  ' struct2tensor is not available.')
 class ELWCDecoderTest(tf.test.TestCase):

-  #@pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-  #"If this test passes, please remove this mark.", strict=True)
   def testAllDTypes(self):
     context_features = [
         struct2tensor_parsing_utils.Feature('ctx.int', tf.int64),

tfx/orchestration/beam/beam_dag_runner_test.py  (−3)

@@ -14,7 +14,6 @@
 """Tests for tfx.orchestration.portable.beam_dag_runner."""


-import pytest
 import os
 from typing import Optional

@@ -172,8 +171,6 @@ def _run_node(self):
     _executed_components.append(self._node_id)


-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class BeamDagRunnerTest(test_case_utils.TfxTest):

   def setUp(self):

tfx/orchestration/data_types_utils_test.py  (+8)

@@ -14,6 +14,8 @@
 """Tests for tfx.orchestration.data_types_utils."""


+import importlib
+import pytest
 from absl.testing import parameterized
 from tfx import types
 from tfx.orchestration import data_types_utils
@@ -32,6 +34,12 @@
 _DEFAULT_ARTIFACT_TYPE_NAME = 'Examples'


+@pytest.fixture(scope="module", autouse=True)
+def cleanup():
+  yield
+  importlib.reload(struct_pb2)
+
+
 def _create_artifact(uri: str) -> types.Artifact:
   artifact = types.Artifact(
       metadata_store_pb2.ArtifactType(name=_DEFAULT_ARTIFACT_TYPE_NAME))

tfx/orchestration/kubeflow/v2/container/kubeflow_v2_entrypoint_utils_test.py  (−3)

@@ -15,7 +15,6 @@



-import pytest
 import os
 from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2
 import tensorflow as tf
@@ -68,8 +67,6 @@
 }


-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class KubeflowV2EntrypointUtilsTest(tf.test.TestCase):

   def setUp(self):

tfx/orchestration/kubeflow/v2/container/kubeflow_v2_run_executor_test.py  (−3)

@@ -14,7 +14,6 @@
 """Tests for kubeflow_v2_run_executor.py."""


-import pytest
 import json
 import os
 from typing import Any, Mapping, Sequence
@@ -100,8 +99,6 @@ def Do(self, input_dict: Mapping[str, Sequence[artifact.Artifact]],
 _EXEC_PROPERTIES = {"key_1": "value_1", "key_2": 536870911}


-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class KubeflowV2RunExecutorTest(
     test_case_utils.TfxTest, parameterized.TestCase
 ):

tfx/orchestration/kubeflow/v2/file_based_example_gen/driver_test.py  (−3)

@@ -14,7 +14,6 @@



-import pytest
 import json
 import os

@@ -93,8 +92,6 @@ def _load_test_file(filename: str):
  ).read()


-@pytest.mark.xfail(run=False, reason="PR 6889 This class contains tests that fail and needs to be fixed. "
-                   "If all tests pass, please remove this mark.")
 class RunDriverTest(test_case_utils.TfxTest, parameterized.TestCase):

   def setUp(self):

tfx/orchestration/local/local_dag_runner_test.py  (−9)

@@ -18,7 +18,6 @@
 from typing import Any, Dict, List

 import absl.testing.absltest
-import pytest
 from tfx import types
 from tfx.dsl.compiler import compiler
 from tfx.dsl.components.base import base_component
@@ -165,17 +164,13 @@ def _getTestPipelineIR(self) -> pipeline_pb2.Pipeline:  # pylint: disable=invali
     c = compiler.Compiler()
     return c.compile(test_pipeline)

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testRun(self):
     local_dag_runner.LocalDagRunner().run(self._getTestPipeline())
     self.assertEqual(_executed_components, [
         '_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c',
         '_FakeComponent.d', '_FakeComponent.e'
     ])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testPartialRun(self):
     local_dag_runner.LocalDagRunner().run(
         self._getTestPipeline(),
@@ -184,17 +179,13 @@ def testPartialRun(self):
         _executed_components,
         ['_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c'])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testRunWithIR(self):
     local_dag_runner.LocalDagRunner().run_with_ir(self._getTestPipelineIR())
     self.assertEqual(_executed_components, [
         '_FakeComponent.a', '_FakeComponent.b', '_FakeComponent.c',
         '_FakeComponent.d', '_FakeComponent.e'
     ])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testPartialRunWithIR(self):
     pr_opts = pipeline_pb2.PartialRun()
     pr_opts.to_nodes.append('c')

tfx/orchestration/local/local_pipeline_test.py  (−9)

@@ -28,7 +28,6 @@
 from typing import Any, List

 import absl.testing.absltest
-import pytest

 from tfx import types
 from tfx.dsl.compiler import compiler
@@ -182,17 +181,13 @@ def _getTestPipelineIR(self) -> pipeline_pb2.Pipeline:
     c = compiler.Compiler()
     return c.compile(test_pipeline)

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelineRun(self):
     self.assertEqual(self.RAN_COMPONENTS, [])

     local_dag_runner.LocalDagRunner().run(self._getTestPipeline())

     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train', 'Validate'])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelinePartialRun(self):
     self.assertEqual(self.RAN_COMPONENTS, [])

@@ -202,17 +197,13 @@ def testSimplePipelinePartialRun(self):

     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train'])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelineRunWithIR(self):
     self.assertEqual(self.RAN_COMPONENTS, [])

     local_dag_runner.LocalDagRunner().run_with_ir(self._getTestPipelineIR())

     self.assertEqual(self.RAN_COMPONENTS, ['Load', 'Train', 'Validate'])

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testSimplePipelinePartialRunWithIR(self):
     self.assertEqual(self.RAN_COMPONENTS, [])

tfx/orchestration/portable/inputs_utils_test.py

-7
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
"""Tests for tfx.orchestration.portable.inputs_utils."""
1515
import collections
1616
import os
17-
import pytest
1817

1918
from tfx import types
2019
from tfx.dsl.compiler import placeholder_utils
@@ -147,8 +146,6 @@ def testResolveParametersFail(self):
147146
with self.assertRaisesRegex(RuntimeError, 'Parameter value not ready'):
148147
inputs_utils.resolve_parameters(parameters)
149148

150-
@pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
151-
"If this test passes, please remove this mark.", strict=True)
152149
def testResolveInputArtifacts(self):
153150
pipeline = self.load_pipeline_proto(
154151
'pipeline_for_input_resolver_test.pbtxt')
@@ -254,8 +251,6 @@ def _setup_pipeline_for_input_resolver_test(self, num_examples=1):
254251
)
255252
self._examples = output_dict['output_examples']
256253

257-
@pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
258-
"If this test passes, please remove this mark.", strict=True)
259254
def testResolveInputArtifacts_Normal(self):
260255
self._setup_pipeline_for_input_resolver_test()
261256

@@ -266,8 +261,6 @@ def testResolveInputArtifacts_Normal(self):
266261
self.assertArtifactMapListEqual([{'examples_1': self._examples,
267262
'examples_2': self._examples}], result)
268263

269-
@pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
270-
"If this test passes, please remove this mark.", strict=True)
271264
def testResolveInputArtifacts_FilterOutInsufficient(self):
272265
self._setup_pipeline_for_input_resolver_test()
273266
self._my_transform.inputs.inputs['examples_1'].min_count = 2

tfx/orchestration/portable/launcher_test.py  (−19)

@@ -13,7 +13,6 @@
 # limitations under the License.
 """Tests for tfx.orchestration.portable.launcher."""

-import pytest
 import contextlib
 import copy
 import os
@@ -490,8 +489,6 @@ def testLauncher_EmptyOptionalInputTriggersExecution(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_PublishingNewArtifactsAndUseCache(self):
     # In this test case, there are two executions:
     # In the first one,trainer reads the fake upstream outputs and publish
@@ -578,8 +575,6 @@ def testLauncher_PublishingNewArtifactsAndUseCache(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_CacheIsSupportedForNodeWithNoOutput(self):
     # Even though a node has no output at all, the launcher should treat the
     # second execution as CACHED as long as the cache context is the same.
@@ -639,8 +634,6 @@ def testLauncher_CacheIsSupportedForNodeWithNoOutput(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_CacheDisabled(self):
     # In this test case, there are two executions:
     # In the first one,trainer reads the fake upstream outputs and publish
@@ -757,8 +750,6 @@ def testLauncher_CacheDisabled(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ReEntry(self):
     # Some executors or runtime environment may reschedule the launcher job
     # before the launcher job can publish any results of the execution to MLMD.
@@ -830,8 +821,6 @@ def create_test_launcher(executor_operators):
     execution_preparation_result = third_test_launcher._prepare_execution()
     self.assertFalse(execution_preparation_result.is_execution_needed)

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ToleratesDoubleCleanup(self):
     # Some executors or runtime environment may delete stateful_working_dir,
     # tmp_dir and unexpectedly. The launcher should handle such cases gracefully
@@ -895,8 +884,6 @@ def testLauncher_ToleratesDoubleCleanup(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ExecutionFailed(self):
     # In the case that the executor failed and raises an execption.
     # An Execution will be published.
@@ -916,8 +903,6 @@ def testLauncher_ExecutionFailed(self):
     with self.assertRaises(FakeError):
       _ = test_launcher.launch()

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_ExecutionFailedViaReturnCode(self):
     # In the case that the executor failed and raises an execption.
     # An Execution will be published.
@@ -965,8 +950,6 @@ def testLauncher_ExecutionFailedViaReturnCode(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_with_CustomDriver_NewSpan(self):
     self.reloadPipelineWithNewRunId()
     test_launcher = launcher.Launcher(
@@ -1019,8 +1002,6 @@ def testLauncher_with_CustomDriver_NewSpan(self):
         ],
     )

-  @pytest.mark.xfail(run=False, reason="PR 6889 This test fails and needs to be fixed. "
-                     "If this test passes, please remove this mark.", strict=True)
   def testLauncher_with_CustomDriver_ExistingSpan(self):
     LauncherTest.fakeExampleGenOutput(self._mlmd_connection, self._example_gen,
                                       2, 1)
