From 71c397ea3d40c6be569329242df8287f63e97026 Mon Sep 17 00:00:00 2001
From: Nicolas Hug
Date: Thu, 19 Oct 2023 20:32:33 +0100
Subject: [PATCH] Cleanup, fix call, add TestRoiAlign, add json file

---
 pytest.ini       |  2 --
 test/conftest.py |  1 +
 test/test_ops.py | 41 +++++++++++++++++++++++++++--------------
 3 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/pytest.ini b/pytest.ini
index 18b4e4e47fa..8d52b55d5a6 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -11,5 +11,3 @@ addopts =
 testpaths = test
 xfail_strict = True
-markers =
-    opcheck_only_one: only opcheck one parametrization
diff --git a/test/conftest.py b/test/conftest.py
index ea73b09b906..a9768598ded 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -19,6 +19,7 @@ def pytest_configure(config):
     config.addinivalue_line("markers", "needs_cuda: mark for tests that rely on a CUDA device")
     config.addinivalue_line("markers", "needs_mps: mark for tests that rely on a MPS device")
     config.addinivalue_line("markers", "dont_collect: mark for tests that should not be collected")
+    config.addinivalue_line("markers", "opcheck_only_one: only opcheck one parametrization")
 
 
 def pytest_collection_modifyitems(items):
diff --git a/test/test_ops.py b/test/test_ops.py
index 2466fc033ca..e82f93ce683 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -4,7 +4,6 @@
 from functools import lru_cache
 from itertools import product
 from typing import Callable, List, Tuple
-import unittest
 
 import numpy as np
 import pytest
@@ -21,6 +20,14 @@
 from torchvision.models.feature_extraction import get_graph_node_names
 
 
+OPTESTS = [
+    "test_schema",
+    "test_autograd_registration",
+    "test_faketensor",
+    "test_aot_dispatch_dynamic",
+]
+
+
 # Context manager for setting deterministic flag and automatically
 # resetting it to its original value
 class DeterministicGuard:
@@ -464,9 +471,10 @@ def test_boxes_shape(self):
 
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
-    @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64), ids=str)
+    @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64))  # , ids=str)
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -484,6 +492,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("deterministic", (True, False))
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
+    @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
         with torch.cuda.amp.autocast():
             self.test_forward(
@@ -499,6 +508,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_backward(self, seed, device, contiguous, deterministic):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -513,6 +523,7 @@ def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 10), (0.1, 50)))
     @pytest.mark.parametrize("qdtype", (torch.qint8, torch.quint8, torch.qint32))
+    @pytest.mark.opcheck_only_one()
     def test_qroialign(self, aligned, scale, zero_point, qdtype):
         """Make sure quantized version of RoIAlign is close to float version"""
         pool_size = 5
@@ -582,6 +593,15 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)
 
 
+optests.generate_opcheck_tests(
+    testcase=TestRoIAlign,
+    namespaces=["torchvision"],
+    failures_dict_path="test/optests_failures_dict.json",
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
+
+
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2
 
@@ -837,20 +857,13 @@ def test_batched_nms_implementations(self, seed):
         empty = torch.empty((0,), dtype=torch.int64)
         torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None))
 
 
-data_dependent_torchvision_test_checks = [
-    "test_schema",
-    "test_autograd_registration",
-    "test_faketensor",
-    "test_aot_dispatch_dynamic",
-]
 optests.generate_opcheck_tests(
-    TestNMS,
-    ["torchvision"],
-    {},
-    "test/test_ops.py",
-    [],
-    data_dependent_torchvision_test_checks,
+    testcase=TestNMS,
+    namespaces=["torchvision"],
+    failures_dict_path="test/optests_failures_dict.json",
+    additional_decorators=[],
+    test_utils=OPTESTS,
 )