Cleanup, fix call, add TestRoiAlign, add json file
NicolasHug committed Oct 19, 2023
1 parent 0eb13d2 commit 71c397e
Showing 3 changed files with 28 additions and 16 deletions.
pytest.ini (2 changes: 0 additions & 2 deletions)

@@ -11,5 +11,3 @@ addopts =
 testpaths =
     test
 xfail_strict = True
-markers =
-    opcheck_only_one: only opcheck one parametrization
test/conftest.py (1 change: 1 addition & 0 deletions)

@@ -19,6 +19,7 @@ def pytest_configure(config):
     config.addinivalue_line("markers", "needs_cuda: mark for tests that rely on a CUDA device")
     config.addinivalue_line("markers", "needs_mps: mark for tests that rely on a MPS device")
     config.addinivalue_line("markers", "dont_collect: mark for tests that should not be collected")
+    config.addinivalue_line("markers", "opcheck_only_one: only opcheck one parametrization")


 def pytest_collection_modifyitems(items):
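
Registering the marker programmatically in pytest_configure replaces the static markers entry deleted from pytest.ini above, and keeps registration next to the collection hook that consumes it. That hook is outside this hunk; below is a minimal sketch of how a pytest_collection_modifyitems implementation could honor opcheck_only_one (hypothetical logic, not the repository's exact code):

import pytest


def pytest_collection_modifyitems(items):
    # Hypothetical sketch: keep one parametrization per test carrying the
    # opcheck_only_one marker and skip the rest.
    seen = set()
    for item in items:
        if item.get_closest_marker("opcheck_only_one") is None:
            continue
        # originalname is the test name with the [...] parametrization id stripped.
        key = (item.module.__name__, item.originalname)
        if key in seen:
            item.add_marker(pytest.mark.skip(reason="opcheck_only_one: one parametrization suffices"))
        else:
            seen.add(key)
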
test/test_ops.py (41 changes: 27 additions & 14 deletions)

@@ -4,7 +4,6 @@
 from functools import lru_cache
 from itertools import product
 from typing import Callable, List, Tuple
-import unittest

 import numpy as np
 import pytest
@@ -21,6 +20,14 @@
 from torchvision.models.feature_extraction import get_graph_node_names


+OPTESTS = [
+    "test_schema",
+    "test_autograd_registration",
+    "test_faketensor",
+    "test_aot_dispatch_dynamic",
+]
+
+
 # Context manager for setting deterministic flag and automatically
 # resetting it to its original value
 class DeterministicGuard:
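
The four strings collected in OPTESTS name the opcheck utilities that generate_opcheck_tests runs against each existing test: operator schema validation, autograd registration checks, FakeTensor support, and AOT-dispatch tracing with dynamic shapes. As a rough illustration of what they exercise, newer PyTorch releases expose the same battery one-off via torch.library.opcheck (an assumption about API availability; that public entry point postdates this commit):

import torch
import torchvision  # needed so the torchvision:: ops are registered

x = torch.randn(1, 3, 16, 16)
rois = torch.tensor([[0.0, 0.0, 0.0, 8.0, 8.0]])  # (batch_index, x1, y1, x2, y2)

# Runs the same four checks listed in OPTESTS against a single call.
torch.library.opcheck(
    torch.ops.torchvision.roi_align.default,
    args=(x, rois, 1.0, 5, 5, 2, False),
)
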
@@ -464,9 +471,10 @@ def test_boxes_shape(self):

     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
-    @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64), ids=str)
+    @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64))  # , ids=str)
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -484,6 +492,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("deterministic", (True, False))
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
+    @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
         with torch.cuda.amp.autocast():
             self.test_forward(
@@ -499,6 +508,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_backward(self, seed, device, contiguous, deterministic):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -513,6 +523,7 @@ def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 10), (0.1, 50)))
     @pytest.mark.parametrize("qdtype", (torch.qint8, torch.quint8, torch.qint32))
+    @pytest.mark.opcheck_only_one()
     def test_qroialign(self, aligned, scale, zero_point, qdtype):
         """Make sure quantized version of RoIAlign is close to float version"""
         pool_size = 5
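
The opcheck_only_one marker added to these four tests keeps the generated-test matrix tractable: without it, every parametrization would be opchecked by all four utilities. A back-of-the-envelope count for test_forward alone (assuming cpu_and_cuda_and_mps() yields three devices):

# aligned x device x x_dtype x contiguous x deterministic:
variants = 2 * 3 * 3 * 2 * 2   # 72 runs of test_forward
generated = variants * 4       # x len(OPTESTS) = 288 extra opcheck tests
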
@@ -582,6 +593,15 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)


+optests.generate_opcheck_tests(
+    testcase=TestRoIAlign,
+    namespaces=["torchvision"],
+    failures_dict_path="test/optests_failures_dict.json",
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
+
+
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2
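
failures_dict_path points at the JSON file this commit adds; it records known-bad (operator, generated test) combinations as expected failures instead of decorating them in the test source. An entry plausibly follows the layout of PyTorch's optests failures dict, sketched here as the equivalent Python dict (the op and test names are illustrative, not taken from the repository):

failures_dict = {
    "_description": "operator name -> generated test name -> expected-failure info",
    "_version": 1,
    "torchvision::roi_align": {
        "TestRoIAlign.test_aot_dispatch_dynamic__test_forward": {
            "comment": "hypothetical known failure, tracked in a separate issue",
            "status": "xfail",  # or "skip"
        },
    },
}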

@@ -837,20 +857,13 @@ def test_batched_nms_implementations(self, seed):
         empty = torch.empty((0,), dtype=torch.int64)
         torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None))

-data_dependent_torchvision_test_checks = [
-    "test_schema",
-    "test_autograd_registration",
-    "test_faketensor",
-    "test_aot_dispatch_dynamic",
-]

 optests.generate_opcheck_tests(
-    TestNMS,
-    ["torchvision"],
-    {},
-    "test/test_ops.py",
-    [],
-    data_dependent_torchvision_test_checks,
+    testcase=TestNMS,
+    namespaces=["torchvision"],
+    failures_dict_path="test/optests_failures_dict.json",
+    additional_decorators=[],
+    test_utils=OPTESTS,
 )
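
This rewrite is the "fix call" of the commit title: the old invocation passed everything positionally, including an inline {} failures dict and the module path "test/test_ops.py", while the keyword form makes each argument land in the intended parameter and lets both call sites share OPTESTS and the JSON failures file. The generated checks are ordinary test methods, so they can be selected with pytest's -k filter; assuming the generated names join the utility and the original test with a double underscore (e.g. TestNMS.test_schema__test_nms_ref), something like:

pytest test/test_ops.py -k "test_schema or test_faketensor"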


