Commit 046e88a

XuehaiPan authored and pytorchmergebot committed
[BE] [3/3] Rewrite super() calls in test (pytorch#94592)
Rewrite Python built-in class `super()` calls. Only non-semantic changes should be applied.

- pytorch#94587
- pytorch#94588
- pytorch#94592

Also, methods with only a `super()` call are removed:

```diff
 class MyModule(nn.Module):
-    def __init__(self):
-        super().__init__()
-
     def forward(self, ...):
         ...
```

Some cases that change the semantics should be kept unchanged. E.g.:

https://github.com/pytorch/pytorch/blob/f152a79be9612b824e1672b8f8cb88a414ce4c12/caffe2/python/net_printer.py#L184-L190

https://github.com/pytorch/pytorch/blob/f152a79be9612b824e1672b8f8cb88a414ce4c12/test/test_jit_fuser_te.py#L2628-L2635

Pull Request resolved: pytorch#94592
Approved by: https://github.com/ezyang, https://github.com/seemethere
1 parent bdd8f51 commit 046e88a
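For readers unfamiliar with the rewrite, a minimal sketch of why it is behavior-preserving in Python 3, and of the kind of case the commit message says must keep the explicit two-argument form. The class and helper names below are illustrative, not taken from the PyTorch sources:

```python
import torch.nn as nn

# Inside a method body, zero-argument super() is equivalent to the
# two-argument form: the compiler creates a __class__ cell whenever
# super() or __class__ is referenced, so both spellings resolve to the
# same entry in the MRO.
class New(nn.Module):
    def __init__(self):
        super().__init__()  # same as super(New, self).__init__()

# An __init__ that only forwards to the parent is redundant; removing it
# lets attribute lookup fall through to nn.Module.__init__ directly.
class NoInit(nn.Module):
    def forward(self, x):
        return x

NoInit()  # nn.Module.__init__ still runs via inheritance

# Semantics-changing case: the __class__ cell only exists for functions
# compiled inside a class body. A method built in a plain function and
# attached afterwards must keep the explicit form; zero-argument super()
# there raises "RuntimeError: super(): __class__ cell not found".
def make_init():
    def __init__(self):
        super(Patched, self).__init__()  # super() alone would fail here
    return __init__

class Patched(nn.Module):
    pass

Patched.__init__ = make_init()
Patched()  # works only with the explicit two-argument form
```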

File tree

190 files changed (+1026, -2238 lines)


test/bottleneck_test/test_cuda.py (+1, -1)

@@ -6,7 +6,7 @@
 
 class Model(nn.Module):
     def __init__(self):
-        super(Model, self).__init__()
+        super().__init__()
         self.linear = nn.Linear(20, 20)
 
     def forward(self, input):

test/cpp/jit/test_exception.cpp (+1, -1)

@@ -113,7 +113,7 @@ TEST(TestException, TestCustomException) {
   py::exec(R"PY(
 class SimpleValueError(ValueError):
     def __init__(self, message):
-        super(SimpleValueError, self).__init__(message)
+        super().__init__(message)
 )PY");
 
   std::string pythonCode = R"PY(

test/cpp/jit/test_lite_interpreter.cpp (+1, -1)

@@ -1157,7 +1157,7 @@ TEST(RunTimeTest, ParseOperator) {
 
   // class Add(torch.nn.Module):
   //   def __init__(self):
-  //     super(Add, self).__init__()
+  //     super().__init__()
 
   //   def forward(self, a, b):
   //     return a + b

test/cpp/jit/tests_setup.py (+1, -1)

@@ -26,7 +26,7 @@ class EvalModeForLoadedModule(FileSetup):
     def setup(self):
         class Model(torch.jit.ScriptModule):
             def __init__(self):
-                super(Model, self).__init__()
+                super().__init__()
                 self.dropout = torch.nn.Dropout(0.1)
 
             @torch.jit.script_method

test/cpp/lite_interpreter_runtime/test_lite_interpreter_runtime.cpp (+3, -3)

@@ -21,21 +21,21 @@ TEST(RunTimeTest, LoadAndForward) {
   // sequence.ptl source code:
   // class A(torch.nn.Module):
   //   def __init__(self):
-  //     super(A, self).__init__()
+  //     super().__init__()
   //
   //   def forward(self, x):
   //     return x + 1
   //
   // class B(torch.nn.Module):
   //   def __init__(self):
-  //     super(B, self).__init__()
+  //     super().__init__()
   //
   //   def forward(self, x):
   //     return x + 2
   //
   // class C(torch.nn.Module):
   //   def __init__(self):
-  //     super(C, self).__init__()
+  //     super().__init__()
   //     self.A0 = A()
   //     self.B0 = B()
   //

test/cpp_api_parity/sample_module.py (+1, -1)

@@ -13,7 +13,7 @@
 
 class SampleModule(torch.nn.Module):
     def __init__(self, has_parity, has_submodule):
-        super(SampleModule, self).__init__()
+        super().__init__()
         self.has_parity = has_parity
         if has_submodule:
             self.submodule = SampleModule(self.has_parity, False)

test/create_dummy_torchscript_model.py (+1, -1)

@@ -7,7 +7,7 @@
 class NeuralNetwork(nn.Module):
 
     def __init__(self):
-        super(NeuralNetwork, self).__init__()
+        super().__init__()
         self.flatten = nn.Flatten()
         self.linear_relu_stack = nn.Sequential(
             nn.Linear(28 * 28, 512),

test/custom_backend/backend.py (-3)

@@ -43,9 +43,6 @@ class Model(torch.nn.Module):
     and executing in C++.
     """
 
-    def __init__(self):
-        super(Model, self).__init__()
-
     def forward(self, a, b):
         return (a + b, a - b)
 

test/custom_operator/model.py (+1, -1)

@@ -19,7 +19,7 @@ def get_custom_op_library_path():
 
 class Model(torch.jit.ScriptModule):
     def __init__(self):
-        super(Model, self).__init__()
+        super().__init__()
         self.p = torch.nn.Parameter(torch.eye(5))
 
     @torch.jit.script_method

test/distributed/_composable/test_replicate.py (+1, -1)

@@ -14,7 +14,7 @@
 
 class Net(nn.Module):
     def __init__(self):
-        super(Net, self).__init__()
+        super().__init__()
         self.fc1 = nn.Linear(2, 10, bias=False)
         self.fc2 = nn.Linear(10, 50, bias=False)
         self.fc3 = nn.Linear(50, 4, bias=False)

test/distributed/_shard/sharded_optim/test_sharded_optim.py (+2, -2)

@@ -29,7 +29,7 @@
 
 class MyShardedModel(torch.nn.Module):
     def __init__(self, spec=None, group=None):
-        super(MyShardedModel, self).__init__()
+        super().__init__()
         # Use same seed.
         torch.manual_seed(0)
         self.param = torch.nn.Parameter(torch.rand(5, 10))
@@ -47,7 +47,7 @@ def forward(self, input):
 
 class MyShardedLinear(torch.nn.Module):
     def __init__(self, rank=None):
-        super(MyShardedLinear, self).__init__()
+        super().__init__()
         # Use same seed.
         torch.manual_seed(0)
         self.linear1 = torch.nn.Linear(17, 12)

test/distributed/_tensor/test_dtensor.py (+1, -1)

@@ -18,7 +18,7 @@
 
 class DummyMLP(torch.nn.Module):
     def __init__(self, device):
-        super(DummyMLP, self).__init__()
+        super().__init__()
         self.net1 = torch.nn.Linear(5, 1024, device=device)
         self.relu = torch.nn.ReLU()
         self.net2 = torch.nn.Linear(1024, 4, device=device)

test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py (+2, -2)

@@ -43,7 +43,7 @@ def gpus_for_rank(world_size):
 
 class Task(nn.Module):
     def __init__(self):
-        super(Task, self).__init__()
+        super().__init__()
         torch.manual_seed(0)
         self.p = nn.Parameter(torch.randn(40, 20))
 
@@ -62,7 +62,7 @@ def forward(self, x, rank):
 
 class DistributedDataParallelCommHookTest(MultiProcessTestCase):
     def setUp(self):
-        super(DistributedDataParallelCommHookTest, self).setUp()
+        super().setUp()
         self._spawn_processes()
 
     def tearDown(self):

test/distributed/algorithms/quantization/test_quantization.py (+2, -2)

@@ -43,12 +43,12 @@ def _build_tensor(size, value=None, dtype=torch.float, device_id=None):
 class DistQuantizationTests(MultiProcessTestCase):
 
     def setUp(self):
-        super(DistQuantizationTests, self).setUp()
+        super().setUp()
         self._spawn_processes()
         torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
 
     def tearDown(self):
-        super(DistQuantizationTests, self).tearDown()
+        super().tearDown()
         try:
             os.remove(self.file_name)
         except OSError:

test/distributed/algorithms/test_join.py (+2, -2)

@@ -83,7 +83,7 @@ class AllReducer(Joinable):
     per-iteration collective communication.
     """
     def __init__(self, device, process_group):
-        super(AllReducer, self).__init__()
+        super().__init__()
         self.device = device
         self.process_group = process_group
         self.post_hook_tensor = torch.tensor([BEFORE_CONSTANT], device=self.device)
@@ -139,7 +139,7 @@ def find_common_rank(self, rank, to_consider):
 class TestJoin(MultiProcessTestCase):
     r"""Test cases for the generic join context."""
     def setUp(self):
-        super(TestJoin, self).setUp()
+        super().setUp()
         os.environ["WORLD_SIZE"] = str(self.world_size)
         os.environ["BACKEND"] = BACKEND
         self._spawn_processes()

test/distributed/checkpoint/test_2d_fsdp_dt_checkpoint.py (+1, -1)

@@ -39,7 +39,7 @@
 
 class SimpleModel(torch.nn.Module):
     def __init__(self):
-        super(SimpleModel, self).__init__()
+        super().__init__()
         self.net1 = torch.nn.Linear(5, 8)
         self.relu = torch.nn.ReLU()
         self.net2 = torch.nn.Linear(8, 4)

test/distributed/checkpoint/test_checkpoint.py (+2, -2)

@@ -185,7 +185,7 @@ def _fail_rank_async(self, name, result=None):
 
 class FaultyStorageWriter(TestStorageBase, StorageWriter):
     def __init__(self, fail_conf):
-        super(FaultyStorageWriter, self).__init__(fail_conf)
+        super().__init__(fail_conf)
 
     def set_up_storage_writer(self, is_coordinator: bool) -> None:
         self._fail_rank("fail_set_up_storage_writer")
@@ -212,7 +212,7 @@ def finish(
 
 class FaultyStorageReader(TestStorageBase, StorageReader):
     def __init__(self, metadata, fail_conf):
-        super(FaultyStorageReader, self).__init__(fail_conf)
+        super().__init__(fail_conf)
         self.metadata = metadata
 
     def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:

test/distributed/checkpoint/test_file_system_checkpoint.py (+1, -1)

@@ -101,7 +101,7 @@ def __init__(
         self,
         spec: ShardingSpec,
     ) -> None:
-        super(MyShardedModel3, self).__init__()
+        super().__init__()
         self.sharded_tensor: ShardedTensor = sharded_tensor.rand(
             spec, 10, 20, init_rrefs=False
         )

test/distributed/checkpoint/test_file_system_checkpoint_cpu.py (+1, -1)

@@ -100,7 +100,7 @@ def __init__(
         self,
         spec: ShardingSpec,
     ) -> None:
-        super(MyShardedModel3, self).__init__()
+        super().__init__()
         self.sharded_tensor: ShardedTensor = sharded_tensor.rand(
             spec, 10, 20, init_rrefs=False
         )

test/distributed/fsdp/test_checkpoint_wrapper.py (-3)

@@ -22,9 +22,6 @@
 
 
 class CheckpointWrapperTest(TestCase):
-    def setUp(self):
-        super().setUp()
-
     def test_load_activation_checkpointed_module(self):
         lin = nn.Linear(10, 10, bias=False)
         lin = checkpoint_wrapper(

test/distributed/fsdp/test_fsdp_mixed_precision.py (+1, -1)

@@ -664,7 +664,7 @@ def test_grads_reduced_precision(self):
     def test_mp_batchnorm(self, convert_sync_bn):
         class BatchNormNet(nn.Module):
             def __init__(self, affine=True):
-                super(BatchNormNet, self).__init__()
+                super().__init__()
                 self.fc1 = nn.Linear(2, 40, bias=False)
                 self.bn = nn.BatchNorm1d(4, affine=affine)
                 self.fc2 = nn.Linear(40, 4, bias=False)

test/distributed/fsdp/test_fsdp_optim_state.py (+2, -2)

@@ -286,7 +286,7 @@ def param_group1(self) -> List[torch.nn.Parameter]:
 
 class TestFSDPOptimState(FSDPTest):
     def __init__(self, *args, **kwargs):
-        super(TestFSDPOptimState, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._model_class = {
             _ModelClass.NESTED: self._init_nested_model,
             _ModelClass.TRANSFORMER: self._init_transformer_model,
@@ -1655,7 +1655,7 @@ def forward(self, x):
     def test_with_empty_optimizer_state(self):
         class TestDummyModel(torch.nn.Module):
             def __init__(self):
-                super(TestDummyModel, self).__init__()
+                super().__init__()
                 torch.manual_seed(0)
                 self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
                 self.net2 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())

test/distributed/optim/test_named_optimizer.py (+1, -1)

@@ -28,7 +28,7 @@ def _run_model_training(model_optim_lists):
 
 class TestDummyModel(torch.nn.Module):
     def __init__(self):
-        super(TestDummyModel, self).__init__()
+        super().__init__()
         torch.manual_seed(0)
         self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
         self.net2 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())

test/distributed/optim/test_zero_redundancy_optimizer.py (+1, -1)

@@ -64,7 +64,7 @@ def _get_backend_for_tests():
 @unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_DEV_DBG_ASAN, "CUDA + ASAN does not work.")
 class TestZeroRedundancyOptimizer(common_distributed.MultiProcessTestCase):
     def setUp(self):
-        super(TestZeroRedundancyOptimizer, self).setUp()
+        super().setUp()
         os.environ["WORLD_SIZE"] = str(self.world_size)
         self._spawn_processes()
 
test/distributed/rpc/test_share_memory.py

-3
Original file line numberDiff line numberDiff line change
@@ -53,9 +53,6 @@ def worker_fn(m):
5353
pass
5454

5555
class TestRPCPickler(TestCase):
56-
def setUp(self):
57-
super().setUp()
58-
5956
def test_case(self):
6057
os.environ['MASTER_ADDR'] = 'localhost'
6158
os.environ['MASTER_PORT'] = '29500'

test/distributed/tensor/parallel/test_2d_parallel.py (+1, -1)

@@ -29,7 +29,7 @@
 
 class SimpleModel(torch.nn.Module):
     def __init__(self):
-        super(SimpleModel, self).__init__()
+        super().__init__()
         self.net1 = torch.nn.Linear(5, 8)
         self.relu = torch.nn.ReLU()
         self.net2 = torch.nn.Linear(8, 4)

test/distributed/tensor/parallel/test_parallelize_api.py (+1, -1)

@@ -26,7 +26,7 @@
 
 class MLPModule(torch.nn.Module):
     def __init__(self, device):
-        super(MLPModule, self).__init__()
+        super().__init__()
         torch.manual_seed(5)
         self.net1 = torch.nn.Linear(10, 16, device=device)
         self.relu = torch.nn.ReLU()

test/distributed/tensor/parallel/test_tp_examples.py (+1, -1)

@@ -20,7 +20,7 @@
 
 class MLPModule(torch.nn.Module):
     def __init__(self, device):
-        super(MLPModule, self).__init__()
+        super().__init__()
         torch.manual_seed(5)
         self.net1 = torch.nn.Linear(10, 16, device=device)
         self.relu = torch.nn.ReLU()
