
Commit 351d73b

cool-RR authored and pytorchmergebot committed
Fix exception causes all over the codebase (pytorch#90271)
This is the continuation of pytorch#90134 and hopefully the final PR in this series.

Pull Request resolved: pytorch#90271
Approved by: https://github.com/kit1980

1 parent 8f079b8 · commit 351d73b


63 files changed, +150 / -147 lines (large commits hide some content by default; not all changed files are shown below)
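
Every hunk below applies the same fix: bind the caught exception (`except SomeError as e:`) and re-raise with an explicit cause (`raise NewError(...) from e`). As a minimal sketch of what this changes (the `load_config` helper and its error types are illustrative assumptions, not code from this commit):

# Minimal sketch of the pattern this commit applies everywhere.
# `load_config` is a hypothetical helper, not code from the diff.

def load_config(path):
    try:
        with open(path) as f:
            return f.read()
    except OSError as e:
        # Explicit chaining: sets RuntimeError.__cause__ = e, so the
        # traceback reads "The above exception was the direct cause of the
        # following exception". Omitting `from e` still chains implicitly
        # (via __context__), but prints "During handling of the above
        # exception, another exception occurred", which reads like a bug
        # in the handler rather than a deliberate translation.
        raise RuntimeError(f"could not load config from {path!r}") from e

if __name__ == "__main__":
    try:
        load_config("/nonexistent/config.ini")
    except RuntimeError as err:
        assert isinstance(err.__cause__, OSError)  # original error preserved

Python chains exceptions either way; only the traceback narration and the `__cause__` attribute change. The related form `raise ... from None` suppresses the original context entirely; this commit only adds `from e` (or `from err`), keeping the original traceback attached.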

benchmarks/dynamo/common.py (+2, -2)

@@ -1001,8 +1001,8 @@ def validate_model(self, model, example_inputs):
         try:
             self.model_iter_fn(model, example_inputs)
-        except Exception:
-            raise NotImplementedError("Eager model failed to run")
+        except Exception as e:
+            raise NotImplementedError("Eager model failed to run") from e

     def maybe_cast(self, model, example_inputs):
         model = copy.deepcopy(model)

benchmarks/functional_autograd_benchmark/torchaudio_models.py (+3, -2)

@@ -330,8 +330,9 @@ def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
         super(TransformerModel, self).__init__()
         try:
             from torch.nn import TransformerEncoder, TransformerEncoderLayer
-        except Exception:
-            raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
+        except Exception as e:
+            raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '
+                              'lower.') from e
         self.model_type = 'Transformer'
         self.src_mask = None
         self.pos_encoder = PositionalEncoding(ninp, dropout)

caffe2/python/caffe_translator.py (+2, -2)

@@ -210,9 +210,9 @@ def TranslateLayer(cls, layer, pretrained_blobs, is_test, **kwargs):
         try:
             caffe_ops, params = cls.registry_[layer.type](
                 layer, pretrained_blobs, is_test, **kwargs)
-        except KeyError:
+        except KeyError as e:
             raise KeyError('No translator registered for layer: %s yet.' %
-                           str(layer))
+                           str(layer)) from e
         if caffe_ops is None:
             caffe_ops = []
         if type(caffe_ops) is not list:

caffe2/python/core.py (+2, -2)

@@ -970,7 +970,7 @@ def DoGradientAccumulation(self, fwd_op_idx):
                         input_name,
                         err
                     )
-                )
+                ) from err

         # Finally, let's create the sum operator.
         sum_ops, g = self._MakeSumOps(input_name, input_version)

@@ -1175,7 +1175,7 @@ def GetGradientForOp(cls, op, g_output):
                 raise Exception(
                     "Exception when creating gradient for [{}]:{}.\nOp: \n{}".
                     format(op.type, e, str(op))
-                )
+                ) from e

         if gradient_ops is None:
             return [], g_input

caffe2/python/model_helper.py (+4, -4)

@@ -540,17 +540,17 @@ def ExtractPredictorNet(
                 'StopGradient'
             ]
         )
-    except ValueError:
-        raise Exception("No ops with input={}".format(input_blobs))
+    except ValueError as e:
+        raise Exception("No ops with input={}".format(input_blobs)) from e
     try:
         last_op_with_output = max(
             [
                 j for j in range(len(ops))
                 if output_blobs.intersection(ops[j].output)
             ]
         )
-    except ValueError:
-        raise Exception("No ops with output={}".format(output_blobs))
+    except ValueError as e:
+        raise Exception("No ops with output={}".format(output_blobs)) from e

     def validate_op(op):
         # Check that the op does not have is_test = 0 set. This is a common

caffe2/python/models/download.py (+2, -2)

@@ -69,10 +69,10 @@ def downloadFromURLToFile(url, filename, show_progress=True):
         print("")  # New line to fix for progress bar
     except HTTPError as e:
         raise Exception("Could not download model. [HTTP Error] {code}: {reason}."
-                        .format(code=e.code, reason=e.reason))
+                        .format(code=e.code, reason=e.reason)) from e
     except URLError as e:
         raise Exception("Could not download model. [URL Error] {reason}."
-                        .format(reason=e.reason))
+                        .format(reason=e.reason)) from e


 def getURLFromName(name, filename):

caffe2/python/operator_test/roi_align_rotated_op_test.py (+2, -2)

@@ -150,9 +150,9 @@ def roialign_flip(m, axis):
         indexer = [slice(None)] * m.ndim
         try:
             indexer[axis] = slice(None, None, -1)
-        except IndexError:
+        except IndexError as e:
             raise ValueError("axis=%i is invalid for the %i-dimensional input array"
-                             % (axis, m.ndim))
+                             % (axis, m.ndim)) from e
         return m[tuple(indexer)]

     def roialign_ref(X, R):

caffe2/python/operator_test/video_input_op_test.py (+2, -2)

@@ -13,8 +13,8 @@

 try:
     import lmdb
-except ImportError:
-    raise unittest.SkipTest("python-lmdb is not installed")
+except ImportError as e:
+    raise unittest.SkipTest("python-lmdb is not installed") from e


 class VideoInputOpTest(unittest.TestCase):

caffe2/python/schema.py (+2, -2)

@@ -546,8 +546,8 @@ def __getattr__(self, item):
             raise AttributeError(item)
         try:
             return super(Struct, self).__getattribute__("fields")[item]
-        except KeyError:
-            raise AttributeError(item)
+        except KeyError as e:
+            raise AttributeError(item) from e

     def __setattr__(self, key, value):
         # Disable setting attributes after initialization to prevent false

caffe2/python/trt/transform.py (+2, -2)

@@ -29,8 +29,8 @@ def _get_output_shapes(output_value_infos):
 def check_gpu_():
     try:
         C.get_cuda_version()
-    except Exception as _:
-        raise Exception("TensorRT related functions require CUDA support")
+    except Exception as e:
+        raise Exception("TensorRT related functions require CUDA support") from e

 def convert_onnx_model_to_trt_op(onnx_model,
                                  max_batch_size=64,

setup.py (+2, -2)

@@ -446,8 +446,8 @@ def build_deps():
 def check_pydep(importname, module):
     try:
         importlib.import_module(importname)
-    except ImportError:
-        raise RuntimeError(missing_pydep.format(importname=importname, module=module))
+    except ImportError as e:
+        raise RuntimeError(missing_pydep.format(importname=importname, module=module)) from e


 class build_ext(setuptools.command.build_ext.build_ext):

test/distributed/fsdp/test_fsdp_state_dict.py (+4, -4)

@@ -611,8 +611,8 @@ def _initialize_model(
 def _state_dict(model: Module, state_dict_type: str):
     try:
         enum_val = STATE_DICT_MAPPING[state_dict_type]
-    except KeyError:
-        raise ValueError(f"No state_dict type for {state_dict_type}")
+    except KeyError as e:
+        raise ValueError(f"No state_dict type for {state_dict_type}") from e

     with FSDP.state_dict_type(model, enum_val):
         return model.state_dict()

@@ -623,8 +623,8 @@ def _load_state_dict(
 ):
     try:
         enum_val = STATE_DICT_MAPPING[state_dict_type]
-    except KeyError:
-        raise ValueError(f"No state_dict for {state_dict_type}")
+    except KeyError as e:
+        raise ValueError(f"No state_dict for {state_dict_type}") from e

     with FSDP.state_dict_type(model, enum_val):
         return model.load_state_dict(state_dict, strict=True)

test/distributed/test_c10d_nccl.py (+1, -1)

@@ -2598,7 +2598,7 @@ def test_nccl_timeout(self):
         try:
             pg_gloo.barrier().wait()
         except Exception as e:
-            raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}")
+            raise ValueError(f"Rank {self.rank} barrier timed out waiting for rank 0 with error: {str(e)}") from e
         # Now verify communicators on this rank have
         # been aborted by watchdog.
         self._wait_for_comm_abort(process_group, failed_collective_timeout)

test/distributed/test_dynamo_distributed.py (+2, -2)

@@ -101,8 +101,8 @@ def get_hf_bert(rank):
     # in a multiprocessing test
     try:
         from transformers import BertConfig, AutoModelForMaskedLM
-    except ImportError:
-        raise unittest.SkipTest("Unable to import transformers")
+    except ImportError as e:
+        raise unittest.SkipTest("Unable to import transformers") from e

     batch_size, max_length, config, device = 4, 512, BertConfig(), f"cuda:{rank}"
     model = AutoModelForMaskedLM.from_config(config).to(device)

test/dynamo/test_repros.py (+2, -2)

@@ -1856,8 +1856,8 @@ def __init__(
     def __getattr__(self, item: str):
         try:
             return self.data[item]
-        except KeyError:
-            raise AttributeError
+        except KeyError as e:
+            raise AttributeError from e

 def tokenization(x):
     encoding = BatchEncoding({"key": x})

test/inductor/test_torchinductor.py (+1, -1)

@@ -65,7 +65,7 @@
     sys.stderr.write(f"{type(e)}: {e}\n")
     if __name__ == "__main__":
         sys.exit(0)
-    raise unittest.SkipTest("requires sympy/functorch/filelock")
+    raise unittest.SkipTest("requires sympy/functorch/filelock") from e

 from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA

test/jit/test_dtype_analysis.py (+2, -2)

@@ -128,9 +128,9 @@ def assert_dtype_equal(self, fn, in_shapes, in_dtypes):
         inputs = [self.get_rand_tensor(s, d) for s, d in zip(in_shapes, in_dtypes)]
         try:
             self.assert_dtype_equal_custom_args(fn, inputs)
-        except Exception:
+        except Exception as e:
             fail_text = f"Failed for shapes {in_shapes}, and dtypes {in_dtypes}"
-            raise AssertionError(fail_text)
+            raise AssertionError(fail_text) from e

     def assert_dtype_equal_custom_args(self, fn, args):
         try:

test/lazy/test_extract_compiled_graph.py (+1, -1)

@@ -141,7 +141,7 @@ def verify_reusing_compiled_graph(mod, exception_msg_pattern, ncase=10):
         raise e  # reraise the exception
     exception_message = str(e)
     if not re.search(exception_msg_pattern, exception_message):
-        raise RuntimeError(f"Exception message does not match the required pattern: {exception_message}")
+        raise RuntimeError(f"Exception message does not match the required pattern: {exception_message}") from e
     else:
         # We are done for the test case that expects an exception
         return

test/onnx/test_pytorch_onnx_onnxruntime_cuda.py (+2, -2)

@@ -110,8 +110,8 @@ def forward(self, x):

         try:
             from apex import amp
-        except Exception:
-            raise unittest.SkipTest("Apex is not available")
+        except Exception as e:
+            raise unittest.SkipTest("Apex is not available") from e
         input = torch.randn(3, 3, device=torch.device("cuda"))
         model = amp.initialize(LinearModel(), opt_level="O2")
         self.run_test(model, input)

test/test_autograd.py (+1, -1)

@@ -1703,7 +1703,7 @@ def coro_enable_grad(n=10):
                 self.assertTrue(torch.is_grad_enabled())
                 yield (-i if has_raised else i)

-            except UnrecoverableException:
+            except UnrecoverableException :
                 self.assertTrue(torch.is_grad_enabled())
                 raise SecondaryException

test/test_fx.py (+1, -1)

@@ -3805,7 +3805,7 @@ def test_class_member_back_compat(self):
                 f"unintended, please revert it. If it was intended, check with the FX " \
                 f"team to ensure that the proper deprecation protocols have been followed " \
                 f"and subsequently --accept the change."
-            raise AssertionError(msg)
+            raise AssertionError(msg) from e

     def test_public_api_surface(self):
         non_back_compat_objects = {}
