# TODO: document these and remove them from here.

coverage_ignore_functions = [
+    # torch
+    "typename",
    # torch.autograd
    "register_py_tensor_class_for_device",
    "variable",
@@ ... @@
    "execWrapper",
    # torch.onnx
    "unregister_custom_op_symbolic",
+    # torch.ao.quantization
+    "default_eval_fn",
+    # torch.ao.quantization.fx.backend_config
+    "validate_backend_config_dict",
+    # torch.backends
+    "disable_global_flags",
+    "flags_frozen",
+    # torch.distributed.algorithms.ddp_comm_hooks
+    "register_ddp_comm_hook",
+    # torch.nn
+    "factory_kwargs",
+    # torch.nn.parallel
+    "DistributedDataParallelCPU",
+    # torch.utils
+    "set_module",
+    # torch.utils.model_dump
+    "burn_in_info",
+    "get_info_and_burn_skeleton",
+    "get_inline_skeleton",
+    "get_model_info",
+    "get_storage_info",
+    "hierarchical_pickle",
]

coverage_ignore_classes = [
+    # torch
+    "FatalError",
+    "QUInt2x4Storage",
+    "Size",
+    "Storage",
+    "Stream",
+    "Tensor",
+    "finfo",
+    "iinfo",
+    "qscheme",
    # torch.cuda
    "BFloat16Storage",
    "BFloat16Tensor",
@@ ... @@
    # torch.onnx
    "CheckerError",
    "ExportTypes",
+    # torch.backends
+    "ContextProp",
+    "PropModule",
+    # torch.backends.cuda
+    "cuBLASModule",
+    "cuFFTPlanCache",
+    "cuFFTPlanCacheAttrContextProp",
+    "cuFFTPlanCacheManager",
+    # torch.distributed.algorithms.ddp_comm_hooks
+    "DDPCommHookType",
+    # torch.jit.mobile
+    "LiteScriptModule",
+    # torch.nn.quantized.modules
+    "DeQuantize",
+    "Quantize",
+    # torch.utils.backcompat
+    "Warning",
]

-# List of modules that do not have automodule/py:module in the doc yet
-# We should NOT add anything to this list, see the CI failure message
-# on how to solve missing automodule issues
-coverage_missing_automodule = [
-    "torch",
-    "torch.ao",
-    "torch.ao.nn",
-    "torch.ao.nn.sparse",
-    "torch.ao.nn.sparse.quantized",
-    "torch.ao.nn.sparse.quantized.dynamic",
-    "torch.ao.ns",
-    "torch.ao.ns.fx",
-    "torch.ao.quantization",
-    "torch.ao.quantization.fx",
-    "torch.ao.quantization.fx.backend_config",
-    "torch.ao.sparsity",
-    "torch.ao.sparsity.experimental",
-    "torch.ao.sparsity.experimental.pruner",
-    "torch.ao.sparsity.scheduler",
-    "torch.ao.sparsity.sparsifier",
-    "torch.backends",
-    "torch.backends.cuda",
-    "torch.backends.cudnn",
-    "torch.backends.mkl",
-    "torch.backends.mkldnn",
-    "torch.backends.openmp",
-    "torch.backends.quantized",
-    "torch.backends.xnnpack",
-    "torch.contrib",
-    "torch.cpu",
-    "torch.cpu.amp",
-    "torch.distributed.algorithms",
-    "torch.distributed.algorithms.ddp_comm_hooks",
-    "torch.distributed.algorithms.model_averaging",
-    "torch.distributed.elastic",
-    "torch.distributed.elastic.utils",
-    "torch.distributed.elastic.utils.data",
-    "torch.distributed.launcher",
-    "torch.distributed.nn",
-    "torch.distributed.nn.api",
-    "torch.distributed.nn.jit",
-    "torch.distributed.nn.jit.templates",
-    "torch.distributed.pipeline",
-    "torch.distributed.pipeline.sync",
-    "torch.distributed.pipeline.sync.skip",
-    "torch.fft",
-    "torch.for_onnx",
-    "torch.fx.experimental",
-    "torch.fx.experimental.unification",
-    "torch.fx.experimental.unification.multipledispatch",
-    "torch.fx.passes",
-    "torch.jit.mobile",
-    "torch.nn",
-    "torch.nn.backends",
-    "torch.nn.intrinsic",
-    "torch.nn.intrinsic.modules",
-    "torch.nn.intrinsic.qat",
-    "torch.nn.intrinsic.qat.modules",
-    "torch.nn.intrinsic.quantized",
-    "torch.nn.intrinsic.quantized.dynamic",
-    "torch.nn.intrinsic.quantized.dynamic.modules",
-    "torch.nn.intrinsic.quantized.modules",
-    "torch.nn.modules",
-    "torch.nn.parallel",
-    "torch.nn.qat",
-    "torch.nn.qat.modules",
-    "torch.nn.qat.dynamic",
-    "torch.nn.qat.dynamic.modules",
-    "torch.nn.quantizable",
-    "torch.nn.quantizable.modules",
-    "torch.nn.quantized",
-    "torch.nn.quantized.dynamic",
-    "torch.nn.quantized.dynamic.modules",
-    "torch.nn.quantized.modules",
-    "torch.nn.utils",
-    "torch.package",
-    "torch.package.analyze",
-    "torch.quantization",
-    "torch.quantization.fx",
-    "torch.sparse",
-    "torch.special",
-    "torch.utils",
-    "torch.utils.backcompat",
-    "torch.utils.benchmark.examples",
-    "torch.utils.benchmark.op_fuzzers",
-    "torch.utils.benchmark.utils",
-    "torch.utils.benchmark.utils.valgrind_wrapper",
-    "torch.utils.bottleneck",
-    "torch.utils.data.communication",
-    "torch.utils.data.datapipes",
-    "torch.utils.data.datapipes.dataframe",
-    "torch.utils.data.datapipes.iter",
-    "torch.utils.data.datapipes.map",
-    "torch.utils.data.datapipes.utils",
-    "torch.utils.ffi",
-    "torch.utils.hipify",
-    "torch.utils.model_dump",
-    "torch.utils.tensorboard",
-]
-
-
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
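
For context, the coverage_ignore_functions and coverage_ignore_classes lists above feed Sphinx's sphinx.ext.coverage builder: names listed there are skipped when the builder reports public objects that have no documentation entry. A minimal sketch of that filtering idea, assuming a local PyTorch install; the module choice and the trimmed-down ignore set are illustrative only, not the builder's actual implementation:

# Not part of conf.py: a rough approximation of the filtering the coverage
# builder applies with an ignore list, using one of the modules touched above.
import inspect
import torch.backends

ignore = {"disable_global_flags", "flags_frozen"}  # mirrors the entries added above

flagged = [
    name
    for name, _ in inspect.getmembers(torch.backends, inspect.isfunction)
    if not name.startswith("_") and name not in ignore
]
print(flagged)  # candidates the builder would still check against the generated docs
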
@@ -417,6 +367,11 @@ def coverage_post_process(app, exception):
    if not isinstance(app.builder, CoverageBuilder):
        return

+    if not torch.distributed.is_available():
+        raise RuntimeError("The coverage tool cannot run with a version "
+                           "of PyTorch that was built with USE_DISTRIBUTED=0 "
+                           "as this module's API changes.")
+
    # These are all the modules that have "automodule" in an rst file
    # These modules are the ones for which coverage is checked
    # Here, we make sure that no module is missing from that list
@@ -443,26 +398,16 @@ def is_not_internal(modname):
            if modname not in modules:
                missing.add(modname)

-    expected = set(coverage_missing_automodule)
-
    output = []

-    unexpected_missing = missing - expected
-    if unexpected_missing:
-        mods = ", ".join(unexpected_missing)
+    if missing:
+        mods = ", ".join(missing)
        output.append(f"\nYou added the following module(s) to the PyTorch namespace '{mods}' "
                      "but they have no corresponding entry in a doc .rst file. You should "
                      "either make sure that the .rst file that contains the module's documentation "
                      "properly contains either '.. automodule:: mod_name' (if you do not want "
-                      "the paragraph added by the automodule, you can simply use py:module) or "
-                      "make the module private (by appending an '_' at the beginning of its name.")
-
-    unexpected_not_missing = expected - missing
-    if unexpected_not_missing:
-        mods = ", ".join(unexpected_not_missing)
-        output.append(f"\nThank you for adding the missing .rst entries for '{mods}', please update "
-                      "the 'coverage_missing_automodule' in 'torch/docs/source/conf.py' to remove "
-                      "the module(s) you fixed and make sure we do not regress on this in the future.")
+                      "the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') "
+                      "or make the module private (by appending an '_' at the beginning of its name).")

    # The output file is hard-coded by the coverage tool
    # Our CI is setup to fail if any line is added to this file
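
As the closing comments note, the coverage builder writes its report to a hard-coded output file and CI fails if any line is added to it. A hedged sketch of an equivalent local check, assuming the docs were built with 'sphinx-build -b coverage' into build/coverage; the path and the assumed two-line report header are illustrative, not taken from the PyTorch CI scripts:

# Not part of conf.py: a hypothetical local check mirroring the CI rule above.
from pathlib import Path

report = Path("build/coverage/python.txt")  # file name written by sphinx.ext.coverage
lines = report.read_text().splitlines()

# Skip the report header (assumed here to be the first two lines) and keep the rest.
entries = [line for line in lines[2:] if line.strip()]
if entries:
    raise SystemExit("Undocumented entries found:\n" + "\n".join(entries))
print("Coverage check passed.")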