
Commit 12ed66e

update flake8 to stock pytorch (#3547)

* update flake8 version.
* backup lint fix.
* fix C417.
* fix B020.

1 parent: 6be715a

15 files changed (+63, -36 lines)

Diff for: .flake8

+11

@@ -12,6 +12,16 @@ ignore =
     B007,B008,
     # these ignores are from flake8-comprehensions; please fix!
     C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415
+    # Ignored because we still need a decision on them:
+    # B907: https://peps.python.org/pep-3101/#explicit-conversion-flag
+    # B023: https://docs.astral.sh/ruff/rules/function-uses-loop-variable/
+    # B905: https://docs.astral.sh/ruff/rules/zip-without-explicit-strict/
+    B907, B023, B905
+    # B028 is for the stacklevel of warnings. We don't want it because we
+    # only want 1 level of stack.
+    B028,
+    # Mostly on the CPU side; needs the CPU team to fix.
+    B031, C419
 per-file-ignores = __init__.py: F401,F403
 optional-ascii-coding = True
 exclude =

@@ -24,4 +34,5 @@ exclude =
     ./tests,
     ./scripts,
     ./third_party,
+    ./examples,
     *.pyi
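Of the newly ignored checks, B905 illustrates why these need an explicit decision rather than a mechanical fix: satisfying it changes runtime behavior. A minimal sketch of what the rule flags, with hypothetical names:

names = ["weight", "bias"]
values = [1.0, 2.0]

pairs = list(zip(names, values))  # B905: zip() without an explicit strict=
# The suggested fix (Python 3.10+) raises ValueError when lengths differ:
pairs = list(zip(names, values, strict=True))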

Diff for: .lintrunner.toml

+8 -8

@@ -18,14 +18,14 @@ init_command = [
     'run',
     'pip_init',
     '--dry-run={{DRYRUN}}',
-    'flake8==3.8.2',
-    'flake8-bugbear==20.1.4',
-    'flake8-comprehensions==3.3.0',
-    'flake8-executable==2.0.4',
+    'flake8==6.1.0',
+    'flake8-bugbear==23.3.23',
+    'flake8-comprehensions==3.15.0',
+    'flake8-executable==2.1.3',
     # 'git+https://github.com/malfet/flake8-coding.git',
-    'flake8-pyi==20.5.0',
-    'mccabe==0.6.1',
-    'pycodestyle==2.6.0',
-    'pyflakes==2.2.0',
+    'flake8-pyi==23.3.1',
+    'mccabe==0.7.0',
+    'pycodestyle==2.11.1',
+    'pyflakes==3.1.0',
     'black==24.3.0',
 ]

Diff for: cmake/ClangFormat.cmake

+5

@@ -3,6 +3,11 @@ if(BUILD_NO_CLANGFORMAT)
   return()
 endif()

+# Skip for WIN32 to avoid potential issue
+if(WIN32)
+  return()
+endif()
+
 if(CLANGFORMAT_enabled)
   return()
 endif()

Diff for: intel_extension_for_pytorch/cpu/hypertune/conf/config.py

+4 -4

@@ -191,10 +191,10 @@ def _read_conf(self, conf_fpath):
             validated_conf = schema.validate(conf)
             return validated_conf

-        except BaseException:
+        except BaseException as e:
             raise RuntimeError(
                 "The yaml file format is not correct. Please refer to document."
-            )
+            ) from e

     def _convert_conf(self, src, dst):
         hyperparam_default_val = {"launcher": launcher_hyperparam_default_val}

@@ -246,10 +246,10 @@ def _parse_hypertune_token(line):
             line = lineseg.group(1)
             objective = ast.literal_eval(line)
             objective = objective_schema.validate(objective)
-        except BaseException:
+        except BaseException as e:
             raise RuntimeError(
                 f"Parsing @hypertune failed for line {line} of {program_fpath} file"
-            )
+            ) from e
         return objective

     with Path(program_fpath).open("r") as f:
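The `from e` additions in this diff (and in the similar diffs below) address flake8-bugbear's B904: a raise inside an except block should chain the original exception, preserving it as `__cause__` so both tracebacks are reported. A minimal sketch with hypothetical names:

def parse_int(text):
    try:
        return int(text)
    except ValueError as e:
        # "from e" records the original ValueError as __cause__, so the
        # traceback shows both errors instead of only the RuntimeError.
        raise RuntimeError(f"could not parse {text!r}") from e

try:
    parse_int("not-a-number")
except RuntimeError as err:
    assert isinstance(err.__cause__, ValueError)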

Diff for: intel_extension_for_pytorch/cpu/hypertune/objective.py

+2 -2

@@ -108,10 +108,10 @@ def extract_usr_objectives(self, output):
         if HYPERTUNE_TOKEN in s:
             try:
                 objectives.append(float(output[i + 1]))
-            except BaseException:
+            except BaseException as e:
                 raise RuntimeError(
                     f"Extracting objective {output[i]} failed for {self.program} file. \
 Make sure to print an int/float value after the @hypertune token as \
 the objective value to be minimized or maximized."
-                )
+                ) from e
         return objectives

Diff for: intel_extension_for_pytorch/distributed/dist.py

+1 -1

@@ -106,7 +106,7 @@ def init_process_group(
     try:
         import oneccl_bindings_for_pytorch  # noqa
     except ImportError as e:
-        raise RuntimeError("oneccl_bindings_for_pytorch is not installed!")
+        raise RuntimeError("oneccl_bindings_for_pytorch is not installed!") from e
     return dist.init_process_group(
         backend,
         init_method,

Diff for: intel_extension_for_pytorch/optim/_optimizer_utils.py

+3 -4

@@ -267,13 +267,12 @@ def cast(param, value, key=None):
     )

     # Update the state
-    id_map = {
-        old_id: p
-        for old_id, p in zip(
+    id_map = dict(
+        zip(
             chain.from_iterable((g["params"] for g in saved_groups)),
             chain.from_iterable((g["params"] for g in groups)),
         )
-    }
+    )

     # Copy state assigned to params (and cast tensors to appropriate types).
     # State that is not assigned to params is copied as is (needed for
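This rewrite satisfies flake8-comprehensions (most likely C416): a dict comprehension that merely repackages key/value pairs can call dict() on the pairs directly. A minimal standalone sketch with hypothetical data:

old_ids = [0, 1, 2]
params = ["p0", "p1", "p2"]

id_map = {old_id: p for old_id, p in zip(old_ids, params)}  # flagged
id_map = dict(zip(old_ids, params))  # equivalent, as in this diff

assert id_map == {0: "p0", 1: "p1", 2: "p2"}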

Diff for: intel_extension_for_pytorch/xpu/cpp_extension.py

+10 -4

@@ -433,7 +433,7 @@ def unix_wrap_ninja_compile(
             return objects

         if self.compiler.compiler_type == "msvc":
-            raise "Not implemented"
+            raise NotImplementedError("Not implemented for ninja build!")
         else:
             if self.use_ninja:
                 self.compiler.compile = unix_wrap_ninja_compile

@@ -1370,11 +1370,15 @@ def __init__(self):

     def check_onemkl_cfg(self):
         if self.__onemkl_root is None:
-            raise "Didn't detect mkl root. Please source <oneapi_dir>/mkl/<version>/env/vars.sh "
+            raise RuntimeError(
+                "Didn't detect mkl root. Please source <oneapi_dir>/mkl/<version>/env/vars.sh "
+            )

     def check_onednn_cfg(self):
         if self.__onednn_root is None:
-            raise "Didn't detect dnnl root. Please source <oneapi_dir>/dnnl/<version>/env/vars.sh "
+            raise RuntimeError(
+                "Didn't detect dnnl root. Please source <oneapi_dir>/dnnl/<version>/env/vars.sh "
+            )
         else:
             logger.warning(
                 "This extension has static linked onednn library. Please attaction to \

@@ -1383,7 +1387,9 @@ def check_onednn_cfg(self):

     def check_dpcpp_cfg(self):
         if self.__dpcpp_root is None:
-            raise "Didn't detect dpcpp root. Please source <oneapi_dir>/compiler/<version>/env/vars.sh "
+            raise RuntimeError(
+                "Didn't detect dpcpp root. Please source <oneapi_dir>/compiler/<version>/env/vars.sh "
+            )

     def get_default_include_dir(self):
         return [os.path.join(self.__default_root, "include")]
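These changes fix real bugs, not just style: in Python 3, raise only accepts instances or subclasses of BaseException, so raising a bare string fails with a TypeError and the intended message never reaches the user. A minimal sketch:

try:
    raise "Not implemented"  # TypeError: exceptions must derive from BaseException
except TypeError as err:
    print(err)

try:
    raise NotImplementedError("Not implemented for ninja build!")
except NotImplementedError as err:
    print(err)  # the intended message survives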

Diff for: scripts/tools/setup/flake8.py

+6

@@ -3,6 +3,8 @@
 import subprocess
 import sys

+_IS_WINDOWS = sys.platform == "win32"
+

 def check_flake8_errors(base_dir, filepath):
     if shutil.which("flake8") is None:

@@ -51,6 +53,10 @@ def check_flake8_errors(base_dir, filepath):


 if __name__ == "__main__":
+    if _IS_WINDOWS:
+        print("skip flake8 check for Windows")
+        sys.exit(0)
+
     base_dir = os.path.abspath(
         os.path.dirname(os.path.join(os.path.abspath(__file__), "../../../../"))
     )

Diff for: setup.py

+2 -2

@@ -138,7 +138,7 @@ def _get_build_target():
     import torch
     from torch.utils.cpp_extension import BuildExtension, CppExtension
 except ImportError as e:
-    raise RuntimeError("Fail to import torch!")
+    raise RuntimeError("Fail to import torch!") from e


 def _check_env_flag(name, default=""):

@@ -159,7 +159,7 @@ def create_if_not_exist(path_dir):
         Path(path_dir).mkdir(parents=True, exist_ok=True)
     except OSError as exc:  # Guard against race condition
         if exc.errno != errno.EEXIST:
-            raise RuntimeError("Fail to create path {}".format(path_dir))
+            raise RuntimeError("Fail to create path {}".format(path_dir)) from exc


 def get_version_num():

Diff for: tests/cpu/common_nn.py

+2 -2

@@ -5023,12 +5023,12 @@ def test_cuda(self, test_case):
         # are unreachable (which can happen if you differentiate
         # only on the gradient.
         cpu_gg = torch.autograd.grad(
-            cpu_output.sum() + sum(map(lambda x: x.sum(), cpu_gradInputs)),
+            cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs),
             (cpu_input, cpu_gradOutput) + tuple(cpu_module.parameters()),
             retain_graph=True,
         )
         gpu_gg = torch.autograd.grad(
-            gpu_output.sum() + sum(map(lambda x: x.sum(), gpu_gradInputs)),
+            gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs),
             (gpu_input, gpu_gradOutput) + tuple(gpu_module.parameters()),
             retain_graph=True,
         )
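This is the C417 fix named in the commit message: flake8-comprehensions flags map() with a lambda, which reads better (and skips a function call per element) as a generator expression. A minimal sketch, using plain arithmetic in place of the tensor .sum() calls above:

grads = [1.5, 2.5]

total = sum(map(lambda x: x * x, grads))  # C417
total = sum(x * x for x in grads)         # equivalent rewrite

assert total == 8.5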

Diff for: tests/cpu/common_utils.py

+3 -3

@@ -1142,7 +1142,7 @@ def accept_output(update_type):
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept"
                 ).format(munged_id, subname_output, s, __main__.__file__, munged_id)
-            )
+            ) from e

 # a hack for JIT tests
 if IS_WINDOWS:

@@ -1314,10 +1314,10 @@ def download_file(url, binary=True):
         with open(path, "wb" if binary else "w") as f:
             f.write(data)
         return path
-    except error.URLError:
+    except error.URLError as e:
         msg = "could not download test file '{}'".format(url)
         warnings.warn(msg, RuntimeWarning)
-        raise unittest.SkipTest(msg)
+        raise unittest.SkipTest(msg) from e


 def find_free_port():

Diff for: tests/cpu/test_ipex_optimize_transformers.py

+2 -2

@@ -169,12 +169,12 @@ def test_model_replacement(self):
         enable_torchcompile = [False, True]
         deployment_mode = [True, False]
         return_dict = [False, True]
-        for m, torchcompile, dtype, jit, return_dict in itertools.product(
+        for m, torchcompile, dtype, jit, ret_dict in itertools.product(
             supported_models, enable_torchcompile, dtypes, deployment_mode, return_dict
         ):
             if torchcompile and deployment_mode:
                 continue
-            self.model_replacement_check(m, dtype, jit, torchcompile, return_dict)
+            self.model_replacement_check(m, dtype, jit, torchcompile, ret_dict)
         _disable_tpp()

     def _model_replacement_check_woq(self, model):
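This rename is the B020 fix from the commit message, and the same pattern recurs in the two diffs below: flake8-bugbear flags a loop target that reuses the name of the iterable it loops over, silently clobbering the original binding. A minimal sketch:

return_dict = [False, True]
for return_dict in return_dict:  # B020: loop variable overrides its iterable
    pass
# return_dict is now True; the original list is gone.

return_dict = [False, True]
for ret_dict in return_dict:  # renamed target, as in this commit
    pass
assert return_dict == [False, True]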

Diff for: tests/cpu/test_ipex_optimize_transformers_nightly.py

+2 -2

@@ -396,12 +396,12 @@ def test_model_replacement(self):
         enable_torchcompile = [False, True]
         deployment_mode = [True, False]
         return_dict = [False, True]
-        for m, torchcompile, dtype, jit, return_dict in itertools.product(
+        for m, torchcompile, dtype, jit, ret_dict in itertools.product(
             supported_models, enable_torchcompile, dtypes, deployment_mode, return_dict
         ):
             if torchcompile and deployment_mode:
                 continue
-            self.model_replacement_check(m, dtype, jit, torchcompile, return_dict)
+            self.model_replacement_check(m, dtype, jit, torchcompile, ret_dict)
         _disable_tpp()

     def test_load_low_precision_checkpoint(self):

Diff for: tests/cpu/test_quantization_default_recipe.py

+2 -2

@@ -134,8 +134,8 @@ def forward(self, x):
                 ]
             ],
         ]
-        for quantized_modules, pattern in zip(quantized_modules, patterns):
-            m = M(quantized_modules).eval()
+        for quantized_module, pattern in zip(quantized_modules, patterns):
+            m = M(quantized_module).eval()

             x = torch.rand(1, 2, 14, 14)
