PERF: implement multi-threading parallelism in inifix-validate CLI
neutrinoceros committed Aug 10, 2024
1 parent fb7ae1d commit d66930c
Showing 1 changed file with 30 additions and 15 deletions.
45 changes: 30 additions & 15 deletions src/inifix/validate.py
@@ -3,7 +3,10 @@
 import argparse
 import os
 import sys
+from concurrent.futures import ThreadPoolExecutor
+from typing import Literal
 
+from inifix._cli import Message, TaskResults
 from inifix.io import load
 
 
@@ -12,21 +15,33 @@ def main(argv: list[str] | None = None) -> int:
     parser.add_argument("files", nargs="+")
 
     args = parser.parse_args(argv)
-    retv = 0
-    for file in args.files:
-        if not os.path.isfile(file):
-            print(f"Error: could not find {file}", file=sys.stderr)
-            retv = 1
-            continue
-        try:
-            load(file)
-        except ValueError as exc:
-            print(f"Failed to validate {file}:\n {exc}", file=sys.stderr)
-            retv = 1
-        else:
-            print(f"Validated {file}")
-
-    return retv
+    closure = _validate_single_file_cli
+    with ThreadPoolExecutor(max_workers=int((os.cpu_count() or 2) / 2)) as executor:
+        futures = [executor.submit(closure, file) for file in args.files]
+        results = [f.result() for f in futures]
+
+    for res in results:
+        for message in res.messages:
+            print(message.content, file=message.dest)
+
+    return max(res.status for res in results)
+
+
+def _validate_single_file_cli(file: str) -> TaskResults:
+    status: Literal[0, 1] = 0
+    messages: list[Message] = []
+    if not os.path.isfile(file):
+        status = 1
+        messages.append(Message(f"Error: could not find {file}", sys.stderr))
+        return TaskResults(status, messages)
+    try:
+        load(file)
+    except ValueError as exc:
+        status = 1
+        messages.append(Message(f"Failed to validate {file}:\n {exc}", sys.stderr))
+    else:
+        messages.append(Message(f"Validated {file}", sys.stdout))
+    return TaskResults(status, messages)
 
 
 if __name__ == "__main__": # pragma: no cover
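Note: Message and TaskResults are imported from inifix._cli, whose definitions are not part of this diff. Judging from how they are used above (positional construction, then access to .content/.dest and .status/.messages), they behave like small record types. The following is a minimal sketch assuming plain frozen dataclasses; the actual inifix._cli definitions may differ.

# Hypothetical stand-ins for the containers imported from inifix._cli.
# Only their usage is visible in the diff; the shapes below are assumptions.
from dataclasses import dataclass
from typing import IO, Literal


@dataclass(frozen=True)
class Message:
    content: str   # text to print once all workers are done
    dest: IO[str]  # target stream, e.g. sys.stdout or sys.stderr


@dataclass(frozen=True)
class TaskResults:
    status: Literal[0, 1]    # 0 if the file validated cleanly, 1 otherwise
    messages: list[Message]  # output buffered by the worker thread

Buffering messages in TaskResults rather than printing from worker threads keeps each file's output grouped together even though validation runs concurrently, and lets the main thread compute the final exit status after all workers finish.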

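The new entry point can also be exercised programmatically rather than through the inifix-validate command. A small illustration, using hypothetical file names:

# Hypothetical example; "good.ini" and "missing.ini" are placeholder paths.
from inifix.validate import main

# Each file is submitted to the thread pool and validated independently.
# The exit status is the max of the per-file statuses, so a single failure
# (here, the nonexistent file) makes the whole run return 1.
exit_code = main(["good.ini", "missing.ini"])
print(exit_code)  # 1 if missing.ini does not exist and good.ini validates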