From ead3af77c6ad719110f6ece9e110919e8e614104 Mon Sep 17 00:00:00 2001 From: Ivan Razumov Date: Mon, 30 Oct 2023 16:36:05 +0100 Subject: [PATCH] Allow manual override to PR test results --- .git-blame-ignore-revs | 4 + .github/workflows/black.yaml | 19 + DMWM/AggregatePylint.py | 52 +- DMWM/AnalyzePy27.py | 38 +- DMWM/AnalyzePyFuture.py | 15 +- DMWM/AnalyzePylint.py | 134 +- DMWM/CompareTests.py | 85 +- DMWM/IdentifyPythonFiles.py | 8 +- DMWM/IssueMessage.py | 26 +- DMWM/TestWatchdog.py | 27 +- DMWM/xunitparser.py | 109 +- README.md | 17 + RelValArgs.py | 205 +- _py2with3compatibility.py | 30 +- add-externals-gh-labels.py | 283 +- backport-pr.py | 141 +- buildLogAnalyzer.py | 664 ++- cache-pull-request.py | 167 +- categories.py | 310 +- categories_map.py | 3659 ++++++++--------- check-future-commits-prs.py | 53 +- checkDirSizes.py | 11 +- checkLibDeps.py | 30 +- checkLogFile.py | 226 +- checkPyConfigs.py | 14 +- checkTestLog.py | 134 +- chk-invalid-headers.py | 48 +- cms-filename-checks.py | 27 +- cms-jenkins-api | 40 +- cms-jenkins-api.py | 52 + cms_static.py | 42 +- cmsdist-comp-pr-process.py | 160 +- cmsdist_merge_permissions.py | 93 +- cmssw_known_errors.py | 293 +- cmssw_l2/commit.txt | 2 + cmssw_l2/l2.json | 1 + cmssw_l2/update.py | 93 +- cmsutils.py | 216 +- comment-gh-pr | 47 +- comment-gh-pr.py | 89 + comp/create_wm_archs.py | 143 +- compareTriggerResults | 293 +- compareTriggerResults.py | 424 ++ compareTriggerResultsSummary | 178 +- compareTriggerResultsSummary.py | 265 ++ comparisons/analyzeFWComparison.py | 168 +- comparisons/validateJR.py | 207 +- condor/tests/node-check.py | 244 +- config.map | 1 + crab/pset.py | 172 +- crab/task.py | 54 +- create-gh-issue.py | 72 +- create-gh-pr.py | 58 +- create-gh-release.py | 36 +- create-github-hooks | 137 +- create-github-hooks.py | 208 + create-new-data-pr.py | 325 +- create_json.py | 42 +- cuda/install-cuda.py | 497 ++- cvmfs_deployment/has_lease.py | 36 +- das-utils/CMSWeb.py | 266 +- das-utils/cleanup-unused-ibeos.py | 127 +- das-utils/das_cache.py | 701 ++-- das-utils/ib-datasets.py | 79 +- das-utils/ib-eos-files.py | 449 +- das-utils/order-das-files.py | 31 +- deprecate-releases | 92 +- deprecate-releases.py | 131 + deprecate_releases.py | 15 +- docker/check-repositories.py | 107 +- es-cleanup-indexes | 119 +- es-cleanup-indexes.py | 132 + es-reindex | 114 +- es-reindex.py | 127 + es/es_close_indexes.py | 103 +- es/es_delete_indexes.py | 21 +- es/es_get_templates.py | 33 +- es/es_git_repo_size.py | 32 +- es/es_open_indexes.py | 27 +- es/es_send_templates.py | 24 +- es/es_show_indexes.py | 22 +- es_cmsdoxygen_apache.py | 82 +- es_cmsrep_apache.py | 139 +- es_cmssdt_apache.py | 94 +- es_doxygen_apache.py | 85 +- es_externals_stats.py | 1 - es_hypernews.py | 62 +- es_hypernews_log.py | 88 +- es_ib_build_stats.py | 181 +- es_ibs_log.py | 450 +- es_iwyu_logs.py | 41 +- es_reindex_indexes_with_pattern.py | 47 +- es_relval_log.py | 368 +- es_relval_stats.py | 131 +- es_utils.py | 698 ++-- fix-backport-labels.py | 99 +- fix-igprof-sql.py | 58 +- forward-pull-requests | 99 +- forward-pull-requests.py | 114 + forward_ports_map.py | 100 +- gen-relval-jobs.py | 84 +- generate-categories-json | 45 +- generate-categories-json.py | 45 + generate-json-performance-charts | 161 +- generate-json-performance-charts.py | 175 + generate-performance-summary | 161 +- generate-performance-summary.py | 187 + get-builds-stats.py | 114 +- get-git-tags | 30 +- get-git-tags.py | 45 + get-local-build-stats.py | 32 +- get-pr-branch | 46 +- get-pr-branch.py | 54 
+ get-relval-failures.py | 27 +- getWorkflowStatsFromES.py | 124 +- get_repo_authors.py | 36 +- gh-teams.py | 477 ++- gh_create_branches.py | 87 +- gh_update_pr_milestone.py | 86 +- github-rate-limits | 15 +- github-rate-limits.py | 14 + github_backup.py | 441 +- github_get_file_changes.py | 79 +- github_hooks_config.py | 81 +- github_modified_files.py | 40 +- github_scripts/get_Github_API_rate.py | 2 +- ...simultaneous_files_modifications_by_PRs.py | 38 +- github_utils.py | 643 ++- githublabels.py | 100 +- gitmergesgraph.py | 295 +- ib-create-tag.py | 22 +- ib-pr-workflow-changed.py | 78 +- ib-upload-logs.py | 3 +- jenkins-jobs/es-cmssw-afs-eos.py | 42 +- jenkins-jobs/git/git-mirror-repository | 24 +- jenkins-jobs/git/git-mirror-repository.py | 26 + jenkins-jobs/git/git-notify-ib-updates | 221 +- jenkins-jobs/git/git-notify-ib-updates.py | 207 + jenkins/jenkins-kill-placeholder-job.py | 125 +- jenkins/jenkins-project-report-to-markdown.py | 101 +- jenkins/parser/actions.py | 78 +- jenkins/parser/helpers.py | 29 +- jenkins/parser/jenkins-parser-job.py | 122 +- jenkins/parser/jenkins-parser-monitor-job.py | 20 +- jenkins/parser/jenkins-retry-job.py | 16 +- jenkins/parser/jobs-config.json | 3 +- jenkins/parser/paser-config-unittest.py | 8 +- jenkins/report-jenkins-jobs.py | 147 +- jenkins_callback.py | 63 +- jenkins_monitor_queue.py | 134 +- jobs/create-relval-jobs.py | 167 +- jobs/jobscheduler.py | 478 ++- jobs/stats.py | 42 +- jobs/workflow_final.py | 284 +- lizard-processing/src/lizard_to_html.py | 114 +- lizard-processing/test/test_lizard_to_html.py | 28 +- logRootQA.py | 636 +-- logUpdater.py | 134 +- logreaderUtils.py | 20 +- logwatch.py | 142 +- lxr/checkout-version.py | 39 +- mark_commit_status.py | 111 +- material_budget_ref.py | 39 +- merge-pull-request | 31 +- merge-pull-request.py | 31 + milestones.py | 166 +- modify_comment.py | 97 +- monitor_workflow.py | 144 +- new-release-cycle | 245 +- new-release-cycle.py | 340 ++ package2category.py | 17 +- parse_iwyu_logs.py | 125 +- parse_jenkins_builds.py | 341 +- parse_workflow_time.py | 29 +- port-pull-request.py | 56 +- pr-checks/check-pr-files | 67 +- pr-checks/check-pr-files.py | 90 + pr-checks/find-changed-workflows.py | 72 +- pr_testing/get-merged-prs.py | 100 +- pr_testing/run-das-query.py | 55 +- pr_testing/test_multiple_prs.sh | 3 +- process-build-release-request | 1191 +----- process-build-release-request.py | 1477 +++++++ process-create-data-repo-request | 265 +- process-create-data-repo-request.py | 264 ++ process-error-reports | 387 +- process-error-reports.py | 426 ++ process-partial-logs-relval.py | 8 +- process-pull-request | 42 +- process-pull-request.py | 83 + process_pr.py | 3317 ++++++++------- python/archived_argparse.py | 905 ++-- query-and-process-prs | 68 +- query-and-process-prs.py | 103 + query-new-pull-requests | 37 +- query-new-pull-requests.py | 46 + reco_profiling/profileRunner.py | 299 +- release-notes | 138 +- release-notes.py | 181 + release_notes_collection.py | 274 +- releases.map | 8 +- releases.py | 31 +- repo_config.py | 21 +- report-build-release-status.py | 592 +-- report-cmsdist-pull-request-results | 118 +- report-cmsdist-pull-request-results.py | 143 + report-pull-request-results.py | 929 +++-- report-summary-merged-prs | 1709 +------- report-summary-merged-prs.py | 1957 +++++++++ report_size.py | 42 +- .../HLT_EcalLaserValidation/categories.py | 38 +- .../HLT_EcalLaserValidation/releases.py | 17 +- .../HLT_EcalLaserValidation/repo_config.py | 50 +- .../L1T_EcalLaserValidation/categories.py | 
38 +- .../L1T_EcalLaserValidation/releases.py | 17 +- .../L1T_EcalLaserValidation/repo_config.py | 48 +- .../categories.py | 24 +- .../RECO_EcalPulseShapeValidation/releases.py | 17 +- .../repo_config.py | 48 +- .../TPG_EcalLaserValidation/categories.py | 40 +- .../TPG_EcalLaserValidation/releases.py | 17 +- .../TPG_EcalLaserValidation/repo_config.py | 48 +- .../ConditionsValidation/categories.py | 24 +- .../ConditionsValidation/releases.py | 17 +- .../ConditionsValidation/repo_config.py | 46 +- repos/cms_patatrack/cmssw/categories.py | 20 +- repos/cms_patatrack/cmssw/releases.py | 17 +- repos/cms_patatrack/cmssw/repo_config.py | 68 +- repos/dmwm/CRABServer/repo_config.py | 60 +- repos/smuzaffar/SCRAM/repo_config.py | 14 +- repos/smuzaffar/cmssw/categories.py | 18 +- repos/smuzaffar/cmssw/releases.py | 17 +- repos/smuzaffar/cmssw/repo_config.py | 62 +- repos/smuzaffar/int_build/categories.py | 22 +- repos/smuzaffar/int_build/releases.py | 17 +- repos/smuzaffar/int_build/repo_config.py | 32 +- run-ib-addon.py | 22 +- run-ib-relval.py | 198 +- runPyRelValThread.py | 659 +-- runTests.py | 237 +- scram-package-monitor-sender | 115 +- scram-package-monitor-sender.py | 123 + scram-package-monitor-timestamps | 49 +- scram-package-monitor-timestamps.py | 63 + shift/libib.py | 4 +- shift/report.py | 11 +- shift/uniq-errors.py | 16 +- show-ibs-schedule.py | 91 +- splitDepViolationLog.py | 45 +- splitUnitTestLog.py | 100 +- tag-ib | 62 +- tag-ib.py | 92 + tests/test_config-map.py | 18 +- tests/test_logreaderUtils.py | 15 +- tests/test_watchers.py | 56 +- trigger_jenkins_job.py | 48 +- update-github-hooks-ip | 17 +- update-github-hooks-ip.py | 20 + updateVOTags | 86 +- updateVOTags.py | 93 + utils/cmsdist_pip_pkgs_update.py | 355 +- watchers.yaml | 1 + 262 files changed, 25249 insertions(+), 19633 deletions(-) create mode 100644 .git-blame-ignore-revs create mode 100644 .github/workflows/black.yaml mode change 100755 => 120000 cms-jenkins-api create mode 100755 cms-jenkins-api.py mode change 100755 => 120000 comment-gh-pr create mode 100755 comment-gh-pr.py mode change 100755 => 120000 compareTriggerResults create mode 100755 compareTriggerResults.py mode change 100755 => 120000 compareTriggerResultsSummary create mode 100755 compareTriggerResultsSummary.py mode change 100755 => 120000 create-github-hooks create mode 100755 create-github-hooks.py mode change 100755 => 120000 deprecate-releases create mode 100755 deprecate-releases.py mode change 100755 => 120000 es-cleanup-indexes create mode 100755 es-cleanup-indexes.py mode change 100755 => 120000 es-reindex create mode 100755 es-reindex.py mode change 100755 => 120000 forward-pull-requests create mode 100755 forward-pull-requests.py mode change 100755 => 120000 generate-categories-json create mode 100755 generate-categories-json.py mode change 100755 => 120000 generate-json-performance-charts create mode 100755 generate-json-performance-charts.py mode change 100755 => 120000 generate-performance-summary create mode 100755 generate-performance-summary.py mode change 100644 => 120000 get-git-tags create mode 100644 get-git-tags.py mode change 100755 => 120000 get-pr-branch create mode 100755 get-pr-branch.py mode change 100755 => 120000 github-rate-limits create mode 100755 github-rate-limits.py mode change 100755 => 120000 jenkins-jobs/git/git-mirror-repository create mode 100755 jenkins-jobs/git/git-mirror-repository.py mode change 100755 => 120000 jenkins-jobs/git/git-notify-ib-updates create mode 100755 jenkins-jobs/git/git-notify-ib-updates.py 
mode change 100755 => 120000 merge-pull-request create mode 100755 merge-pull-request.py mode change 100755 => 120000 new-release-cycle create mode 100755 new-release-cycle.py mode change 100755 => 120000 pr-checks/check-pr-files create mode 100755 pr-checks/check-pr-files.py mode change 100755 => 120000 process-build-release-request create mode 100755 process-build-release-request.py mode change 100755 => 120000 process-create-data-repo-request create mode 100755 process-create-data-repo-request.py mode change 100755 => 120000 process-error-reports create mode 100755 process-error-reports.py mode change 100755 => 120000 process-pull-request create mode 100755 process-pull-request.py mode change 100755 => 120000 query-and-process-prs create mode 100755 query-and-process-prs.py mode change 100755 => 120000 query-new-pull-requests create mode 100755 query-new-pull-requests.py mode change 100755 => 120000 release-notes create mode 100755 release-notes.py mode change 100755 => 120000 report-cmsdist-pull-request-results create mode 100755 report-cmsdist-pull-request-results.py mode change 100755 => 120000 report-summary-merged-prs create mode 100755 report-summary-merged-prs.py mode change 100755 => 120000 scram-package-monitor-sender create mode 100755 scram-package-monitor-sender.py mode change 100755 => 120000 scram-package-monitor-timestamps create mode 100755 scram-package-monitor-timestamps.py mode change 100755 => 120000 tag-ib create mode 100755 tag-ib.py mode change 100755 => 120000 update-github-hooks-ip create mode 100755 update-github-hooks-ip.py mode change 100755 => 120000 updateVOTags create mode 100755 updateVOTags.py diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000000..b5d60041ed54 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,4 @@ +# Reformat everything with Black, part 1 +d64ce0527da177ffaf4a9efcb1fc9fd31a9f416e +# Reformat everything with Black, part 2 +64e3a6a4d9a8de478b780ea090fe33f2a8e8646f diff --git a/.github/workflows/black.yaml b/.github/workflows/black.yaml new file mode 100644 index 000000000000..972ee6631690 --- /dev/null +++ b/.github/workflows/black.yaml @@ -0,0 +1,19 @@ +name: Black Style Check + +on: + pull_request: + paths: + - '**/*.py' + +jobs: + black_check: + name: Check Python Code Style with Black + runs-on: ubuntu-latest + + steps: + - name: Check out the code + uses: actions/checkout@v2 + + - uses: psf/black@stable + with: + options: "--check --diff --verbose -l 99 -t py36 -t py37 -t py38 -t py39 -t py310 -t py311" diff --git a/DMWM/AggregatePylint.py b/DMWM/AggregatePylint.py index 98d09187fc0a..bbd062b46348 100755 --- a/DMWM/AggregatePylint.py +++ b/DMWM/AggregatePylint.py @@ -13,7 +13,7 @@ label = args[0] try: - with open('pylintReport.json', 'r') as reportFile: + with open("pylintReport.json", "r") as reportFile: report = json.load(reportFile) except IOError: report = {} @@ -24,22 +24,22 @@ refactors = 0 score = 0 -with open('pylint.out', 'r') as pylintFile: +with open("pylint.out", "r") as pylintFile: for line in pylintFile: - if line.startswith('Your code has been rated at '): - scorePart = line.strip('Your code has been rated at ') - score = scorePart.split('/')[0] + if line.startswith("Your code has been rated at "): + scorePart = line.strip("Your code has been rated at ") + score = scorePart.split("/")[0] try: if not filename in report: report[filename] = {} if not label in report[filename]: report[filename][label] = {} if filename and label: - report[filename][label]['score'] = score + 
report[filename][label]["score"] = score except NameError: print("Score of %s found, but no filename" % score) - parts = line.split(':') + parts = line.split(":") if len(parts) != 3: continue try: @@ -49,22 +49,22 @@ continue lineNumber = int(lineNumber) filename = newFilename - rmParts = rawMessage.split(']', 1) + rmParts = rawMessage.split("]", 1) rawCode = rmParts[0].strip() message = rmParts[1].strip() severity = rawCode[1:2] code = rawCode[2:6] shortMsg = rawCode[7:] - msgParts = shortMsg.split(',') + msgParts = shortMsg.split(",") objectName = msgParts[1].strip() - if severity == 'R': + if severity == "R": refactors += 1 - elif severity == 'W': + elif severity == "W": warnings += 1 - elif severity == 'E': + elif severity == "E": errors += 1 - elif severity == 'C': + elif severity == "C": comments += 1 if not filename in report: @@ -72,24 +72,20 @@ if not label in report[filename]: report[filename][label] = {} - if not 'events' in report[filename][label]: - report[filename][label]['events'] = [] - report[filename][label]['events'].append((lineNumber, severity, code, objectName, message)) + if not "events" in report[filename][label]: + report[filename][label]["events"] = [] + report[filename][label]["events"].append( + (lineNumber, severity, code, objectName, message) + ) - report[filename][label]['refactors'] = refactors - report[filename][label]['warnings'] = warnings - report[filename][label]['errors'] = errors - report[filename][label]['comments'] = comments + report[filename][label]["refactors"] = refactors + report[filename][label]["warnings"] = warnings + report[filename][label]["errors"] = errors + report[filename][label]["comments"] = comments except ValueError: continue -with open('pylintReport.json', 'w') as reportFile: +with open("pylintReport.json", "w") as reportFile: json.dump(report, reportFile, indent=2) - reportFile.write('\n') - - - - - - + reportFile.write("\n") diff --git a/DMWM/AnalyzePy27.py b/DMWM/AnalyzePy27.py index a309fa69732a..c7d74324e39b 100755 --- a/DMWM/AnalyzePy27.py +++ b/DMWM/AnalyzePy27.py @@ -6,58 +6,60 @@ from github import Github -summaryMessage = '' +summaryMessage = "" reportOn = {} failed = False -with open('added.message', 'r') as messageFile: +with open("added.message", "r") as messageFile: lines = messageFile.readlines() if len(lines): - summaryMessage += 'Imports for Python3 compatability missing in new files. Please fix this:\n' + summaryMessage += ( + "Imports for Python3 compatability missing in new files. Please fix this:\n" + ) - summaryMessage += ''.join(lines) + summaryMessage += "".join(lines) summaryMessage += "\n\n" failed = True -with open('test.patch', 'r') as patchFile: +with open("test.patch", "r") as patchFile: lines = patchFile.readlines() if len(lines): - summaryMessage += 'Pre-python 2.6 constructs are introduced by this pull request. This must be fixed. Suggested patch follows:\n\n' + summaryMessage += "Pre-python 2.6 constructs are introduced by this pull request. This must be fixed. Suggested patch follows:\n\n" summaryMessage += "```diff\n" - summaryMessage += ''.join(lines) + summaryMessage += "".join(lines) summaryMessage += "\n```\n\n" failed = True -with open('idioms.patch', 'r') as patchFile: +with open("idioms.patch", "r") as patchFile: lines = patchFile.readlines() if len(lines): - summaryMessage += 'Pre-python 2.6 idioms found in changed files. Please consider updating the code. Suggested patch follows:\n\n' + summaryMessage += "Pre-python 2.6 idioms found in changed files. Please consider updating the code. 
Suggested patch follows:\n\n" summaryMessage += "```diff\n" - summaryMessage += ''.join(lines) + summaryMessage += "".join(lines) summaryMessage += "\n```\n\n" issueID = None -if 'ghprbPullId' in os.environ: - issueID = os.environ['ghprbPullId'] +if "ghprbPullId" in os.environ: + issueID = os.environ["ghprbPullId"] -gh = Github(os.environ['DMWMBOT_TOKEN']) -codeRepo = os.environ.get('CODE_REPO', 'WMCore') -repoName = '%s/%s' % (os.environ['WMCORE_REPO'], codeRepo) +gh = Github(os.environ["DMWMBOT_TOKEN"]) +codeRepo = os.environ.get("CODE_REPO", "WMCore") +repoName = "%s/%s" % (os.environ["WMCORE_REPO"], codeRepo) issue = gh.get_repo(repoName).get_issue(int(issueID)) if len(summaryMessage) > 250000: summaryMessage = summaryMessage[:250000] if summaryMessage: - issue.create_comment('%s' % summaryMessage) + issue.create_comment("%s" % summaryMessage) if failed: - print('Testing of python code. DMWM-FAIL-PY27') + print("Testing of python code. DMWM-FAIL-PY27") else: - print('Testing of python code. DMWM-SUCCEED-PY27') + print("Testing of python code. DMWM-SUCCEED-PY27") diff --git a/DMWM/AnalyzePyFuture.py b/DMWM/AnalyzePyFuture.py index 58f67bbcf35a..488d4465125a 100755 --- a/DMWM/AnalyzePyFuture.py +++ b/DMWM/AnalyzePyFuture.py @@ -2,17 +2,20 @@ from __future__ import print_function, division -with open('addedFiles.txt', 'r') as addedFiles: +with open("addedFiles.txt", "r") as addedFiles: for fileName in addedFiles: fileName = fileName.strip() - if fileName.endswith('__init__.py'): + if fileName.endswith("__init__.py"): continue - with open(fileName, 'r') as pyFile: + with open(fileName, "r") as pyFile: pyLines = pyFile.readlines() - if fileName.endswith('.py') or 'python' in pyLines[0]: + if fileName.endswith(".py") or "python" in pyLines[0]: foundDivision = False for line in pyLines: - if '__future__' in line and 'division' in line: + if "__future__" in line and "division" in line: foundDivision = True if not foundDivision: - print ("* New file %s does not use python 3 division. Please add `from __future__ import division`.\n" % fileName) + print( + "* New file %s does not use python 3 division. 
Please add `from __future__ import division`.\n" + % fileName + ) diff --git a/DMWM/AnalyzePylint.py b/DMWM/AnalyzePylint.py index ce3436c9f9df..73accab5e5b5 100755 --- a/DMWM/AnalyzePylint.py +++ b/DMWM/AnalyzePylint.py @@ -7,114 +7,132 @@ from github import Github -reportWarnings = ['0611', '0612', '0613'] +reportWarnings = ["0611", "0612", "0613"] -summaryMessage = '' -longMessage = '' +summaryMessage = "" +longMessage = "" reportOn = {} failed = False reportExists = False -with open('pylintReport.json', 'r') as reportFile: +with open("pylintReport.json", "r") as reportFile: report = json.load(reportFile) if report: reportExists = True for filename in sorted(report.keys()): fileReport = report[filename] - if 'test' in fileReport and 'base' not in fileReport: - testReport = fileReport['test'] - if not 'score' in testReport: + if "test" in fileReport and "base" not in fileReport: + testReport = fileReport["test"] + if not "score" in testReport: continue reportOn[filename] = True - summaryMessage += '* New file %s with score %s, %s warnings, and %s errors\n' % (filename, testReport['score'], testReport['warnings'], testReport['errors']) - if 'test' in fileReport and 'base' in fileReport: - testReport = fileReport['test'] - baseReport = fileReport['base'] - if not 'score' in testReport or not 'score' in baseReport: + summaryMessage += "* New file %s with score %s, %s warnings, and %s errors\n" % ( + filename, + testReport["score"], + testReport["warnings"], + testReport["errors"], + ) + if "test" in fileReport and "base" in fileReport: + testReport = fileReport["test"] + baseReport = fileReport["base"] + if not "score" in testReport or not "score" in baseReport: continue - if float(testReport['score']) < float(baseReport['score']) or \ - float(testReport['errors']) > float(baseReport['errors']) or \ - float(testReport['warnings']) > float(baseReport['warnings']): + if ( + float(testReport["score"]) < float(baseReport["score"]) + or float(testReport["errors"]) > float(baseReport["errors"]) + or float(testReport["warnings"]) > float(baseReport["warnings"]) + ): reportOn[filename] = True - summaryMessage += '* Score for %s changed from %s to %s with %s (%s) total errors (warnings)\n' % (filename, baseReport['score'], testReport['score'], testReport['errors'], testReport['warnings']) + summaryMessage += ( + "* Score for %s changed from %s to %s with %s (%s) total errors (warnings)\n" + % ( + filename, + baseReport["score"], + testReport["score"], + testReport["errors"], + testReport["warnings"], + ) + ) for filename in sorted(report.keys()): comments = 0 warnings = 0 fileReport = report[filename] - if 'test' in fileReport: - testReport = fileReport['test'] - if not 'score' in testReport: + if "test" in fileReport: + testReport = fileReport["test"] + if not "score" in testReport: continue - if float(testReport['score']) < 8.0 or filename in reportOn: - if float(testReport['score']) < 8.0: + if float(testReport["score"]) < 8.0 or filename in reportOn: + if float(testReport["score"]) < 8.0: failed = True - longMessage += '\n%s fails the pylint check. Report follows:\n' % filename + longMessage += "\n%s fails the pylint check. Report follows:\n" % filename elif filename in reportOn: - longMessage += '\n%s got worse in pylint. Report follows:\n' % filename + longMessage += "\n%s got worse in pylint. 
Report follows:\n" % filename else: - longMessage += '\nPylint report for %s follows:\n' % filename - for event in testReport['events']: - if event[1] == 'C': # Severity + longMessage += "\nPylint report for %s follows:\n" % filename + for event in testReport["events"]: + if event[1] == "C": # Severity comments += 1 continue - if event[1] == 'I': # Severity + if event[1] == "I": # Severity continue - longMessage += '* Line %s ' % (event[0]) - if event[3]: # Module - longMessage += 'in %s ' % event[3] - longMessage += '%s%s %s\n' % (event[1], event[2], event[4]) + longMessage += "* Line %s " % (event[0]) + if event[3]: # Module + longMessage += "in %s " % event[3] + longMessage += "%s%s %s\n" % (event[1], event[2], event[4]) longMessage += "* plus %s comments on code style\n" % comments else: - conditionalMessage = '' - for event in testReport['events']: - if event[1] == 'C': # Severity + conditionalMessage = "" + for event in testReport["events"]: + if event[1] == "C": # Severity comments += 1 continue - if event[1] == 'I': # Severity + if event[1] == "I": # Severity continue - if event[1] == 'E': - conditionalMessage += '* Line %s ' % (event[0]) - if event[3]: # Module - conditionalMessage += 'in %s ' % event[3] - conditionalMessage += '%s%s %s\n' % (event[1], event[2], event[4]) - if event[1] == 'W': + if event[1] == "E": + conditionalMessage += "* Line %s " % (event[0]) + if event[3]: # Module + conditionalMessage += "in %s " % event[3] + conditionalMessage += "%s%s %s\n" % (event[1], event[2], event[4]) + if event[1] == "W": warnings += 1 - if event[1] == 'W' and event[2] in reportWarnings: - conditionalMessage += '* Line %s ' % (event[0]) - if event[3]: # Module - conditionalMessage += 'in %s ' % event[3] - conditionalMessage += '%s%s %s\n' % (event[1], event[2], event[4]) + if event[1] == "W" and event[2] in reportWarnings: + conditionalMessage += "* Line %s " % (event[0]) + if event[3]: # Module + conditionalMessage += "in %s " % event[3] + conditionalMessage += "%s%s %s\n" % (event[1], event[2], event[4]) if conditionalMessage: - longMessage += ('\nAbbreviated pylint report for %s follows:\n' % filename) + conditionalMessage + longMessage += ( + "\nAbbreviated pylint report for %s follows:\n" % filename + ) + conditionalMessage longMessage += "* plus %s total warnings\n" % warnings longMessage += "* plus %s comments on code style\n" % comments issueID = None -if 'ghprbPullId' in os.environ: - issueID = os.environ['ghprbPullId'] +if "ghprbPullId" in os.environ: + issueID = os.environ["ghprbPullId"] if reportExists: - message = 'No pylint warnings for pull request %s.\n' % issueID + message = "No pylint warnings for pull request %s.\n" % issueID else: - message = 'No python changes for pull request %s.\n' % issueID + message = "No python changes for pull request %s.\n" % issueID if summaryMessage or longMessage: - message = 'Summary of pylint changes for pull request %s:\n' % issueID + summaryMessage + message = "Summary of pylint changes for pull request %s:\n" % issueID + summaryMessage message += longMessage -gh = Github(os.environ['DMWMBOT_TOKEN']) -codeRepo = os.environ.get('CODE_REPO', 'WMCore') -repoName = '%s/%s' % (os.environ['WMCORE_REPO'], codeRepo) +gh = Github(os.environ["DMWMBOT_TOKEN"]) +codeRepo = os.environ.get("CODE_REPO", "WMCore") +repoName = "%s/%s" % (os.environ["WMCORE_REPO"], codeRepo) issue = gh.get_repo(repoName).get_issue(int(issueID)) if len(message) > 250000: message = message[:250000] -issue.create_comment('%s' % message) +issue.create_comment("%s" % 
message) if failed: - print('Testing of python code. DMWM-FAIL-PYLINT') + print("Testing of python code. DMWM-FAIL-PYLINT") else: - print('Testing of python code. DMWM-SUCCEED-PYLINT') + print("Testing of python code. DMWM-SUCCEED-PYLINT") diff --git a/DMWM/CompareTests.py b/DMWM/CompareTests.py index 8695b20c2f3e..5235c31c51ea 100755 --- a/DMWM/CompareTests.py +++ b/DMWM/CompareTests.py @@ -19,22 +19,21 @@ unstableTests = [] try: - with open('code/test/etc/UnstableTests.txt') as unstableFile: + with open("code/test/etc/UnstableTests.txt") as unstableFile: for line in unstableFile: unstableTests.append(line.strip()) except: print("Was not able to open list of unstable tests") # Parse all the various nose xunit test reports looking for changes -filePattern = '*/nosetests-*.xml' +filePattern = "*/nosetests-*.xml" if len(sys.argv) == 2: filePattern = "*/%s-*.xml" % sys.argv[1] -for kind, directory in [('base', './MasterUnitTests/'), ('test', './LatestUnitTests/')]: +for kind, directory in [("base", "./MasterUnitTests/"), ("test", "./LatestUnitTests/")]: for xunitFile in glob.iglob(directory + filePattern): - ts, tr = xunitparser.parse(open(xunitFile)) for tc in ts: - testName = '%s:%s' % (tc.classname, tc.methodname) + testName = "%s:%s" % (tc.classname, tc.methodname) if testName in testResults: testResults[testName].update({kind: tc.result}) else: @@ -44,71 +43,79 @@ issueID, mode = None, None -if 'ghprbPullId' in os.environ: - issueID = os.environ['ghprbPullId'] - mode = 'PR' -elif 'TargetIssueID' in os.environ: - issueID = os.environ['TargetIssueID'] - mode = 'Daily' +if "ghprbPullId" in os.environ: + issueID = os.environ["ghprbPullId"] + mode = "PR" +elif "TargetIssueID" in os.environ: + issueID = os.environ["TargetIssueID"] + mode = "Daily" print("Comparing tests for issueID: {} in mode: {}".format(issueID, mode)) -message = 'Unit test changes for pull request %s:\n' % issueID -if mode == 'Daily': - message = 'Unit test changes for most recent test of master branch:\n' +message = "Unit test changes for pull request %s:\n" % issueID +if mode == "Daily": + message = "Unit test changes for most recent test of master branch:\n" changed = False stableChanged = False failed = False -errorConditions = ['error', 'failure'] +errorConditions = ["error", "failure"] for testName, testResult in sorted(testResults.items()): - if 'base' in testResult and 'test' in testResult and testName in unstableTests: - if testResult['base'] != testResult['test']: + if "base" in testResult and "test" in testResult and testName in unstableTests: + if testResult["base"] != testResult["test"]: changed = True - message += "* %s (unstable) changed from %s to %s\n" % (testName, testResult['base'], testResult['test']) - elif 'base' in testResult and 'test' in testResult: - if testResult['base'] != testResult['test']: + message += "* %s (unstable) changed from %s to %s\n" % ( + testName, + testResult["base"], + testResult["test"], + ) + elif "base" in testResult and "test" in testResult: + if testResult["base"] != testResult["test"]: changed = True stableChanged = True - message += "* %s changed from %s to %s\n" % (testName, testResult['base'], testResult['test']) - if testResult['test'] in errorConditions: + message += "* %s changed from %s to %s\n" % ( + testName, + testResult["base"], + testResult["test"], + ) + if testResult["test"] in errorConditions: failed = True - elif 'test' in testResult: + elif "test" in testResult: changed = True stableChanged = True - message += "* %s was added. 
Status is %s\n" % (testName, testResult['test']) - if testResult['test'] in errorConditions: + message += "* %s was added. Status is %s\n" % (testName, testResult["test"]) + if testResult["test"] in errorConditions: failed = True - elif 'base' in testResult: + elif "base" in testResult: changed = True stableChanged = True - message += "* %s was deleted. Prior status was %s\n" % (testName, testResult['base']) + message += "* %s was deleted. Prior status was %s\n" % (testName, testResult["base"]) if failed: - message += '\n\nPreviously working unit tests have failed!\n' + message += "\n\nPreviously working unit tests have failed!\n" -if mode == 'Daily': +if mode == "Daily": # Alan on 25/may/2021: then there is nothing else to be done print(message) sys.exit(0) -gh = Github(os.environ['DMWMBOT_TOKEN']) -codeRepo = os.environ.get('CODE_REPO', 'WMCore') -repoName = '%s/%s' % (os.environ['WMCORE_REPO'], codeRepo) +gh = Github(os.environ["DMWMBOT_TOKEN"]) +codeRepo = os.environ.get("CODE_REPO", "WMCore") +repoName = "%s/%s" % (os.environ["WMCORE_REPO"], codeRepo) issue = gh.get_repo(repoName).get_issue(int(issueID)) -if not changed and mode == 'Daily': +if not changed and mode == "Daily": message = "No changes to unit tests for latest build\n" elif not changed: message = "No changes to unit tests for pull request %s\n" % issueID -if mode == 'Daily' and stableChanged: - issue.create_comment('%s' % message) -elif mode != 'Daily': - issue.create_comment('%s' % message) +if mode == "Daily" and stableChanged: + issue.create_comment("%s" % message) +elif mode != "Daily": + issue.create_comment("%s" % message) if failed: - print('Testing of python code. DMWM-FAIL-UNIT') + print("Testing of python code. DMWM-FAIL-UNIT") else: - print('Testing of python code. DMWM-SUCCEED-UNIT') + print("Testing of python code. 
DMWM-SUCCEED-UNIT") diff --git a/DMWM/IdentifyPythonFiles.py b/DMWM/IdentifyPythonFiles.py index a6158f90c30c..da6f9a9c7a32 100755 --- a/DMWM/IdentifyPythonFiles.py +++ b/DMWM/IdentifyPythonFiles.py @@ -13,18 +13,18 @@ list_of_files = args[0] -with open(list_of_files, 'r') as changedFiles: +with open(list_of_files, "r") as changedFiles: for fileName in changedFiles: fileName = fileName.strip() if not fileName: continue - if fileName.endswith('.py'): + if fileName.endswith(".py"): print(fileName) continue try: - with open(fileName, 'r') as pyFile: + with open(fileName, "r") as pyFile: pyLines = pyFile.readlines() - if 'python' in pyLines[0]: + if "python" in pyLines[0]: print(fileName) continue except IOError: diff --git a/DMWM/IssueMessage.py b/DMWM/IssueMessage.py index 3fa8d1882a1e..ff47f777cca9 100755 --- a/DMWM/IssueMessage.py +++ b/DMWM/IssueMessage.py @@ -13,24 +13,22 @@ message = args[0] issueID = None -url = '' +url = "" -if 'ghprbPullId' in os.environ: - issueID = os.environ['ghprbPullId'] -if 'BUILD_URL' in os.environ: - url = os.environ['BUILD_URL'] - url = url.replace('cmsjenkins01.cern.ch:443', 'cmssdt.cern.ch') - url = url.replace('cmsjenkins02.cern.ch:443', 'cmssdt.cern.ch') - url = url.replace('cmsjenkins11.cern.ch:443', 'cmssdt.cern.ch') - message += '\nSee %s for details' % url +if "ghprbPullId" in os.environ: + issueID = os.environ["ghprbPullId"] +if "BUILD_URL" in os.environ: + url = os.environ["BUILD_URL"] + url = url.replace("cmsjenkins01.cern.ch:443", "cmssdt.cern.ch") + url = url.replace("cmsjenkins02.cern.ch:443", "cmssdt.cern.ch") + url = url.replace("cmsjenkins11.cern.ch:443", "cmssdt.cern.ch") + message += "\nSee %s for details" % url -gh = Github(os.environ['DMWMBOT_TOKEN']) +gh = Github(os.environ["DMWMBOT_TOKEN"]) -codeRepo = os.environ.get('CODE_REPO', 'WMCore') -repoName = '%s/%s' % (os.environ['WMCORE_REPO'], codeRepo) +codeRepo = os.environ.get("CODE_REPO", "WMCore") +repoName = "%s/%s" % (os.environ["WMCORE_REPO"], codeRepo) issue = gh.get_repo(repoName).get_issue(int(issueID)) issue.create_comment(message) - - diff --git a/DMWM/TestWatchdog.py b/DMWM/TestWatchdog.py index 3fff89ecddf9..69064ddf4fda 100755 --- a/DMWM/TestWatchdog.py +++ b/DMWM/TestWatchdog.py @@ -1,29 +1,36 @@ #! 
/usr/bin/env python -from __future__ import (print_function, division) +from __future__ import print_function, division import glob import psutil import sys import time -from psutil import (AccessDenied, NoSuchProcess) +from psutil import AccessDenied, NoSuchProcess time.sleep(60) testPid = 0 while not testPid: - print ('TESTWATCH: Polling') + print("TESTWATCH: Polling") for process in psutil.process_iter(): - try: - if 'python' in process.cmdline()[0] and 'setup.py' in process.cmdline()[1] and process.cmdline()[2] == 'test': + if ( + "python" in process.cmdline()[0] + and "setup.py" in process.cmdline()[1] + and process.cmdline()[2] == "test" + ): testPid = process.pid - print ('TESTWATCH: Found pid %s' % testPid) + print("TESTWATCH: Found pid %s" % testPid) except TypeError: - if 'python' in process.cmdline[0] and 'setup.py' in process.cmdline[1] and process.cmdline[2] == 'test': + if ( + "python" in process.cmdline[0] + and "setup.py" in process.cmdline[1] + and process.cmdline[2] == "test" + ): testPid = process.pid - print ('TESTWATCH: Found pid %s' % testPid) + print("TESTWATCH: Found pid %s" % testPid) except (IndexError, AccessDenied, NoSuchProcess): pass time.sleep(10) @@ -38,7 +45,7 @@ userCPU = process.cpu_times().user except AttributeError: userCPU = process.get_cpu_times()[0] - for xunitFile in glob.iglob('nosetests*.xml'): + for xunitFile in glob.iglob("nosetests*.xml"): foundXML = True if not foundXML: @@ -46,7 +53,7 @@ else: xmlAge = time.time() - noXMLTime if xmlAge > 450: - print('TESTWATCH: XML file is %s seconds old. Killing process' % xmlAge) + print("TESTWATCH: XML file is %s seconds old. Killing process" % xmlAge) process.terminate() time.sleep(10) process.kill() diff --git a/DMWM/xunitparser.py b/DMWM/xunitparser.py index 06012ffb18d7..e57a1c9a26e5 100644 --- a/DMWM/xunitparser.py +++ b/DMWM/xunitparser.py @@ -18,7 +18,7 @@ def to_timedelta(val): class TestResult(unittest.TestResult): def _exc_info_to_string(self, err, test): err = (e for e in err if e) - return ': '.join(err) + return ": ".join(err) class TestCase(unittest.TestCase): @@ -35,8 +35,7 @@ def __str__(self): return "%s (%s)" % (self.methodname, self.classname) def __repr__(self): - return "<%s testMethod=%s>" % \ - (self.classname, self.methodname) + return "<%s testMethod=%s>" % (self.classname, self.methodname) def __hash__(self): return hash((type(self), self.classname, self.methodname)) @@ -45,24 +44,24 @@ def id(self): return "%s.%s" % (self.classname, self.methodname) def seed(self, result, typename=None, message=None, trace=None): - """ Provide the expected result """ + """Provide the expected result""" self.result, self.typename, self.message, self.trace = result, typename, message, trace def run(self, tr=None): - """ Fake run() that produces the seeded result """ + """Fake run() that produces the seeded result""" tr = tr or self.TR_CLASS() tr.startTest(self) - if self.result == 'success': + if self.result == "success": tr.addSuccess(self) - elif self.result == 'skipped': + elif self.result == "skipped": try: - tr.addSkip(self, '%s: %s' % (self.typename, self._textMessage())) + tr.addSkip(self, "%s: %s" % (self.typename, self._textMessage())) except AttributeError: - print ("Skip not supported") - elif self.result == 'error': + print("Skip not supported") + elif self.result == "error": tr.addError(self, (self.typename, self._textMessage())) - elif self.result == 'failure': + elif self.result == "failure": tr.addFailure(self, (self.typename, self._textMessage())) tr.stopTest(self) @@ -70,46 
+69,46 @@ def run(self, tr=None): def _textMessage(self): msg = (e for e in (self.message, self.trace) if e) - return '\n\n'.join(msg) or None + return "\n\n".join(msg) or None @property def alltext(self): err = (e for e in (self.typename, self.message) if e) - err = ': '.join(err) + err = ": ".join(err) txt = (e for e in (err, self.trace) if e) - return '\n\n'.join(txt) or None + return "\n\n".join(txt) or None def setUp(self): - """ Dummy method so __init__ does not fail """ + """Dummy method so __init__ does not fail""" pass def tearDown(self): - """ Dummy method so __init__ does not fail """ + """Dummy method so __init__ does not fail""" pass def runTest(self): - """ Dummy method so __init__ does not fail """ + """Dummy method so __init__ does not fail""" self.run() @property def basename(self): - return self.classname.rpartition('.')[2] + return self.classname.rpartition(".")[2] @property def success(self): - return self.result == 'success' + return self.result == "success" @property def skipped(self): - return self.result == 'skipped' + return self.result == "skipped" @property def failed(self): - return self.result == 'failure' + return self.result == "failure" @property def errored(self): - return self.result == 'error' + return self.result == "error" @property def good(self): @@ -124,8 +123,8 @@ def bad(self): @property def stdall(self): - """ All system output """ - return '\n'.join([out for out in (self.stdout, self.stderr) if out]) + """All system output""" + return "\n".join([out for out in (self.stdout, self.stderr) if out]) class TestSuite(unittest.TestSuite): @@ -148,7 +147,7 @@ def parse(self, source): def parse_root(self, root): ts = self.TS_CLASS() - if root.tag == 'testsuites': + if root.tag == "testsuites": for subroot in root: self.parse_testsuite(subroot, ts) else: @@ -156,60 +155,60 @@ def parse_root(self, root): tr = ts.run(self.TR_CLASS()) - tr.time = to_timedelta(root.attrib.get('time')) + tr.time = to_timedelta(root.attrib.get("time")) # check totals if they are in the root XML element - if 'errors' in root.attrib: - assert len(tr.errors) == int(root.attrib['errors']) - if 'failures' in root.attrib: - assert len(tr.failures) == int(root.attrib['failures']) + if "errors" in root.attrib: + assert len(tr.errors) == int(root.attrib["errors"]) + if "failures" in root.attrib: + assert len(tr.failures) == int(root.attrib["failures"]) try: - if 'skip' in root.attrib: - assert len(tr.skipped) == int(root.attrib['skip']) + if "skip" in root.attrib: + assert len(tr.skipped) == int(root.attrib["skip"]) except AttributeError: pass - if 'tests' in root.attrib: - assert len(list(ts)) == int(root.attrib['tests']) + if "tests" in root.attrib: + assert len(list(ts)) == int(root.attrib["tests"]) return (ts, tr) def parse_testsuite(self, root, ts): - assert root.tag == 'testsuite' - ts.name = root.attrib.get('name') - ts.package = root.attrib.get('package') + assert root.tag == "testsuite" + ts.name = root.attrib.get("name") + ts.package = root.attrib.get("package") for el in root: - if el.tag == 'testcase': + if el.tag == "testcase": self.parse_testcase(el, ts) - if el.tag == 'properties': + if el.tag == "properties": self.parse_properties(el, ts) - if el.tag == 'system-out' and el.text: + if el.tag == "system-out" and el.text: ts.stdout = el.text.strip() - if el.tag == 'system-err' and el.text: + if el.tag == "system-err" and el.text: ts.stderr = el.text.strip() def parse_testcase(self, el, ts): - tc_classname = el.attrib.get('classname') or ts.name - tc = 
self.TC_CLASS(tc_classname, el.attrib['name'])
-        tc.seed('success', trace=el.text or None)
-        tc.time = to_timedelta(el.attrib.get('time'))
+        tc_classname = el.attrib.get("classname") or ts.name
+        tc = self.TC_CLASS(tc_classname, el.attrib["name"])
+        tc.seed("success", trace=el.text or None)
+        tc.time = to_timedelta(el.attrib.get("time"))
         message = None
         text = None
         for e in el:
             # error takes over failure in JUnit 4
-            if e.tag in ('failure', 'error', 'skipped'):
-                tc = self.TC_CLASS(tc_classname, el.attrib['name'])
+            if e.tag in ("failure", "error", "skipped"):
+                tc = self.TC_CLASS(tc_classname, el.attrib["name"])
                 result = e.tag
-                typename = e.attrib.get('type')
+                typename = e.attrib.get("type")
                 # reuse old if empty
-                message = e.attrib.get('message') or message
+                message = e.attrib.get("message") or message
                 text = e.text or text
                 tc.seed(result, typename, message, text)
-                tc.time = to_timedelta(el.attrib.get('time'))
-            if e.tag == 'system-out' and e.text:
+                tc.time = to_timedelta(el.attrib.get("time"))
+            if e.tag == "system-out" and e.text:
                 tc.stdout = e.text.strip()
-            if e.tag == 'system-err' and e.text:
+            if e.tag == "system-err" and e.text:
                 tc.stderr = e.text.strip()
         # add either the original "success" tc or a tc created by elements
@@ -217,9 +216,9 @@ def parse_testcase(self, el, ts):
     def parse_properties(self, el, ts):
         for e in el:
-            if e.tag == 'property':
-                assert e.attrib['name'] not in ts.properties
-                ts.properties[e.attrib['name']] = e.attrib['value']
+            if e.tag == "property":
+                assert e.attrib["name"] not in ts.properties
+                ts.properties[e.attrib["name"]] = e.attrib["value"]
 def parse(source):
diff --git a/README.md b/README.md
index ac9fe0da5288..91da3e22c5b9 100644
--- a/README.md
+++ b/README.md
@@ -40,3 +40,20 @@ basis.
 - [es-templates](https://github.com/cms-sw/cms-bot/tree/master/es-templates): contains the templates for the logged dataes-templates.
 - [es-cleanup-indexes](https://github.com/cms-sw/cms-bot/blob/master/es-cleanup-indexes): cleanups old indexes in elasticsearch.
+
+# Code style
+
+This project uses [Black](https://pypi.org/project/black) to ensure uniform code style. The following options are used:
+
+```
+--line-length 99 --target-version py36 --target-version py37 --target-version py38 --target-version py39 --target-version py310 --target-version py311
+```
+
+## Ignoring formatting commits
+
+We record commits containing only code-style changes in [`.git-blame-ignore-revs`](.git-blame-ignore-revs) file.
Use the following command on your local +copy of this repository to exculse these commits from `git blame`: + +``` +$ git config blame.ignoreRevsFile .git-blame-ignore-revs +``` diff --git a/RelValArgs.py b/RelValArgs.py index 84847525d73e..5efc8bd204dc 100755 --- a/RelValArgs.py +++ b/RelValArgs.py @@ -6,64 +6,89 @@ from _py2with3compatibility import run_cmd monitor_script = "" -if 'CMS_DISABLE_MONITORING' not in environ: - monitor_script = dirname(abspath(__file__))+"/monitor_workflow.py" - e, o = run_cmd("python2 -c 'import psutil'") - if e: - e, o = run_cmd("python3 -c 'import psutil'") +if "CMS_DISABLE_MONITORING" not in environ: + monitor_script = dirname(abspath(__file__)) + "/monitor_workflow.py" + e, o = run_cmd("python2 -c 'import psutil'") if e: - print("Monitering of relval steps disabled: import psutils failed") - monitor_script = "" + e, o = run_cmd("python3 -c 'import psutil'") + if e: + print("Monitering of relval steps disabled: import psutils failed") + monitor_script = "" + else: + monitor_script = "python3 " + monitor_script else: - monitor_script = "python3 " + monitor_script - else: - monitor_script = "python2 " + monitor_script + monitor_script = "python2 " + monitor_script -RELVAL_KEYS = {"customiseWithTimeMemorySummary":[], - "enableIMT":[], - "PREFIX":[], - "USER_OVERRIDE_OPTS": [], - "USER_OVERRIDE_COMMAND_OPTS": [], - "JOB_REPORT":[], - "USE_INPUT":[], - "THREADED":[], - "WORKFLOWS":[], - "TIMEOUT": [] - } -THREADED_ROOT="NON_THREADED_CMSSW" -THREADED_IBS="NON_THREADED_CMSSW" -if not 'CMSSW_NON_THREADED' in environ: - THREADED_ROOT="CMSSW_9_[1-9]_ROOT6_X_.+" - THREADED_IBS="CMSSW_(8_[1-9][0-9]*|(9|[1-9][0-9]+)_[0-9]+)_.+:([a-z]+)([6-9]|[1-9][0-9]+)_[^_]+_gcc(5[3-9]|[6-9]|[1-9][0-9])[0-9]*" -RELVAL_KEYS["customiseWithTimeMemorySummary"].append([".+" ,"--customise Validation/Performance/TimeMemorySummary.customiseWithTimeMemorySummary"]) -RELVAL_KEYS["PREFIX"].append(["CMSSW_[1-7]_.+" ,"--prefix '%s timeout --signal SIGSEGV @TIMEOUT@ '" % monitor_script]) -RELVAL_KEYS["PREFIX"].append(["CMSSW_.+" ,"--prefix '%s timeout --signal SIGTERM @TIMEOUT@ '" % monitor_script]) -RELVAL_KEYS["JOB_REPORT"].append([".+" ,"--job-reports"]) -RELVAL_KEYS["USE_INPUT"].append([".+" ,"--useInput all"]) -RELVAL_KEYS["THREADED"].append([THREADED_IBS ,"-t 4"]) -RELVAL_KEYS["WORKFLOWS"].append(["_SLHCDEV_" ,"-w upgrade -l 10000,10061,10200,10261,10800,10861,12200,12261,14400,14461,12600,12661,14000,14061,12800,12861,13000,13061,13800,13861"]) -RELVAL_KEYS["WORKFLOWS"].append(["_SLHC_" ,"-w upgrade -l 10000,10061,10200,10261,12200,12261,14400,14461,12600,12661,14000,14061,12800,12861,13000,13061,13800,13861"]) -RELVAL_KEYS["WORKFLOWS"].append(["_GPU_" ,"-w gpu"]) -RELVAL_KEYS["enableIMT"].append([THREADED_ROOT ,"--customise FWCore/Concurrency/enableIMT.enableIMT"]) -RELVAL_KEYS["TIMEOUT"].append(["(_ASAN_|_ppc64|_aarch64_)" ,"14400"]) -RELVAL_KEYS["TIMEOUT"].append([".+" ,"9000"]) -if 'CMS_RELVALS_USER_OPTS' in environ: - RELVAL_KEYS["USER_OVERRIDE_OPTS"].append([".+", environ["CMS_RELVALS_USER_OPTS"]]) -if 'CMS_RELVALS_USER_COMMAND_OPTS' in environ: - RELVAL_KEYS["USER_OVERRIDE_COMMAND_OPTS"].append([".+", environ["CMS_RELVALS_USER_COMMAND_OPTS"]]) +RELVAL_KEYS = { + "customiseWithTimeMemorySummary": [], + "enableIMT": [], + "PREFIX": [], + "USER_OVERRIDE_OPTS": [], + "USER_OVERRIDE_COMMAND_OPTS": [], + "JOB_REPORT": [], + "USE_INPUT": [], + "THREADED": [], + "WORKFLOWS": [], + "TIMEOUT": [], +} +THREADED_ROOT = "NON_THREADED_CMSSW" +THREADED_IBS = "NON_THREADED_CMSSW" +if not 
"CMSSW_NON_THREADED" in environ: + THREADED_ROOT = "CMSSW_9_[1-9]_ROOT6_X_.+" + THREADED_IBS = "CMSSW_(8_[1-9][0-9]*|(9|[1-9][0-9]+)_[0-9]+)_.+:([a-z]+)([6-9]|[1-9][0-9]+)_[^_]+_gcc(5[3-9]|[6-9]|[1-9][0-9])[0-9]*" +RELVAL_KEYS["customiseWithTimeMemorySummary"].append( + [".+", "--customise Validation/Performance/TimeMemorySummary.customiseWithTimeMemorySummary"] +) +RELVAL_KEYS["PREFIX"].append( + ["CMSSW_[1-7]_.+", "--prefix '%s timeout --signal SIGSEGV @TIMEOUT@ '" % monitor_script] +) +RELVAL_KEYS["PREFIX"].append( + ["CMSSW_.+", "--prefix '%s timeout --signal SIGTERM @TIMEOUT@ '" % monitor_script] +) +RELVAL_KEYS["JOB_REPORT"].append([".+", "--job-reports"]) +RELVAL_KEYS["USE_INPUT"].append([".+", "--useInput all"]) +RELVAL_KEYS["THREADED"].append([THREADED_IBS, "-t 4"]) +RELVAL_KEYS["WORKFLOWS"].append( + [ + "_SLHCDEV_", + "-w upgrade -l 10000,10061,10200,10261,10800,10861,12200,12261,14400,14461,12600,12661,14000,14061,12800,12861,13000,13061,13800,13861", + ] +) +RELVAL_KEYS["WORKFLOWS"].append( + [ + "_SLHC_", + "-w upgrade -l 10000,10061,10200,10261,12200,12261,14400,14461,12600,12661,14000,14061,12800,12861,13000,13061,13800,13861", + ] +) +RELVAL_KEYS["WORKFLOWS"].append(["_GPU_", "-w gpu"]) +RELVAL_KEYS["enableIMT"].append( + [THREADED_ROOT, "--customise FWCore/Concurrency/enableIMT.enableIMT"] +) +RELVAL_KEYS["TIMEOUT"].append(["(_ASAN_|_ppc64|_aarch64_)", "14400"]) +RELVAL_KEYS["TIMEOUT"].append([".+", "9000"]) +if "CMS_RELVALS_USER_OPTS" in environ: + RELVAL_KEYS["USER_OVERRIDE_OPTS"].append([".+", environ["CMS_RELVALS_USER_OPTS"]]) +if "CMS_RELVALS_USER_COMMAND_OPTS" in environ: + RELVAL_KEYS["USER_OVERRIDE_COMMAND_OPTS"].append( + [".+", environ["CMS_RELVALS_USER_COMMAND_OPTS"]] + ) RELVAL_ARGS = [] RELVAL_ARGS.append({}) -#For SLHC releases -RELVAL_ARGS[len(RELVAL_ARGS)-1]["_SLHC(DEV|)_"]=""" +# For SLHC releases +RELVAL_ARGS[len(RELVAL_ARGS) - 1][ + "_SLHC(DEV|)_" +] = """ @USE_INPUT@ @WORKFLOWS@ """ -RELVAL_ARGS[len(RELVAL_ARGS)-1]["CMSSW_4_2_"]="" +RELVAL_ARGS[len(RELVAL_ARGS) - 1]["CMSSW_4_2_"] = "" RELVAL_ARGS.append({}) -#For rleease cycles >= 7 -RELVAL_ARGS[len(RELVAL_ARGS)-1]["CMSSW_([1-9][0-9]|[7-9])_"]=""" +# For rleease cycles >= 7 +RELVAL_ARGS[len(RELVAL_ARGS) - 1][ + "CMSSW_([1-9][0-9]|[7-9])_" +] = """ @USE_INPUT@ @JOB_REPORT@ --command " @@ -78,51 +103,57 @@ """ RELVAL_ARGS.append({}) -#For all other releases -RELVAL_ARGS[len(RELVAL_ARGS)-1][".+"]=""" +# For all other releases +RELVAL_ARGS[len(RELVAL_ARGS) - 1][ + ".+" +] = """ @USE_INPUT@ """ + def isThreaded(release, arch): - if re.search(THREADED_IBS,release+":"+arch): return True - return False + if re.search(THREADED_IBS, release + ":" + arch): + return True + return False + def GetMatrixOptions(release, arch, dasfile=None): - rel_arch = release+":"+arch - cmd = "" - for rel in RELVAL_ARGS: - for exp in rel: - if re.search(exp,rel_arch): - cmd = rel[exp].replace("\n"," ") - break - if cmd: break - m=re.search("(@([a-zA-Z_]+)@)",cmd) - while m: - key = m.group(2) - val = "" - if key in RELVAL_KEYS: - for exp,data in RELVAL_KEYS[key]: - if re.search(exp,rel_arch): - val = data + " " - break - cmd = cmd.replace(m.group(1), val) - m=re.search("(@([a-zA-Z_]+)@)",cmd) - - return re.sub("\s+"," ",cmd) + rel_arch = release + ":" + arch + cmd = "" + for rel in RELVAL_ARGS: + for exp in rel: + if re.search(exp, rel_arch): + cmd = rel[exp].replace("\n", " ") + break + if cmd: + break + m = re.search("(@([a-zA-Z_]+)@)", cmd) + while m: + key = m.group(2) + val = "" + if key in RELVAL_KEYS: + for exp, data in 
RELVAL_KEYS[key]: + if re.search(exp, rel_arch): + val = data + " " + break + cmd = cmd.replace(m.group(1), val) + m = re.search("(@([a-zA-Z_]+)@)", cmd) -def FixWFArgs(release, arch, wf, args): - if isThreaded(release, arch): - if int(release.split("_")[1])>=12: - return args - NonThreadedWF = ["101.0","102.0"] - if wf in NonThreadedWF: - for k in [ "THREADED", "enableIMT" ]: - if (k in RELVAL_KEYS): - thds = [d for e,d in RELVAL_KEYS[k] if THREADED_IBS == e] - roots = [d for e,d in RELVAL_KEYS[k] if THREADED_ROOT == e] - if thds: - args = args.replace(thds[0],"") - elif roots: - args = args.replace(roots[0],"") - return args + return re.sub("\s+", " ", cmd) + +def FixWFArgs(release, arch, wf, args): + if isThreaded(release, arch): + if int(release.split("_")[1]) >= 12: + return args + NonThreadedWF = ["101.0", "102.0"] + if wf in NonThreadedWF: + for k in ["THREADED", "enableIMT"]: + if k in RELVAL_KEYS: + thds = [d for e, d in RELVAL_KEYS[k] if THREADED_IBS == e] + roots = [d for e, d in RELVAL_KEYS[k] if THREADED_ROOT == e] + if thds: + args = args.replace(thds[0], "") + elif roots: + args = args.replace(roots[0], "") + return args diff --git a/_py2with3compatibility.py b/_py2with3compatibility.py index e79b1525c748..d4288f98ebf0 100644 --- a/_py2with3compatibility.py +++ b/_py2with3compatibility.py @@ -2,6 +2,7 @@ Logic to take care of imports depending on the python version """ import sys + if sys.version_info[0] == 2: # python 2 modules from commands import getstatusoutput as run_cmd @@ -10,8 +11,18 @@ # urllib from urllib import urlencode, quote_plus, quote, unquote - from urllib2 import Request, urlopen, HTTPSHandler, build_opener, install_opener, unquote, HTTPError, \ - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPCookieProcessor + from urllib2 import ( + Request, + urlopen, + HTTPSHandler, + build_opener, + install_opener, + unquote, + HTTPError, + HTTPPasswordMgrWithDefaultRealm, + HTTPBasicAuthHandler, + HTTPCookieProcessor, + ) from urlparse import urlparse from cookielib import CookieJar else: @@ -22,10 +33,19 @@ # urllib from urllib.parse import urlencode, quote_plus, quote, unquote, urlparse - from urllib.request import Request, urlopen, HTTPSHandler, build_opener, install_opener, \ - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, HTTPCookieProcessor + from urllib.request import ( + Request, + urlopen, + HTTPSHandler, + build_opener, + install_opener, + HTTPPasswordMgrWithDefaultRealm, + HTTPBasicAuthHandler, + HTTPCookieProcessor, + ) from urllib.error import HTTPError from http.cookiejar import CookieJar + def cmp_f(a, b): - return ((a > b) - (a < b)) + return (a > b) - (a < b) diff --git a/add-externals-gh-labels.py b/add-externals-gh-labels.py index 91b0b424abe4..93e6e74e1887 100755 --- a/add-externals-gh-labels.py +++ b/add-externals-gh-labels.py @@ -1,124 +1,201 @@ #!/usr/bin/env python3 from github import Github from os.path import expanduser, dirname, abspath, join, exists -from githublabels import LABEL_TYPES, COMMON_LABELS, COMPARISON_LABELS, CMSSW_BUILD_LABELS, LABEL_COLORS -from categories import COMMON_CATEGORIES, EXTERNAL_CATEGORIES, EXTERNAL_REPOS, CMSSW_REPOS, CMSSW_CATEGORIES +from githublabels import ( + LABEL_TYPES, + COMMON_LABELS, + COMPARISON_LABELS, + CMSSW_BUILD_LABELS, + LABEL_COLORS, +) +from categories import ( + COMMON_CATEGORIES, + EXTERNAL_CATEGORIES, + EXTERNAL_REPOS, + CMSSW_REPOS, + CMSSW_CATEGORIES, +) from cms_static import VALID_CMS_SW_REPOS_FOR_TESTS, GH_CMSSW_ORGANIZATION from socket import setdefaulttimeout from 
github_utils import api_rate_limits from cmsutils import get_config_map_properties from sys import argv + setdefaulttimeout(120) SCRIPT_DIR = dirname(abspath(argv[0])) -def setRepoLabels (gh, repo_name, all_labels, dryRun=False, ignore=[]): - repos = [] - if not "/" in repo_name: - user = gh.get_user(repo_name) - for repo in user.get_repos(): - skip = False - if repo.full_name in ignore: - skip = True - elif repo_name==GH_CMSSW_ORGANIZATION: - if repo.name not in VALID_CMS_SW_REPOS_FOR_TESTS: - skip = True - if skip: - print("Ignoring repo:",repo.full_name) - continue - repos.append(repo) - else: - repos.append(gh.get_repo(repo_name)) - api_rate_limits(gh) - for repo in repos: - print("Checking repository ", repo.full_name, ", DryRun:",dryRun) - xfile = repo.full_name.replace("/","-")+".done" - if exists(xfile): continue - cur_labels = {} - for lab in repo.get_labels(): - cur_labels [lab.name]=lab + +def setRepoLabels(gh, repo_name, all_labels, dryRun=False, ignore=[]): + repos = [] + if not "/" in repo_name: + user = gh.get_user(repo_name) + for repo in user.get_repos(): + skip = False + if repo.full_name in ignore: + skip = True + elif repo_name == GH_CMSSW_ORGANIZATION: + if repo.name not in VALID_CMS_SW_REPOS_FOR_TESTS: + skip = True + if skip: + print("Ignoring repo:", repo.full_name) + continue + repos.append(repo) + else: + repos.append(gh.get_repo(repo_name)) api_rate_limits(gh) - for lab in all_labels: - if not lab in cur_labels: - print(" Creating new label ",lab,"=>",all_labels[lab]) - if not dryRun: - repo.create_label(lab, all_labels[lab]) - api_rate_limits(gh) - elif cur_labels[lab].color != all_labels[lab]: - if not dryRun: - cur_labels[lab].edit(lab, all_labels[lab]) - api_rate_limits(gh) - print(" Label ",lab," color updated: ",cur_labels[lab].color ," => ",all_labels[lab]) - ref = open(xfile,"w") - ref.close() + for repo in repos: + print("Checking repository ", repo.full_name, ", DryRun:", dryRun) + xfile = repo.full_name.replace("/", "-") + ".done" + if exists(xfile): + continue + cur_labels = {} + for lab in repo.get_labels(): + cur_labels[lab.name] = lab + api_rate_limits(gh) + for lab in all_labels: + if not lab in cur_labels: + print(" Creating new label ", lab, "=>", all_labels[lab]) + if not dryRun: + repo.create_label(lab, all_labels[lab]) + api_rate_limits(gh) + elif cur_labels[lab].color != all_labels[lab]: + if not dryRun: + cur_labels[lab].edit(lab, all_labels[lab]) + api_rate_limits(gh) + print( + " Label ", + lab, + " color updated: ", + cur_labels[lab].color, + " => ", + all_labels[lab], + ) + ref = open(xfile, "w") + ref.close() + if __name__ == "__main__": - from optparse import OptionParser - parser = OptionParser(usage="%prog [-n|--dry-run] [-e|--externals] [-c|--cmssw] [-a|--all]") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-e", "--externals", dest="externals", action="store_true", help="Only process CMS externals repositories", default=False) - parser.add_option("-u", "--users", dest="users", action="store_true", help="Only process Users externals repositories", default=False) - parser.add_option("-c", "--cmssw", dest="cmssw", action="store_true", help="Only process "+",".join(CMSSW_REPOS)+" repository", default=False) - parser.add_option("-r", "--repository",dest="repository", help="Only process the selected repository.", type=str, default=None) - parser.add_option("-a", "--all", dest="all", action="store_true", help="Process all CMS repository i.e. 
externals and cmssw", default=False) - opts, args = parser.parse_args() + from optparse import OptionParser - if opts.all: - opts.externals = True - opts.cmssw = True - elif (not opts.externals) and (not opts.cmssw) and (not opts.users): - parser.error("Too few arguments, please use either -e, -c or -u") + parser = OptionParser(usage="%prog [-n|--dry-run] [-e|--externals] [-c|--cmssw] [-a|--all]") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-e", + "--externals", + dest="externals", + action="store_true", + help="Only process CMS externals repositories", + default=False, + ) + parser.add_option( + "-u", + "--users", + dest="users", + action="store_true", + help="Only process Users externals repositories", + default=False, + ) + parser.add_option( + "-c", + "--cmssw", + dest="cmssw", + action="store_true", + help="Only process " + ",".join(CMSSW_REPOS) + " repository", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Only process the selected repository.", + type=str, + default=None, + ) + parser.add_option( + "-a", + "--all", + dest="all", + action="store_true", + help="Process all CMS repository i.e. externals and cmssw", + default=False, + ) + opts, args = parser.parse_args() - import repo_config - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - api_rate_limits(gh) + if opts.all: + opts.externals = True + opts.cmssw = True + elif (not opts.externals) and (not opts.cmssw) and (not opts.users): + parser.error("Too few arguments, please use either -e, -c or -u") + + import repo_config + + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + api_rate_limits(gh) - if opts.cmssw or opts.externals: - all_labels = COMMON_LABELS - for cat in COMMON_CATEGORIES+EXTERNAL_CATEGORIES+list(CMSSW_CATEGORIES.keys()): - for lab in LABEL_TYPES: - all_labels[cat+"-"+lab]=LABEL_TYPES[lab] - for lab in COMPARISON_LABELS: - all_labels[lab] = COMPARISON_LABELS[lab] + if opts.cmssw or opts.externals: + all_labels = COMMON_LABELS + for cat in COMMON_CATEGORIES + EXTERNAL_CATEGORIES + list(CMSSW_CATEGORIES.keys()): + for lab in LABEL_TYPES: + all_labels[cat + "-" + lab] = LABEL_TYPES[lab] + for lab in COMPARISON_LABELS: + all_labels[lab] = COMPARISON_LABELS[lab] - if opts.externals: - repos = EXTERNAL_REPOS if not opts.repository else [opts.repository] - for repo_name in repos: - setRepoLabels (gh, repo_name, all_labels, opts.dryRun, ignore=CMSSW_REPOS) + if opts.externals: + repos = EXTERNAL_REPOS if not opts.repository else [opts.repository] + for repo_name in repos: + setRepoLabels(gh, repo_name, all_labels, opts.dryRun, ignore=CMSSW_REPOS) - if opts.cmssw: - for lab in CMSSW_BUILD_LABELS: - all_labels[lab] = CMSSW_BUILD_LABELS[lab] - specs = get_config_map_properties() - for s in specs: - if 'DISABLED' in s: continue - if 'IB_ONLY' in s: continue - arch = s['SCRAM_ARCH'] - for ltype in ['build', 'installation', 'tool-conf', 'upload']: - all_labels['%s-%s-error' % (arch, ltype)] = LABEL_COLORS["rejected"] - all_labels['%s-%s-ok' % (arch, ltype)] = LABEL_COLORS["approved"] - for inproc in [ 'building', 'tool-conf-building', 'uploading', 'build-queued', 'tool-conf-waiting']: - all_labels[arch+'-'+inproc] = LABEL_COLORS["hold"] - all_labels[arch+'-finished'] = LABEL_COLORS["approved"] - repos = CMSSW_REPOS if not opts.repository else [opts.repository] - for repo_name in CMSSW_REPOS: - 
setRepoLabels (gh, repo_name, all_labels, opts.dryRun) + if opts.cmssw: + for lab in CMSSW_BUILD_LABELS: + all_labels[lab] = CMSSW_BUILD_LABELS[lab] + specs = get_config_map_properties() + for s in specs: + if "DISABLED" in s: + continue + if "IB_ONLY" in s: + continue + arch = s["SCRAM_ARCH"] + for ltype in ["build", "installation", "tool-conf", "upload"]: + all_labels["%s-%s-error" % (arch, ltype)] = LABEL_COLORS["rejected"] + all_labels["%s-%s-ok" % (arch, ltype)] = LABEL_COLORS["approved"] + for inproc in [ + "building", + "tool-conf-building", + "uploading", + "build-queued", + "tool-conf-waiting", + ]: + all_labels[arch + "-" + inproc] = LABEL_COLORS["hold"] + all_labels[arch + "-finished"] = LABEL_COLORS["approved"] + repos = CMSSW_REPOS if not opts.repository else [opts.repository] + for repo_name in CMSSW_REPOS: + setRepoLabels(gh, repo_name, all_labels, opts.dryRun) - if opts.users: - from glob import glob - for rconf in glob(join(SCRIPT_DIR,"repos","*","*","repo_config.py")): - repo_data = rconf.split("/")[-4:-1] - exec('from '+".".join(repo_data)+' import repo_config') - try: - if not repo_config.ADD_LABELS: continue - except: continue - exec('from '+".".join(repo_data)+' import categories') - print(repo_config.GH_TOKEN, repo_config.GH_REPO_FULLNAME) - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - all_labels = COMMON_LABELS - for lab in COMPARISON_LABELS: - all_labels[lab] = COMPARISON_LABELS[lab] - for cat in categories.COMMON_CATEGORIES+list(categories.CMSSW_CATEGORIES.keys()): - for lab in LABEL_TYPES: - all_labels[cat+"-"+lab]=LABEL_TYPES[lab] - setRepoLabels (gh, repo_config.GH_REPO_FULLNAME, all_labels, opts.dryRun) + if opts.users: + from glob import glob + for rconf in glob(join(SCRIPT_DIR, "repos", "*", "*", "repo_config.py")): + repo_data = rconf.split("/")[-4:-1] + exec("from " + ".".join(repo_data) + " import repo_config") + try: + if not repo_config.ADD_LABELS: + continue + except: + continue + exec("from " + ".".join(repo_data) + " import categories") + print(repo_config.GH_TOKEN, repo_config.GH_REPO_FULLNAME) + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + all_labels = COMMON_LABELS + for lab in COMPARISON_LABELS: + all_labels[lab] = COMPARISON_LABELS[lab] + for cat in categories.COMMON_CATEGORIES + list(categories.CMSSW_CATEGORIES.keys()): + for lab in LABEL_TYPES: + all_labels[cat + "-" + lab] = LABEL_TYPES[lab] + setRepoLabels(gh, repo_config.GH_REPO_FULLNAME, all_labels, opts.dryRun) diff --git a/backport-pr.py b/backport-pr.py index 503b8cc9c635..c9ead23213d4 100755 --- a/backport-pr.py +++ b/backport-pr.py @@ -7,55 +7,102 @@ from cms_static import GH_CMSSW_ORGANIZATION from cms_static import GH_CMSSW_REPO from _py2with3compatibility import run_cmd -CMSSW_GIT_REF="/cvmfs/cms.cern.ch/cmssw.git.daily" -def backport_pull (repo, pr, branch): - pr_branch = pr.base.label.split(":")[1] - print("Source Branch:",pr_branch) - if pr_branch == branch: return "Warning: Can not backport, same branch %s vs %s" % (pr_branch, branch),False - br = gh_repo.get_branch(branch) - commits = [] - for c in pr.get_commits().reversed: commits.insert(0,"git cherry-pick %s" % c.sha) - if not commits: return "There are no commits to backport",False - print("Cherry-pick commands:") - print(" "+"\n ".join(commits)) - if len(commits)>=250: - return "Error: Too many commits in PR %s\nBot can only handle max 250 commits." 
% len(commits),False - new_branch = "backport-%s-%s" % (branch.replace("/","_"), pr.number) - print("New Branch:",new_branch) - git_ref = "" - if repo.name == "cmssw": git_ref = "--reference "+CMSSW_GIT_REF - print("GIT REF:",git_ref) - e , o = run_cmd("rm -rf pr_backport; git clone --branch %s %s git@github.com:%s pr_backport && cd pr_backport && git checkout -b %s" % (branch, git_ref, repo.full_name, new_branch)) - if e: - print(o) - exit(1) - e, o = run_cmd('cd pr_backport; %s' % ";".join(commits)) - if e: return "Error: Failed to cherry-pick commits. Please backport this PR yourself.\n```"+o+"\n```",False - e, o = run_cmd("cd pr_backport; git push origin %s" % new_branch) - if e: - print(o) - exit(1) - run_cmd("rm -rf pr_backport") - newBody = "backport of #%s\n\n%s" %(pr.number, pr.body) - newPR = repo.create_pull(title = pr.title, body = newBody, base = branch, head = new_branch ) - return "Successfully backported PR #%s as #%s for branch %s" % (pr.number, newPR.number, branch),True +CMSSW_GIT_REF = "/cvmfs/cms.cern.ch/cmssw.git.daily" + + +def backport_pull(repo, pr, branch): + pr_branch = pr.base.label.split(":")[1] + print("Source Branch:", pr_branch) + if pr_branch == branch: + return "Warning: Can not backport, same branch %s vs %s" % (pr_branch, branch), False + br = gh_repo.get_branch(branch) + commits = [] + for c in pr.get_commits().reversed: + commits.insert(0, "git cherry-pick %s" % c.sha) + if not commits: + return "There are no commits to backport", False + print("Cherry-pick commands:") + print(" " + "\n ".join(commits)) + if len(commits) >= 250: + return ( + "Error: Too many commits in PR %s\nBot can only handle max 250 commits." + % len(commits), + False, + ) + new_branch = "backport-%s-%s" % (branch.replace("/", "_"), pr.number) + print("New Branch:", new_branch) + git_ref = "" + if repo.name == "cmssw": + git_ref = "--reference " + CMSSW_GIT_REF + print("GIT REF:", git_ref) + e, o = run_cmd( + "rm -rf pr_backport; git clone --branch %s %s git@github.com:%s pr_backport && cd pr_backport && git checkout -b %s" + % (branch, git_ref, repo.full_name, new_branch) + ) + if e: + print(o) + exit(1) + e, o = run_cmd("cd pr_backport; %s" % ";".join(commits)) + if e: + return ( + "Error: Failed to cherry-pick commits. Please backport this PR yourself.\n```" + + o + + "\n```", + False, + ) + e, o = run_cmd("cd pr_backport; git push origin %s" % new_branch) + if e: + print(o) + exit(1) + run_cmd("rm -rf pr_backport") + newBody = "backport of #%s\n\n%s" % (pr.number, pr.body) + newPR = repo.create_pull(title=pr.title, body=newBody, base=branch, head=new_branch) + return ( + "Successfully backported PR #%s as #%s for branch %s" % (pr.number, newPR.number, branch), + True, + ) + if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cmssw.", type=str, default=GH_CMSSW_REPO) - parser.add_argument("-b", "--branch", dest="branch", help="Repository branch for which new Pull request should be created.", type=str, default=None) - parser.add_argument("-p", "--pull", dest="pull", help="Pull request number to be backported.", type=int, default=0) - args = parser.parse_args() + parser = ArgumentParser() + parser.add_argument( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cmssw.", + type=str, + default=GH_CMSSW_REPO, + ) + parser.add_argument( + "-b", + "--branch", + dest="branch", + help="Repository branch for which new Pull request should be created.", + type=str, + default=None, + ) + parser.add_argument( + "-p", + "--pull", + dest="pull", + help="Pull request number to be backported.", + type=int, + default=0, + ) + args = parser.parse_args() - if args.pull == 0: parser.error("Missing pull request number.") - if not args.branch: parser.error("Missing branch name.") + if args.pull == 0: + parser.error("Missing pull request number.") + if not args.branch: + parser.error("Missing branch name.") - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - gh_repo = gh.get_repo(GH_CMSSW_ORGANIZATION+"/"+args.repository) - pr = gh_repo.get_pull(args.pull) - res = backport_pull (gh_repo, pr, args.branch) - status = "done" - if not res[1]: status = "failed\n**Reason:**\n" - print(res) - pr.create_issue_comment("backport %s\n%s" % (status, res[0])) + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + gh_repo = gh.get_repo(GH_CMSSW_ORGANIZATION + "/" + args.repository) + pr = gh_repo.get_pull(args.pull) + res = backport_pull(gh_repo, pr, args.branch) + status = "done" + if not res[1]: + status = "failed\n**Reason:**\n" + print(res) + pr.create_issue_comment("backport %s\n%s" % (status, res[0])) diff --git a/buildLogAnalyzer.py b/buildLogAnalyzer.py index df89b13eb3e9..79b78b221eb3 100755 --- a/buildLogAnalyzer.py +++ b/buildLogAnalyzer.py @@ -9,32 +9,41 @@ import getopt if sys.version_info[0] == 3: - def cmp(a,b): - return ((a > b) - (a < b)) + def cmp(a, b): + return (a > b) - (a < b) + + +def pkgCmp(a, b): + if a.subsys == b.subsys: + return cmp(a.pkg, b.pkg) + else: + return cmp(a.subsys, b.subsys) -def pkgCmp(a,b): - if a.subsys == b.subsys: return cmp(a.pkg, b.pkg) - else: return cmp(a.subsys, b.subsys) # ================================================================================ + class ErrorInfo(object): """keeps track of information for errors""" + def __init__(self, errType, msg): super(ErrorInfo, self).__init__() self.errType = errType - self.errMsg = msg + self.errMsg = msg + # ================================================================================ + class PackageInfo(object): """keeps track of information for each package""" + def __init__(self, subsys, pkg): super(PackageInfo, self).__init__() - self.subsys = subsys - self.pkg = pkg + self.subsys = subsys + self.pkg = pkg self.errInfo = [] self.errSummary = {} self.errLines = {} @@ -43,8 +52,9 @@ def __init__(self, subsys, pkg): def addErrInfo(self, errInfo, lineNo): """docstring for addErr""" self.warnOnly = True - if 'Error' in errInfo.errType: self.warnOnly = False - self.errInfo.append( errInfo ) + if "Error" in errInfo.errType: + self.warnOnly = False + self.errInfo.append(errInfo) if errInfo.errType not in self.errSummary.keys(): self.errSummary[errInfo.errType] = 1 else: @@ -53,56 +63,65 @@ def addErrInfo(self, errInfo, lineNo): def name(self): """docstring for name""" - return self.subsys+'/'+self.pkg + return self.subsys + "/" + self.pkg + # ================================================================================ + class LogFileAnalyzer(object): """docstring for LogFileAnalyzer""" - def __init__(self, topDirIn='.', topUrlIn='', verbose = -1, pkgsList = None, release = None, ignoreWarnings=[]): + + def __init__( + self, topDirIn=".", topUrlIn="", verbose=-1, pkgsList=None, release=None, ignoreWarnings=[] + ): 
super(LogFileAnalyzer, self).__init__() - self.topDir = os.path.abspath( topDirIn ) + self.topDir = os.path.abspath(topDirIn) self.ignoreWarnings = ignoreWarnings self.topURL = topUrlIn - if not pkgsList: pkgsList = "../../../../../src/PackageList.cmssw" - if self.topURL != '' : - if self.topURL[-1] != '/' : self.topURL += '/' - if not release: release = self.topURL.split('/')[-3] # TODO no error catching + if not pkgsList: + pkgsList = "../../../../../src/PackageList.cmssw" + if self.topURL != "": + if self.topURL[-1] != "/": + self.topURL += "/" + if not release: + release = self.topURL.split("/")[-3] # TODO no error catching self.release = release self.pkgsList = pkgsList self.verbose = verbose self.tagList = {} - self.nErrorInfo = {} + self.nErrorInfo = {} self.nFailedPkgs = [] self.packageList = [] - self.pkgOK = [] - self.pkgErr = [] - self.pkgWarn = [] - - self.errorKeys = ['dictError', - 'compError', - 'linkError', - 'pythonError', - 'compWarning', - 'dwnlError', - 'miscError', - 'ignoreWarning', - ] - - self.styleClass = {'dictError' : 'dictErr', - 'compError' : 'compErr', - 'linkError' : 'linkErr', - 'pythonError' : 'pyErr', - 'dwnlError' : 'dwnldErr', - 'miscError' : 'miscErr', - 'compWarning' : 'compWarn', - 'ignoreWarning' : 'compWarn', - 'ok' : 'ok', - } - + self.pkgOK = [] + self.pkgErr = [] + self.pkgWarn = [] + + self.errorKeys = [ + "dictError", + "compError", + "linkError", + "pythonError", + "compWarning", + "dwnlError", + "miscError", + "ignoreWarning", + ] + + self.styleClass = { + "dictError": "dictErr", + "compError": "compErr", + "linkError": "linkErr", + "pythonError": "pyErr", + "dwnlError": "dwnldErr", + "miscError": "miscErr", + "compWarning": "compWarn", + "ignoreWarning": "compWarn", + "ok": "ok", + } # get the lists separately for "priority" treatment ... 
self.errMap = {} @@ -114,7 +133,7 @@ def __init__(self, topDirIn='.', topUrlIn='', verbose = -1, pkgsList = None, rel for key in self.errorKeys: self.errMapAll[key] = [] - def getDevelAdmins(self): + def getDevelAdmins(self): """ get list of admins and developers from .admin/developers file in each package needed for sending out e-mails @@ -122,12 +141,12 @@ def getDevelAdmins(self): pass def getTagList(self): - import glob - srcdir = os.path.dirname(self.pkgsList)+"/" - for pkg in glob.glob(srcdir+'*/*'): - pkg = pkg.replace(srcdir,"") - self.tagList[pkg] = "" + + srcdir = os.path.dirname(self.pkgsList) + "/" + for pkg in glob.glob(srcdir + "*/*"): + pkg = pkg.replace(srcdir, "") + self.tagList[pkg] = "" return def analyze(self): @@ -138,21 +157,24 @@ def analyze(self): self.getTagList() import glob + start = time.time() - packageList = glob.glob('*/*/build.log') + packageList = glob.glob("*/*/build.log") - if self.verbose > 0: print("going to analyze ", len(packageList), 'files.') + if self.verbose > 0: + print("going to analyze ", len(packageList), "files.") for logFile in packageList: self.analyzeFile(logFile) pkgDone = [] for pkg in self.packageList: - if pkg.warnOnly : self.pkgWarn.append(pkg) + if pkg.warnOnly: + self.pkgWarn.append(pkg) if pkg.errInfo: self.pkgErr.append(pkg) for key in self.errorKeys: - if key in pkg.errSummary.keys() : + if key in pkg.errSummary.keys(): self.errMapAll[key].append(pkg) if key in pkg.errSummary.keys() and pkg not in pkgDone: self.errMap[key].append(pkg) @@ -161,60 +183,77 @@ def analyze(self): self.pkgOK.append(pkg) stop = time.time() - self.anaTime = stop-start + self.anaTime = stop - start pass def report(self): """show collected info""" - print('analyzed ', len(self.packageList), 'log files in', str(self.anaTime), 'sec.') + print("analyzed ", len(self.packageList), "log files in", str(self.anaTime), "sec.") totErr = 0 for key, val in self.nErrorInfo.items(): totErr += int(val) - print('found ', totErr, ' errors and warnings in total, by type:') + print("found ", totErr, " errors and warnings in total, by type:") for key, val in self.nErrorInfo.items(): - print('\t', key, ' : ', val, ' in ', len(self.errMapAll[key]), 'packages') - - print('found ', len(self.pkgOK), 'packages without errors/warnings.') - print('found ', len(self.pkgErr), 'packages with errors or warnings, ', len(self.pkgWarn), ' with warnings only.') + print("\t", key, " : ", val, " in ", len(self.errMapAll[key]), "packages") + + print("found ", len(self.pkgOK), "packages without errors/warnings.") + print( + "found ", + len(self.pkgErr), + "packages with errors or warnings, ", + len(self.pkgWarn), + " with warnings only.", + ) start = time.time() self.makeHTMLSummaryPage() for key in self.errorKeys: - pkgList = sorted(self.errMap[key],key=lambda x: x.name()) + pkgList = sorted(self.errMap[key], key=lambda x: x.name()) for pkg in pkgList: self.makeHTMLLogFile(pkg) for pkg in self.pkgOK: self.makeHTMLLogFile(pkg) stop = time.time() - print("creating html pages took ", str(stop-start), 'sec.') + print("creating html pages took ", str(stop - start), "sec.") def makeHTMLSummaryPage(self): - keyList = self.errorKeys - htmlDir = '../html/' + htmlDir = "../html/" if not os.path.exists(htmlDir): os.makedirs(htmlDir) htmlFileName = htmlDir + "index.html" - htmlFile = open (htmlFileName, 'w') + htmlFile = open(htmlFileName, "w") htmlFile.write("\n") htmlFile.write("\n") - htmlFile.write('\n') - htmlFile.write("Summary for build of "+self.release+"") + htmlFile.write( + '\n' + ) + 
htmlFile.write("Summary for build of " + self.release + "") htmlFile.write("\n") htmlFile.write("\n") - htmlFile.write("
Summary for build of "+self.release+" \n") - htmlFile.write(" Platform: "+os.environ["SCRAM_ARCH"]+" \n") - htmlFile.write('analyzed '+ str(len(self.packageList)) + ' log files in ' + str(self.anaTime) +' sec.\n') + htmlFile.write(" Summary for build of " + self.release + " \n") + htmlFile.write(" Platform: " + os.environ["SCRAM_ARCH"] + " \n") + htmlFile.write( + "analyzed " + + str(len(self.packageList)) + + " log files in " + + str(self.anaTime) + + " sec.\n" + ) totErr = 0 for key, val in self.nErrorInfo.items(): totErr += int(val) - htmlFile.write(' found '+ str(totErr)+ ' errors and warnings in total, by type: \n') + htmlFile.write( + " found " + str(totErr) + " errors and warnings in total, by type:
\n" + ) htmlFile.write('') - htmlFile.write('\n') + htmlFile.write( + "\n" + ) for key in keyList: val = 0 try: @@ -222,16 +261,26 @@ def makeHTMLSummaryPage(self): except KeyError: pass nPkgE = len(self.errMapAll[key]) - htmlFile.write('\n') - htmlFile.write('
error type # packages total # errors
error type # packages total # errors
'+ key + ' ' + str(nPkgE) + ' ' + str(val) + '
') + htmlFile.write( + '\n" + ) + htmlFile.write("
' + + key + + " " + + str(nPkgE) + + " " + + str(val) + + "
") htmlFile.write('
\n') htmlFile.write(" ") htmlFile.write("") htmlFile.write("") for key in keyList: htmlFile.write("') - htmlFile.write('') - htmlFile.write('") + htmlFile.write('') + htmlFile.write("") for pKey in keyList: htmlFile.write("") htmlFile.write("\n") @@ -271,16 +331,26 @@ def makeHTMLSummaryPage(self): pkgList = sorted(self.pkgOK, key=lambda x: x.name()) for pkg in pkgList: - if not pkg.name() in self.tagList: continue - htmlFile.write(' ') + if not pkg.name() in self.tagList: + continue + htmlFile.write(" ") htmlFile.write('') - htmlFile.write('") for pKey in self.errorKeys: htmlFile.write("") htmlFile.write("\n") @@ -292,9 +362,10 @@ def makeHTMLSummaryPage(self): # write out all info also as pkl files so we can re-use it: from pickle import Pickler - summFile = open(htmlDir+'/'+'logAnalysis.pkl','wb') + + summFile = open(htmlDir + "/" + "logAnalysis.pkl", "wb") pklr = Pickler(summFile, protocol=2) - pklr.dump([self.release,os.environ["SCRAM_ARCH"], self.anaTime]) + pklr.dump([self.release, os.environ["SCRAM_ARCH"], self.anaTime]) pklr.dump(self.errorKeys) pklr.dump(self.nErrorInfo) pklr.dump(self.errMapAll) @@ -310,33 +381,44 @@ def makeHTMLSummaryPage(self): def makeHTMLLogFile(self, pkg): """docstring for makeHTMLFile""" - if not pkg.name() in self.tagList: return - htmlDir = '../html/'+pkg.name()+'/' + if not pkg.name() in self.tagList: + return + htmlDir = "../html/" + pkg.name() + "/" if not os.path.exists(htmlDir): os.makedirs(htmlDir) - htmlFileName = htmlDir +'log.html' + htmlFileName = htmlDir + "log.html" - logFileName = pkg.name()+'/build.log' - logFile = open(logFileName, 'r') - htmlFile = open (htmlFileName, 'w') + logFileName = pkg.name() + "/build.log" + logFile = open(logFileName, "r") + htmlFile = open(htmlFileName, "w") htmlFile.write("\n") htmlFile.write("\n") htmlFile.write('') - htmlFile.write("Log File for "+pkg.name()+"") + htmlFile.write("Log File for " + pkg.name() + "") htmlFile.write("\n") htmlFile.write("\n") - htmlFile.write("
Log File for "+pkg.name()+' '+self.tagList[pkg.name()]+" \n") + htmlFile.write( + " Log File for " + pkg.name() + " " + self.tagList[pkg.name()] + "
\n" + ) htmlFile.write("<pre>\n")
         lineNo = -1
         for line in logFile.readlines():
             lineNo += 1
             # HTML sanitisation:
-            newLine = line.replace('&','&amp;') # do this first to not escape it again in the next subs
-            newLine = newLine.replace('<','&lt;').replace('>','&gt;')
+            newLine = line.replace(
+                "&", "&amp;"
+            )  # do this first to not escape it again in the next subs
+            newLine = newLine.replace("<", "&lt;").replace(">", "&gt;")
             if lineNo in pkg.errLines.keys():
-                newLine = '  '+newLine+' '
-            if sys.version_info[0]<3:
-                htmlFile.write(newLine.decode('ascii','ignore'))
+                newLine = (
+                    "  "
+                    + newLine
+                    + " "
+                )
+            if sys.version_info[0] < 3:
+                htmlFile.write(newLine.decode("ascii", "ignore"))
             else:
                 htmlFile.write(newLine)
         htmlFile.write("</pre>
\n") @@ -346,63 +428,234 @@ def makeHTMLLogFile(self, pkg): def analyzeFile(self, fileNameIn): """read in file and check for errors""" - subsys, pkg, logFile = fileNameIn.split('/') - - if self.verbose > 5 : print("analyzing file : ", fileNameIn) - - fileIn = open(fileNameIn, 'r') - shLib = 'so' - if os.uname()[0] == 'Darwin' : - shLib = 'dylib' - errorInf =[ - {str('^.*? cannot find -l(.*?)$') : ['linkError', 'missing library "%s"']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/src/'+subsys+pkg+'/classes_rflx\.cpp') : ['dictError', 'for package dictionary']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/src/'+subsys+pkg+'/.*?\.'+shLib) : ['linkError', 'for package library']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/src/'+subsys+pkg+'/.*?\.o') : ['compError', 'for package library']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/bin/(.*?)/.*?\.o') : ['compError', 'for executable %s']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/bin/(.*?)/\1') : ['linkError', 'for executable %s']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/bin/(.*?)/lib\1\.'+shLib) : ['linkError', 'for shared library %s in bin']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/stubs/lib(.*?)\.'+shLib) : ['linkError', 'for shared library %s in test/stubs']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/(.*?)/.*?\.'+shLib) : ['linkError', 'for shared library %s in test']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/stubs/.*?\.o') : ['compError', 'for library in test/stubs']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/(.*?)/.*?\.o') : ['compError', 'for executable %s in test']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/(.*?)\.'+shLib) : ['linkError', 'for shared library %s in test']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/(.*?)\.o') : ['compError', 'for executable %s in test']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/(.*?)/\1') : ['linkError', 'for executable %s in test']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/plugins/(.*?)/.*?\.o') : ['compError', 'for plugin %s in plugins']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/plugins/(.*?)/lib.*?\.'+shLib) : ['linkError', 'for plugin library %s in plugins']}, - {str('^ *\*\*\* Break \*\*\* illegal instruction') : ['compError', 'Break illegal instruction']}, - {str('^AttributeError: .*') : ['pythonError', 'importing another module']}, - {str('^ImportError: .*') : ['pythonError', 'importing another module']}, - {str('^SyntaxError: .*') : ['pythonError', 'syntax error in module']}, - {str('^NameError: .*') : ['pythonError', 'name error in module']}, - {str('^TypeError: .*') : ['pythonError', 'type error in module']}, - {str('^ValueError: .*') : ['pythonError', 'value error in module']}, - {str('^gmake: \*\*\* .*?/src/'+subsys+'/'+pkg+'/test/data/download\.url') : ['dwnlError', 'for file in data/download.url in test']}, - {str('^ */.*?/'+self.release+'/src/'+subsys+'/'+pkg+'.*?\:\d*\: warning: ') : ['compWarning', 'for file in package']}, - {str('^ */.*?/'+self.release+'/src/.*?\:\d+\: warning: ') : ['compWarning', 'for file in release']}, - {str('^Warning: ') : ['compWarning', 'for file in package']}, - {str('^ */.*?/'+self.release+'/src/'+subsys+'/'+pkg+'.*?\:\d+\: error: ') : ['compError', 'for file in package']}, - {str('^ */.*?/'+self.release+'/src/.*?\:\d+\: error: ') : ['compError', 'for file in release']}, - {str('^.*?\:\d+\: error: ') : ['compError', 'for file in externals']}, - {str('^ 
*tmp/.*?/src/'+subsys+'/'+pkg+'/src/(.*?)/lib.*?\.'+shLib+'\: undefined reference to .*') : ['linkError', 'for package library %s ']}, - {str('^ *tmp/.*?/src/'+subsys+'/'+pkg+'/plugins/(.*?)/lib.*?\.'+shLib+'\: undefined reference to .*') : ['linkError', 'for plugin library %s in plugins']}, - {str("^error: class '.*?' has a different checksum for ClassVersion") : ['compError', 'for a different checksum for ClassVersion']}, - {str('^.*: (more undefined references to|undefined reference to).*') : ['compError', 'Missing symbols in a package']}, - ] - - miscErrRe = re.compile('^gmake: \*\*\* (.*)$') - genericLinkErrRe = re.compile('^gmake: \*\*\* \[tmp/.*?/lib.*?'+shLib+'\] Error 1') - - if ('_gcc46' in os.environ["SCRAM_ARCH"]): - errorInf.append({str('^.*?:\d+\: warning\: ') : ['compWarning', 'from external in package']}) + subsys, pkg, logFile = fileNameIn.split("/") + + if self.verbose > 5: + print("analyzing file : ", fileNameIn) + + fileIn = open(fileNameIn, "r") + shLib = "so" + if os.uname()[0] == "Darwin": + shLib = "dylib" + errorInf = [ + {str("^.*? cannot find -l(.*?)$"): ["linkError", 'missing library "%s"']}, + { + str( + "^gmake: \*\*\* .*?/src/" + + subsys + + "/" + + pkg + + "/src/" + + subsys + + pkg + + "/classes_rflx\.cpp" + ): ["dictError", "for package dictionary"] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + + subsys + + "/" + + pkg + + "/src/" + + subsys + + pkg + + "/.*?\." + + shLib + ): ["linkError", "for package library"] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + + subsys + + "/" + + pkg + + "/src/" + + subsys + + pkg + + "/.*?\.o" + ): ["compError", "for package library"] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/bin/(.*?)/.*?\.o"): [ + "compError", + "for executable %s", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/bin/(.*?)/\1"): [ + "linkError", + "for executable %s", + ] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/bin/(.*?)/lib\1\." + shLib + ): ["linkError", "for shared library %s in bin"] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + + subsys + + "/" + + pkg + + "/test/stubs/lib(.*?)\." + + shLib + ): ["linkError", "for shared library %s in test/stubs"] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/(.*?)/.*?\." + shLib + ): ["linkError", "for shared library %s in test"] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/stubs/.*?\.o"): [ + "compError", + "for library in test/stubs", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/(.*?)/.*?\.o"): [ + "compError", + "for executable %s in test", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/(.*?)\." + shLib): [ + "linkError", + "for shared library %s in test", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/(.*?)\.o"): [ + "compError", + "for executable %s in test", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/(.*?)/\1"): [ + "linkError", + "for executable %s in test", + ] + }, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/plugins/(.*?)/.*?\.o"): [ + "compError", + "for plugin %s in plugins", + ] + }, + { + str( + "^gmake: \*\*\* .*?/src/" + + subsys + + "/" + + pkg + + "/plugins/(.*?)/lib.*?\." 
+ + shLib + ): ["linkError", "for plugin library %s in plugins"] + }, + { + str("^ *\*\*\* Break \*\*\* illegal instruction"): [ + "compError", + "Break illegal instruction", + ] + }, + {str("^AttributeError: .*"): ["pythonError", "importing another module"]}, + {str("^ImportError: .*"): ["pythonError", "importing another module"]}, + {str("^SyntaxError: .*"): ["pythonError", "syntax error in module"]}, + {str("^NameError: .*"): ["pythonError", "name error in module"]}, + {str("^TypeError: .*"): ["pythonError", "type error in module"]}, + {str("^ValueError: .*"): ["pythonError", "value error in module"]}, + { + str("^gmake: \*\*\* .*?/src/" + subsys + "/" + pkg + "/test/data/download\.url"): [ + "dwnlError", + "for file in data/download.url in test", + ] + }, + { + str( + "^ */.*?/" + + self.release + + "/src/" + + subsys + + "/" + + pkg + + ".*?\:\d*\: warning: " + ): ["compWarning", "for file in package"] + }, + { + str("^ */.*?/" + self.release + "/src/.*?\:\d+\: warning: "): [ + "compWarning", + "for file in release", + ] + }, + {str("^Warning: "): ["compWarning", "for file in package"]}, + { + str( + "^ */.*?/" + self.release + "/src/" + subsys + "/" + pkg + ".*?\:\d+\: error: " + ): ["compError", "for file in package"] + }, + { + str("^ */.*?/" + self.release + "/src/.*?\:\d+\: error: "): [ + "compError", + "for file in release", + ] + }, + {str("^.*?\:\d+\: error: "): ["compError", "for file in externals"]}, + { + str( + "^ *tmp/.*?/src/" + + subsys + + "/" + + pkg + + "/src/(.*?)/lib.*?\." + + shLib + + "\: undefined reference to .*" + ): ["linkError", "for package library %s "] + }, + { + str( + "^ *tmp/.*?/src/" + + subsys + + "/" + + pkg + + "/plugins/(.*?)/lib.*?\." + + shLib + + "\: undefined reference to .*" + ): ["linkError", "for plugin library %s in plugins"] + }, + { + str("^error: class '.*?' has a different checksum for ClassVersion"): [ + "compError", + "for a different checksum for ClassVersion", + ] + }, + { + str("^.*: (more undefined references to|undefined reference to).*"): [ + "compError", + "Missing symbols in a package", + ] + }, + ] + + miscErrRe = re.compile("^gmake: \*\*\* (.*)$") + genericLinkErrRe = re.compile("^gmake: \*\*\* \[tmp/.*?/lib.*?" 
+ shLib + "\] Error 1") + + if "_gcc46" in os.environ["SCRAM_ARCH"]: + errorInf.append( + {str("^.*?:\d+\: warning\: "): ["compWarning", "from external in package"]} + ) else: - errorInf.append({str('^.*?:\d+\: warning\: ') : ['ignoreWarning', 'from external in package']}) - errorInf.append({str('^.*?ERROR:Private Header:') : ['compError', 'Private header usage.']}) + errorInf.append( + {str("^.*?:\d+\: warning\: "): ["ignoreWarning", "from external in package"]} + ) + errorInf.append( + {str("^.*?ERROR:Private Header:"): ["compError", "Private header usage."]} + ) errors = [] for errI in errorInf: - for err, info in errI.items(): - errors.append({re.compile(err) : info}) + for err, info in errI.items(): + errors.append({re.compile(err): info}) pkgInfo = PackageInfo(subsys, pkg) lineNo = -1 @@ -410,45 +663,51 @@ def analyzeFile(self, fileNameIn): lineNo += 1 errFound = False for errI in errors: - isMatched = False - for errRe, info in errI.items(): - errMatch = errRe.match(line) - if errMatch: - errFound = True - isMatched = True - errTyp, msg = info - if self.ignoreWarnings and (errTyp=="compWarning") and [w for w in self.ignoreWarnings if w in line]: - errTyp = 'ignoreWarning' - if '%s' in msg : - msg = info[1] % errMatch.groups(1) - if errTyp in self.nErrorInfo.keys(): - self.nErrorInfo[errTyp] += 1 - else: - self.nErrorInfo[errTyp] = 1 - pkgInfo.addErrInfo( ErrorInfo(errTyp, msg), lineNo ) + isMatched = False + for errRe, info in errI.items(): + errMatch = errRe.match(line) + if errMatch: + errFound = True + isMatched = True + errTyp, msg = info + if ( + self.ignoreWarnings + and (errTyp == "compWarning") + and [w for w in self.ignoreWarnings if w in line] + ): + errTyp = "ignoreWarning" + if "%s" in msg: + msg = info[1] % errMatch.groups(1) + if errTyp in self.nErrorInfo.keys(): + self.nErrorInfo[errTyp] += 1 + else: + self.nErrorInfo[errTyp] = 1 + pkgInfo.addErrInfo(ErrorInfo(errTyp, msg), lineNo) + break + if isMatched: break - if isMatched: break - if not errFound : + if not errFound: miscErrMatch = miscErrRe.match(line) if miscErrMatch: - if not genericLinkErrRe.match(line) : - errTyp = 'miscError' - msg = 'Unknown error found: %s' % miscErrMatch.groups(1) + if not genericLinkErrRe.match(line): + errTyp = "miscError" + msg = "Unknown error found: %s" % miscErrMatch.groups(1) if errTyp in self.nErrorInfo.keys(): self.nErrorInfo[errTyp] += 1 else: self.nErrorInfo[errTyp] = 1 - pkgInfo.addErrInfo( ErrorInfo(errTyp, msg), lineNo ) + pkgInfo.addErrInfo(ErrorInfo(errTyp, msg), lineNo) fileIn.close() - self.packageList.append( pkgInfo ) + self.packageList.append(pkgInfo) return + # ================================================================================ -help_message = ''' +help_message = """ -h, --help : print this message -l, --logDir : the path to the dir with the log files to analyze -r, --release : Release version @@ -462,7 +721,8 @@ def analyzeFile(self, fileNameIn): the script will produce the html version of the log files of the IB and write them into: /build/intBld/rc/wed-21/CMSSW_3_1_X_2009-07-08-2100/tmp/slc4_ia32_gcc345/cache/log/html/ -''' +""" + class Usage(Exception): def __init__(self, msg): @@ -470,25 +730,38 @@ def __init__(self, msg): def main(argv=None): - logDir = '.' - topURL = './' + logDir = "." 
+ topURL = "./" verbose = -1 - pkgList = os.getenv("CMSSW_BASE",None) - if pkgList: pkgList+="/src/PackageList.cmssw" - rel = os.getenv("CMSSW_VERSION",None) + pkgList = os.getenv("CMSSW_BASE", None) + if pkgList: + pkgList += "/src/PackageList.cmssw" + rel = os.getenv("CMSSW_VERSION", None) igWarning = [] if argv is None: argv = sys.argv try: try: - opts, args = getopt.getopt(argv[1:], "hv:l:t:p:r:", ["help", "verbose=", "logDir=", "topURL=", "pkgList=", "release=", "ignoreWarning="]) + opts, args = getopt.getopt( + argv[1:], + "hv:l:t:p:r:", + [ + "help", + "verbose=", + "logDir=", + "topURL=", + "pkgList=", + "release=", + "ignoreWarning=", + ], + ) except getopt.error as msg: raise Usage(msg) # option processing for option, value in opts: - if option in ("-v", '--verbose'): + if option in ("-v", "--verbose"): verbose = int(value) if option in ("-h", "--help"): raise Usage(help_message) @@ -506,7 +779,8 @@ def main(argv=None): if not topURL: raise Usage(help_message) - if not os.path.exists(logDir): return + if not os.path.exists(logDir): + return lfa = LogFileAnalyzer(logDir, topURL, verbose, pkgList, rel, igWarning) lfa.analyze() diff --git a/cache-pull-request.py b/cache-pull-request.py index 77541b29f5a7..5d32b13abc9d 100755 --- a/cache-pull-request.py +++ b/cache-pull-request.py @@ -6,81 +6,110 @@ from github_utils import api_rate_limits from json import dumps, load import re + setdefaulttimeout(120) + def process(repo, prId, prCache): - data = {} - issue = repo.get_issue(prId) - if not issue.pull_request: - print("WARNING: Only cache Pull requests, %s is an issue." % prId) + data = {} + issue = repo.get_issue(prId) + if not issue.pull_request: + print("WARNING: Only cache Pull requests, %s is an issue." % prId) + return data + pr = repo.get_pull(prId) + if prCache and exists(prCache): + refData = load(open(prCache)) + if "freeze" in refData: + data["freeze"] = refData["freeze"] + data["auther_sha"] = refData["auther_sha"] + data["merge_commit_sha"] = refData["merge_commit_sha"] + data["number"] = issue.number + data["user"] = issue.user.login.encode("ascii", "ignore").decode() + data["title"] = issue.title.encode("ascii", "ignore").decode() + data["comments"] = issue.comments + data["labels"] = [x.name.encode("ascii", "ignore").decode() for x in issue.labels] + if issue.body: + data["body"] = issue.body.encode("ascii", "ignore").decode() + else: + data["body"] = "" + if issue.milestone: + data["milestone"] = issue.milestone.title.encode("ascii", "ignore").decode() + + data["branch"] = pr.base.ref.encode("ascii", "ignore").decode() + data["created_at"] = pr.created_at.strftime("%s") + data["updated_at"] = pr.updated_at.strftime("%s") + if pr.head.user: + data["author"] = pr.head.user.login.encode("ascii", "ignore").decode() + data["auther_ref"] = pr.head.ref.encode("ascii", "ignore").decode() + if not "freeze" in data: + data["auther_sha"] = pr.head.sha.encode("ascii", "ignore").decode() + data["review_comments"] = pr.review_comments + data["commits"] = pr.commits + data["additions"] = pr.additions + data["deletions"] = pr.deletions + data["changed_files"] = pr.changed_files + data["state"] = pr.state + if pr.state == "closed": + data["closed_at"] = pr.closed_at.strftime("%s") + if pr.merged: + data["merged_at"] = pr.merged_at.strftime("%s") + data["merged_by"] = pr.merged_by.login.encode("ascii", "ignore").decode() + if not "freeze" in data: + if pr.merge_commit_sha: + data["merge_commit_sha"] = pr.merge_commit_sha.encode( + "ascii", "ignore" + ).decode() + else: + 
data["merge_commit_sha"] = "" + data["release-notes"] = [] + REGEX_RN = re.compile("^release(-| )note(s|)\s*:\s*", re.I) + if issue.body: + msg = issue.body.encode("ascii", "ignore").decode().strip() + if REGEX_RN.match(msg): + data["release-notes"].append(REGEX_RN.sub("", msg).strip()) + for comment in issue.get_comments(): + if not comment.body: + continue + msg = comment.body.encode("ascii", "ignore").decode().strip() + if REGEX_RN.match(msg): + data["release-notes"].append(REGEX_RN.sub("", msg).strip()) return data - pr = repo.get_pull(prId) - if prCache and exists(prCache): - refData = load(open(prCache)) - if 'freeze' in refData: - data['freeze'] = refData['freeze'] - data['auther_sha'] = refData['auther_sha'] - data['merge_commit_sha'] = refData['merge_commit_sha'] - data['number'] = issue.number - data['user'] = issue.user.login.encode("ascii", "ignore").decode() - data['title'] = issue.title.encode("ascii", "ignore").decode() - data['comments'] = issue.comments - data['labels'] = [x.name.encode("ascii", "ignore").decode() for x in issue.labels] - if issue.body: data['body']=issue.body.encode("ascii", "ignore").decode() - else: data['body']="" - if issue.milestone: data['milestone']=issue.milestone.title.encode("ascii", "ignore").decode() - data['branch'] = pr.base.ref.encode("ascii", "ignore").decode() - data['created_at'] = pr.created_at.strftime("%s") - data['updated_at'] = pr.updated_at.strftime("%s") - if pr.head.user: data['author'] = pr.head.user.login.encode("ascii", "ignore").decode() - data['auther_ref'] = pr.head.ref.encode("ascii", "ignore").decode() - if not 'freeze' in data: data['auther_sha'] = pr.head.sha.encode("ascii", "ignore").decode() - data['review_comments'] = pr.review_comments - data['commits'] = pr.commits - data['additions'] = pr.additions - data['deletions'] = pr.deletions - data['changed_files'] = pr.changed_files - data['state'] = pr.state - if pr.state == "closed": - data['closed_at'] = pr.closed_at.strftime("%s") - if pr.merged: - data['merged_at'] = pr.merged_at.strftime("%s") - data['merged_by'] = pr.merged_by.login.encode("ascii", "ignore").decode() - if not 'freeze' in data: - if pr.merge_commit_sha:data['merge_commit_sha'] = pr.merge_commit_sha.encode("ascii", "ignore").decode() - else: data['merge_commit_sha']="" - data['release-notes'] = [] - REGEX_RN = re.compile('^release(-| )note(s|)\s*:\s*',re.I) - if issue.body: - msg = issue.body.encode("ascii", "ignore").decode().strip() - if REGEX_RN.match(msg): data['release-notes'].append(REGEX_RN.sub('',msg).strip()) - for comment in issue.get_comments(): - if not comment.body: continue - msg = comment.body.encode("ascii", "ignore").decode().strip() - if REGEX_RN.match(msg): - data['release-notes'].append(REGEX_RN.sub('',msg).strip()) - return data if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. 
cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-u", "--user", dest="user", help="GH API user.", type=str, default="") - opts, args = parser.parse_args() - - if len(args) != 2: - parser.error("Too many/few arguments") - prId = int(args[0]) - ghtoken=".github-token" - if opts.user: ghtoken=".github-token-"+opts.user + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option("-u", "--user", dest="user", help="GH API user.", type=str, default="") + opts, args = parser.parse_args() - gh = Github(login_or_token=open(expanduser("~/"+ghtoken)).read().strip()) - api_rate_limits(gh) - data = process(gh.get_repo(opts.repository), prId, args[1]) - if opts.dryRun: print(dumps(data)) - else: - j = open(args[0]+".json","w") - j.write(dumps(data,sort_keys=True)) - j.close() + if len(args) != 2: + parser.error("Too many/few arguments") + prId = int(args[0]) + ghtoken = ".github-token" + if opts.user: + ghtoken = ".github-token-" + opts.user + gh = Github(login_or_token=open(expanduser("~/" + ghtoken)).read().strip()) + api_rate_limits(gh) + data = process(gh.get_repo(opts.repository), prId, args[1]) + if opts.dryRun: + print(dumps(data)) + else: + j = open(args[0] + ".json", "w") + j.write(dumps(data, sort_keys=True)) + j.close() diff --git a/categories.py b/categories.py index 9a0ecb2e1680..03bfac791c18 100644 --- a/categories.py +++ b/categories.py @@ -7,165 +7,195 @@ from releases import SPECIAL_RELEASE_MANAGERS authors = {} -#Any Githib user whose comments/requests should be ignored +# Any Githib user whose comments/requests should be ignored GITHUB_BLACKLIST_AUTHORS = [] -#CMS Offline Release Planning managers +# CMS Offline Release Planning managers CMSSW_L1 = ["sextonkennedy", "rappoccio", "antoniovilela"] -#CMS-SDT members who has admin rights to various github organizations and repositories. -#They are also reposionsible to sign for externals -CMS_SDT = [ "iarspider", "smuzaffar", "aandvalenzuela" ] -#List of gh users who can approve a release build request -APPROVE_BUILD_RELEASE = list(set([ "smuzaffar"] + CMSSW_L1 + SPECIAL_RELEASE_MANAGERS)) -#List of gh users who can request to build a release. +# CMS-SDT members who has admin rights to various github organizations and repositories. +# They are also reposionsible to sign for externals +CMS_SDT = ["iarspider", "smuzaffar", "aandvalenzuela"] +# List of gh users who can approve a release build request +APPROVE_BUILD_RELEASE = list(set(["smuzaffar"] + CMSSW_L1 + SPECIAL_RELEASE_MANAGERS)) +# List of gh users who can request to build a release. 
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -#List og gh users who are allowed to trigger Pull Request testing -TRIGGER_PR_TESTS = list(set([ "felicepantaleo", "rovere", "lgray", "bsunanda", "VinInn", "kpedro88", "makortel", "wddgit", "mtosi", "gpetruc", "gartung", "nsmith-","mmusich","Sam-Harper","sroychow","silviodonato","slava77"] + REQUEST_BUILD_RELEASE + [ a for a in authors if authors[a]>10 and not a in GITHUB_BLACKLIST_AUTHORS ])) -#List of on additional release managers -PR_HOLD_MANAGERS = [ "kpedro88" ] +# List og gh users who are allowed to trigger Pull Request testing +TRIGGER_PR_TESTS = list( + set( + [ + "felicepantaleo", + "rovere", + "lgray", + "bsunanda", + "VinInn", + "kpedro88", + "makortel", + "wddgit", + "mtosi", + "gpetruc", + "gartung", + "nsmith-", + "mmusich", + "Sam-Harper", + "sroychow", + "silviodonato", + "slava77", + ] + + REQUEST_BUILD_RELEASE + + [a for a in authors if authors[a] > 10 and not a in GITHUB_BLACKLIST_AUTHORS] + ) +) +# List of on additional release managers +PR_HOLD_MANAGERS = ["kpedro88"] -COMMON_CATEGORIES = [ "orp", "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] -EXTERNAL_REPOS = [ "cms-data", "cms-externals", gh_user] +COMMON_CATEGORIES = ["orp", "tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] +EXTERNAL_REPOS = ["cms-data", "cms-externals", gh_user] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] -CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1 + [ "smuzaffar", "Dr15Jones", "makortel" ])) -COMPARISON_MISSING_MAP = [ "slava77" ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] +CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1 + ["smuzaffar", "Dr15Jones", "makortel"])) +COMPARISON_MISSING_MAP = ["slava77"] # CMS L2's and the CMSSW categories they are responsible for. 
They can also request to start pull requests testing CMSSW_L2 = { - "Dr15Jones": ["core", "visualization", "geometry"], - "Martin-Grunewald": ["hlt"], - "mmusich": ["hlt"], - "AdrianoDee": ["upgrade", "pdmv"], - "alberto-sanchez": ["generators"], - "alja": ["visualization"], - "bbilin": ["generators"], - "civanch": ["simulation", "geometry", "fastsim"], - "bsunanda": ["geometry"], - "davidlange6": ["operations"], - "emeschi": ["daq"], - "vlimant": ["xpog"], - "simonepigazzini": ["xpog"], - "fwyzard": ["heterogeneous"], - "GurpreetSinghChahal": ["generators"], - "jfernan2": ["reconstruction"], - "kskovpen": ["pdmv"], - "sunilUIET": ["pdmv"], - "miquork": ["pdmv", "jetmet-pog"], - "makortel": ["heterogeneous", "core", "visualization", "geometry"], - "mandrenguyen": ["reconstruction"], - "mdhildreth": ["simulation", "geometry", "fastsim"], - "missirol": ["hlt"], - "mkirsano": ["generators"], - "menglu21": ["generators"], - "rappoccio": ["operations"], - "antoniovilela": ["operations"], - "epalencia": ["l1"], - "aloeliger": ["l1"], - "rvenditti": ["dqm"], - "syuvivida": ["dqm"], - "tjavaid": ["dqm"], - "nothingface0": ["dqm"], - "antoniovagnerini": ["dqm"], - "sbein": ["fastsim"], - "SiewYan": ["generators"], - "smorovic": ["daq"], - "smuzaffar": ["core"], - "srimanob": ["upgrade"], - "ssekmen": ["fastsim"], - "francescobrivio": ["db"], - "tvami": ["analysis"], - "saumyaphor4252": ["alca","db"], - "perrotta": ["alca","db"], - "consuegs": ["alca","db"], - CMSBUILD_USER: ["tests" ], - # dpgs - "connorpa": ["trk-dpg"], - "sroychow": ["trk-dpg"], - "wang0jin": ["ecal-dpg"], - "thomreis": ["ecal-dpg"], - "wang-hui": ["hcal-dpg"], - "jhakala": ["hcal-dpg"], - "abdoulline": ["hcal-dpg"], - "igv4321": ["hcal-dpg"], - "mileva": ["muon-dpg"], - "battibass": ["muon-dpg","dt-dpg"], - "fcavallo": ["dt-dpg"], - "namapane": ["dt-dpg"], - "ptcox": ["csc-dpg"], - "jhgoh": ["rpc-dpg"], - "andresib": ["rpc-dpg"], - "pavlov": ["rpc-dpg"], - "kamon": ["gem-dpg"], - "jshlee": ["gem-dpg"], - "watson-ij": ["gem-dpg"], - "fabferro": ["ctpps-dpg"], - "jan-kaspar": ["ctpps-dpg"], - "vavati": ["ctpps-dpg"], - "rovere": ["hgcal-dpg"], - "cseez": ["hgcal-dpg"], - "pfs": ["hgcal-dpg"], - "felicepantaleo": ["hgcal-dpg"], - "fabiocos": ["mtd-dpg", "operations"], - "MartinaMalberti": ["mtd-dpg"], - "parbol": ["mtd-dpg"], - # pogs - "bellan": ["pf"], - "kdlong": ["pf"], - "swagata87": ["pf"], - "a-kapoor": ["egamma-pog"], - "RSalvatico": ["egamma-pog"], - "kirschen": ["jetmet-pog"], - "alkaloge": ["jetmet-pog"], - "knollejo": ["lumi-pog"], - "cschwick": ["lumi-pog"], - "gkaratha": ["muon-pog"], - "JanFSchulte": ["muon-pog"], - "SWuchterl": ["btv-pog"], - "mondalspandan": ["btv-pog"], - "michael-pitt": ["proton-pog"], - "kshcheli": ["proton-pog"], - "kandrosov": ["tau-pog"], - "alebihan": ["tau-pog"], - "slava77": ["tracking-pog"], - "kskovpen": ["tracking-pog"], - # PPD - "malbouis": ["ppd"], - "jordan-martins": ["ppd"], + "Dr15Jones": ["core", "visualization", "geometry"], + "Martin-Grunewald": ["hlt"], + "mmusich": ["hlt"], + "AdrianoDee": ["upgrade", "pdmv"], + "alberto-sanchez": ["generators"], + "alja": ["visualization"], + "bbilin": ["generators"], + "civanch": ["simulation", "geometry", "fastsim"], + "bsunanda": ["geometry"], + "davidlange6": ["operations"], + "emeschi": ["daq"], + "vlimant": ["xpog"], + "simonepigazzini": ["xpog"], + "fwyzard": ["heterogeneous"], + "GurpreetSinghChahal": ["generators"], + "jfernan2": ["reconstruction"], + "kskovpen": ["pdmv"], + "sunilUIET": ["pdmv"], + "miquork": ["pdmv", 
"jetmet-pog"], + "makortel": ["heterogeneous", "core", "visualization", "geometry"], + "mandrenguyen": ["reconstruction"], + "mdhildreth": ["simulation", "geometry", "fastsim"], + "mkirsano": ["generators"], + "menglu21": ["generators"], + "rappoccio": ["operations"], + "antoniovilela": ["operations"], + "epalencia": ["l1"], + "aloeliger": ["l1"], + "rvenditti": ["dqm"], + "syuvivida": ["dqm"], + "tjavaid": ["dqm"], + "nothingface0": ["dqm"], + "antoniovagnerini": ["dqm"], + "sbein": ["fastsim"], + "SiewYan": ["generators"], + "smorovic": ["daq"], + "smuzaffar": ["core"], + "srimanob": ["upgrade"], + "ssekmen": ["fastsim"], + "francescobrivio": ["db"], + "tvami": ["analysis"], + "saumyaphor4252": ["alca", "db"], + "perrotta": ["alca", "db"], + "consuegs": ["alca", "db"], + CMSBUILD_USER: ["tests"], + # dpgs + "connorpa": ["trk-dpg"], + "sroychow": ["trk-dpg"], + "wang0jin": ["ecal-dpg"], + "thomreis": ["ecal-dpg"], + "wang-hui": ["hcal-dpg"], + "jhakala": ["hcal-dpg"], + "abdoulline": ["hcal-dpg"], + "igv4321": ["hcal-dpg"], + "mileva": ["muon-dpg"], + "battibass": ["muon-dpg", "dt-dpg"], + "fcavallo": ["dt-dpg"], + "namapane": ["dt-dpg"], + "ptcox": ["csc-dpg"], + "jhgoh": ["rpc-dpg"], + "andresib": ["rpc-dpg"], + "pavlov": ["rpc-dpg"], + "kamon": ["gem-dpg"], + "jshlee": ["gem-dpg"], + "watson-ij": ["gem-dpg"], + "fabferro": ["ctpps-dpg"], + "jan-kaspar": ["ctpps-dpg"], + "vavati": ["ctpps-dpg"], + "rovere": ["hgcal-dpg"], + "cseez": ["hgcal-dpg"], + "pfs": ["hgcal-dpg"], + "felicepantaleo": ["hgcal-dpg"], + "fabiocos": ["mtd-dpg", "operations"], + "MartinaMalberti": ["mtd-dpg"], + "parbol": ["mtd-dpg"], + # pogs + "bellan": ["pf"], + "kdlong": ["pf"], + "swagata87": ["pf"], + "a-kapoor": ["egamma-pog"], + "RSalvatico": ["egamma-pog"], + "kirschen": ["jetmet-pog"], + "alkaloge": ["jetmet-pog"], + "knollejo": ["lumi-pog"], + "cschwick": ["lumi-pog"], + "gkaratha": ["muon-pog"], + "JanFSchulte": ["muon-pog"], + "SWuchterl": ["btv-pog"], + "mondalspandan": ["btv-pog"], + "michael-pitt": ["proton-pog"], + "kshcheli": ["proton-pog"], + "kandrosov": ["tau-pog"], + "alebihan": ["tau-pog"], + "slava77": ["tracking-pog"], + "kskovpen": ["tracking-pog"], + # PPD + "malbouis": ["ppd"], + "jordan-martins": ["ppd"], } -#All CMS_SDT members can sign externals ( e.g Pull Requests in cms-sw/cmsdist , cms-data and cms-externals +# All CMS_SDT members can sign externals ( e.g Pull Requests in cms-sw/cmsdist , cms-data and cms-externals for user in CMS_SDT: - if user not in CMSSW_L2: CMSSW_L2[user] = ['externals'] - elif not 'externals' in CMSSW_L2[user]: CMSSW_L2[user].append('externals') + if user not in CMSSW_L2: + CMSSW_L2[user] = ["externals"] + elif not "externals" in CMSSW_L2[user]: + CMSSW_L2[user].append("externals") -#All CMSSW L1 can sign for ORP +# All CMSSW L1 can sign for ORP for user in CMSSW_L1: - if user not in CMSSW_L2: CMSSW_L2[user] = ['orp'] - else: CMSSW_L2[user].append('orp') + if user not in CMSSW_L2: + CMSSW_L2[user] = ["orp"] + else: + CMSSW_L2[user].append("orp") USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) CMS_REPOS = set(CMSDIST_REPOS + CMSSW_REPOS + EXTERNAL_REPOS) for user in CMSSW_L2: - for cat in CMSSW_L2[user]: - if cat not in CMSSW_CATEGORIES: - CMSSW_CATEGORIES[cat] = [] + for cat in CMSSW_L2[user]: + if cat not in CMSSW_CATEGORIES: + CMSSW_CATEGORIES[cat] = [] + def external_to_package(repo_fullname): - org, repo = repo_fullname.split("/",1) - if org == "cms-data": - return repo.replace('-','/') - return '' + org, repo = 
repo_fullname.split("/", 1) + if org == "cms-data": + return repo.replace("-", "/") + return "" -#extra labels which bot cn add via 'type label' comment + +# extra labels which bot cn add via 'type label' comment def get_dpg_pog(): - groups = ['pf', 'l1t', 'castor'] - for user in CMSSW_L2: - for cat in CMSSW_L2[user]: - if '-' not in cat: continue - grp, ctype = cat.split('-',1) - if ctype in ['pog', 'dpg']: - groups.append(grp) - return list(set(groups)) + groups = ["pf", "l1t", "castor"] + for user in CMSSW_L2: + for cat in CMSSW_L2[user]: + if "-" not in cat: + continue + grp, ctype = cat.split("-", 1) + if ctype in ["pog", "dpg"]: + groups.append(grp) + return list(set(groups)) diff --git a/categories_map.py b/categories_map.py index 192b61bb9bb3..8190b643b40c 100644 --- a/categories_map.py +++ b/categories_map.py @@ -1,1837 +1,1834 @@ CMSSW_CATEGORIES = { - "alca": [ - "Alignment/APEEstimation", - "Alignment/CTPPS", - "Alignment/CocoaAnalysis", - "Alignment/CocoaApplication", - "Alignment/CocoaDDLObjects", - "Alignment/CocoaDaq", - "Alignment/CocoaFit", - "Alignment/CocoaModel", - "Alignment/CocoaToDDL", - "Alignment/CocoaUtilities", - "Alignment/CommonAlignment", - "Alignment/CommonAlignmentAlgorithm", - "Alignment/CommonAlignmentMonitor", - "Alignment/CommonAlignmentParametrization", - "Alignment/CommonAlignmentProducer", - "Alignment/Geners", - "Alignment/HIPAlignmentAlgorithm", - "Alignment/KalmanAlignmentAlgorithm", - "Alignment/LaserAlignment", - "Alignment/LaserAlignmentSimulation", - "Alignment/LaserDQM", - "Alignment/MillePedeAlignmentAlgorithm", - "Alignment/MuonAlignment", - "Alignment/MuonAlignmentAlgorithms", - "Alignment/OfflineValidation", - "Alignment/ReferenceTrajectories", - "Alignment/SurveyAnalysis", - "Alignment/TrackerAlignment", - "Alignment/TwoBodyDecay", - "CalibCalorimetry/CaloMiscalibTools", - "CalibCalorimetry/CaloTPG", - "CalibCalorimetry/CastorCalib", - "CalibCalorimetry/Configuration", - "CalibCalorimetry/EBPhase2TPGTools", - "CalibCalorimetry/EcalCorrectionModules", - "CalibCalorimetry/EcalCorrelatedNoiseAnalysisAlgos", - "CalibCalorimetry/EcalCorrelatedNoiseAnalysisModules", - "CalibCalorimetry/EcalLaserAnalyzer", - "CalibCalorimetry/EcalLaserCorrection", - "CalibCalorimetry/EcalLaserSorting", - "CalibCalorimetry/EcalPedestalOffsets", - "CalibCalorimetry/EcalSRTools", - "CalibCalorimetry/EcalTBCondTools", - "CalibCalorimetry/EcalTPGTools", - "CalibCalorimetry/EcalTrivialCondModules", - "CalibCalorimetry/HcalAlgos", - "CalibCalorimetry/HcalPlugins", - "CalibCalorimetry/HcalStandardModules", - "CalibCalorimetry/HcalTPGAlgos", - "CalibCalorimetry/HcalTPGEventSetup", - "CalibCalorimetry/HcalTPGIO", - "CalibFormats/CaloObjects", - "CalibFormats/CaloTPG", - "CalibFormats/CastorObjects", - "CalibFormats/HcalObjects", - "CalibFormats/SiPixelObjects", - "CalibFormats/SiStripObjects", - "CalibMuon/CSCCalibration", - "CalibMuon/Configuration", - "CalibMuon/DTCalibration", - "CalibMuon/DTDigiSync", - "CalibMuon/RPCCalibration", - "CalibPPS/AlignmentRelative", - "CalibPPS/AlignmentGlobal", - "CalibPPS/ESProducers", - "CalibPPS/TimingCalibration", - "CalibTracker/Configuration", - "CalibTracker/Records", - "CalibTracker/SiPhase2TrackerESProducers", - "CalibTracker/SiPixelConnectivity", - "CalibTracker/SiPixelESProducers", - "CalibTracker/SiPixelErrorEstimation", - "CalibTracker/SiPixelGainCalibration", - "CalibTracker/SiPixelIsAliveCalibration", - "CalibTracker/SiPixelLorentzAngle", - "CalibTracker/SiPixelQuality", - "CalibTracker/SiPixelSCurveCalibration", - 
"CalibTracker/SiPixelTools", - "CalibTracker/SiStripAPVAnalysis", - "CalibTracker/SiStripChannelGain", - "CalibTracker/SiStripCommon", - "CalibTracker/SiStripDCS", - "CalibTracker/SiStripESProducers", - "CalibTracker/SiStripHitEfficiency", - "CalibTracker/SiStripHitResolution", - "CalibTracker/SiStripLorentzAngle", - "CalibTracker/SiStripQuality", - "CalibTracker/StandaloneTrackerTopology", - "Calibration/EcalAlCaRecoProducers", - "Calibration/EcalCalibAlgos", - "Calibration/EcalTBTools", - "Calibration/HcalAlCaRecoProducers", - "Calibration/HcalCalibAlgos", - "Calibration/HcalConnectivity", - "Calibration/HcalIsolatedTrackReco", - "Calibration/Hotline", - "Calibration/IsolatedParticles", - "Calibration/LumiAlCaRecoProducers", - "Calibration/PPSAlCaRecoProducer", - "Calibration/TkAlCaRecoProducers", - "Calibration/Tools", - "CaloOnlineTools/EcalTools", - "CondCore/DBCommon", - "CondCore/DBOutputService", - "CondCore/ESSources", - "CondCore/IOVService", - "CondCore/MetaDataService", - "CondCore/Modules", - "CondCore/PluginSystem", - "CondCore/PopCon", - "CondCore/TagCollection", - "CondFormats/Alignment", - "CondFormats/AlignmentRecord", - "CondFormats/BTauObjects", - "CondFormats/BeamSpotObjects", - "CondFormats/CSCObjects", - "CondFormats/CTPPSReadoutObjects", - "CondFormats/Calibration", - "CondFormats/CastorObjects", - "CondFormats/Common", - "CondFormats/DTObjects", - "CondFormats/DataRecord", - "CondFormats/ESObjects", - "CondFormats/EcalCorrections", - "CondFormats/EcalObjects", - "CondFormats/EgammaObjects", - "CondFormats/GBRForest", - "CondFormats/GeometryObjects", - "CondFormats/HGCalObjects", - "CondFormats/HIObjects", - "CondFormats/HLTObjects", - "CondFormats/HcalMapping", - "CondFormats/HcalObjects", - "CondFormats/L1TObjects", - "CondFormats/Luminosity", - "CondFormats/MFObjects", - "CondFormats/OptAlignObjects", - "CondFormats/PCLConfig", - "CondFormats/PhysicsToolsObjects", - "CondFormats/PPSObjects", - "CondFormats/RPCObjects", - "CondFormats/RecoMuonObjects", - "CondFormats/RunInfo", - "CondFormats/SiPhase2TrackerObjects", - "CondFormats/SiPixelObjects", - "CondFormats/SiStripObjects", - "CondFormats/TotemReadoutObjects", - "CondTools/BeamSpot", - "CondTools/SiPhase2Tracker", - "Configuration/AlCa", - "DataFormats/Alignment", - "DataFormats/HcalCalibObjects", - "IORawData/CaloPatterns", - "IORawData/DTCommissioning", - "IORawData/HcalTBInputService", - "IORawData/SiPixelInputSources", - "MuonAnalysis/MomentumScaleCalibration", - "RecoVertex/BeamSpotProducer", - ], - "analysis": [ - "AnalysisAlgos/SiStripClusterInfoProducer", - "AnalysisAlgos/TrackInfoProducer", - "AnalysisDataFormats/EWK", - "AnalysisDataFormats/Egamma", - "AnalysisDataFormats/SUSYBSMObjects", - "AnalysisDataFormats/SiStripClusterInfo", - "AnalysisDataFormats/TopObjects", - "AnalysisDataFormats/TrackInfo", - "CommonTools/RecoUtils", - "DPGAnalysis/SiStripTools", - "DiffractiveForwardAnalysis/Configuration", - "DiffractiveForwardAnalysis/Skimming", - "EgammaAnalysis/CSA07Skims", - "EgammaAnalysis/Configuration", - "EgammaAnalysis/ElectronIDESSources", - "EgammaAnalysis/ElectronTools", - "EgammaAnalysis/PhotonIDProducers", - "ElectroWeakAnalysis/Configuration", - "ElectroWeakAnalysis/Skimming", - "ElectroWeakAnalysis/Utilities", - "ElectroWeakAnalysis/WENu", - "ElectroWeakAnalysis/WMuNu", - "ElectroWeakAnalysis/ZEE", - "ElectroWeakAnalysis/ZMuMu", - "HeavyFlavorAnalysis/Configuration", - "HeavyFlavorAnalysis/Onia2MuMu", - "HeavyFlavorAnalysis/RecoDecay", - "HeavyFlavorAnalysis/Skimming", - 
"HeavyFlavorAnalysis/SpecificDecay", - "HeavyIonsAnalysis/Configuration", - "HiggsAnalysis/CombinedLimit", - "HiggsAnalysis/Configuration", - "HiggsAnalysis/HiggsToGammaGamma", - "HiggsAnalysis/Skimming", - "JetMETAnalysis/Configuration", - "JetMETAnalysis/METSkims", - "JetMETCorrections/Algorithms", - "JetMETCorrections/FFTJetModules", - "JetMETCorrections/FFTJetObjects", - "JetMETCorrections/InterpolationTables", - "JetMETCorrections/IsolatedParticles", - "JetMETCorrections/JetParton", - "JetMETCorrections/JetVertexAssociation", - "JetMETCorrections/MCJet", - "JetMETCorrections/MinBias", - "JetMETCorrections/TauJet", - "MuonAnalysis/Configuration", - "MuonAnalysis/MuonAssociators", - "PhysicsTools/CandAlgos", - "PhysicsTools/CandUtils", - "PhysicsTools/CondLiteIO", - "PhysicsTools/Configuration", - "PhysicsTools/FWLite", - "PhysicsTools/HepMCCandAlgos", - "PhysicsTools/Heppy", - "PhysicsTools/HeppyCore", - "PhysicsTools/IsolationUtils", - "PhysicsTools/JetCharge", - "PhysicsTools/JetExamples", - "PhysicsTools/JetMCAlgos", - "PhysicsTools/JetMCUtils", - "PhysicsTools/KinFitter", - "PhysicsTools/MVAComputer", - "PhysicsTools/MVATrainer", - "PhysicsTools/ParallelAnalysis", - "PhysicsTools/PatExamples", - "PhysicsTools/PythonAnalysis", - "PhysicsTools/RecoAlgos", - "PhysicsTools/RecoUtils", - "PhysicsTools/RooStatsCms", - "PhysicsTools/TagAndProbe", - "PhysicsTools/UtilAlgos", - "PhysicsTools/Utilities", - "QCDAnalysis/ChargedHadronSpectra", - "QCDAnalysis/Configuration", - "QCDAnalysis/Skimming", - "QCDAnalysis/UEAnalysis", - "SUSYBSMAnalysis/Configuration", - "SUSYBSMAnalysis/HSCP", - "SUSYBSMAnalysis/Skimming", - "TBDataFormats/EcalTBObjects", - "TBDataFormats/HcalTBObjects", - "TopQuarkAnalysis/Configuration", - "TopQuarkAnalysis/Examples", - "TopQuarkAnalysis/TopEventProducers", - "TopQuarkAnalysis/TopEventSelection", - "TopQuarkAnalysis/TopHitFit", - "TopQuarkAnalysis/TopJetCombination", - "TopQuarkAnalysis/TopKinFitter", - "TopQuarkAnalysis/TopObjectResolutions", - "TopQuarkAnalysis/TopPairBSM", - "TopQuarkAnalysis/TopSkimming", - "TopQuarkAnalysis/TopTools", - "Utilities/BinningTools", - ], - "core": [ - ".clang-format", - ".clang-tidy", - ".gitignore", - "BigProducts/Simulation", - "Configuration/SVSuite", - "DataFormats/CLHEP", - "DataFormats/Common", - "DataFormats/FWLite", - "DataFormats/Histograms", - "DataFormats/Provenance", - "DataFormats/Scouting", - "DataFormats/StdDictionaries", - "DataFormats/Streamer", - "DataFormats/TestObjects", - "DataFormats/WrappedStdDictionaries", - "Documentation/CodingRules", - "Documentation/DataFormats", - "Documentation/PhysicsTools", - "Documentation/ReferenceManualScripts", - "FWCore/Catalog", - "FWCore/Common", - "FWCore/Concurrency", - "FWCore/FWLite", - "FWCore/Framework", - "FWCore/GuiBrowsers", - "FWCore/Integration", - "FWCore/MessageLogger", - "FWCore/MessageService", - "FWCore/Modules", - "FWCore/ParameterSet", - "FWCore/ParameterSetReader", - "FWCore/PluginManager", - "FWCore/PrescaleService", - "FWCore/PyDevParameterSet", - "FWCore/Python", - "FWCore/PythonFramework", - "FWCore/PythonParameterSet", - "FWCore/PythonUtilities", - "FWCore/ROOTTests", - "FWCore/Reflection", - "FWCore/RootAutoLibraryLoader", - "FWCore/SOA", - "FWCore/ServiceRegistry", - "FWCore/Services", - "FWCore/SharedMemory", - "FWCore/Skeletons", - "FWCore/Sources", - "FWCore/TFWLiteSelector", - "FWCore/TFWLiteSelectorTest", - "FWCore/TestProcessor", - "FWCore/Utilities", - "FWCore/Version", - "IOMC/Input", - "IOMC/RandomEngine", - "IOPool/Common", - 
"IOPool/Input", - "IOPool/Output", - "IOPool/Provenance", - "IOPool/SecondaryInput", - "IOPool/Streamer", - "IOPool/TFileAdaptor", - "IgTools/IgProf", - "LICENSE", - "NOTICE", - "PerfTools/AllocMonitor", - "PerfTools/AllocMonitorPreload", - "PerfTools/Callgrind", - "PerfTools/EdmEvent", - "PerfTools/JeProf", - "PerfTools/MaxMemoryPreload", - "Utilities/DCacheAdaptor", - "Utilities/DavixAdaptor", - "Utilities/General", - "Utilities/LStoreAdaptor", - "Utilities/OpenSSL", - "Utilities/RFIOAdaptor", - "Utilities/ReleaseScripts", - "Utilities/StaticAnalyzers", - "Utilities/StorageFactory", - "Utilities/Testing", - "Utilities/Timing", - "Utilities/Xerces", - "Utilities/XrdAdaptor", - "Validation/Performance", - "Validation/Tools", - "pull_request_template.md", - ], - "daq": [ - "Configuration/SiStripDAQ", - "DataFormats/FEDRawData", - "DataFormats/OnlineMetaData", - "DataFormats/Scalers", - "DataFormats/TCDS", - "EventFilter/AutoBU", - "EventFilter/Configuration", - "EventFilter/Cosmics", - "EventFilter/ESDigiToRaw", - "EventFilter/EcalDigiToRaw", - "EventFilter/FEDInterface", - "EventFilter/Goodies", - "EventFilter/Message2log4cplus", - "EventFilter/Modules", - "EventFilter/OnlineMetaDataRawToDigi", - "EventFilter/Phase2TrackerRawToDigi", - "EventFilter/Playback", - "EventFilter/Processor", - "EventFilter/RawDataCollector", - "EventFilter/ResourceBroker", - "EventFilter/SMProxyServer", - "EventFilter/ShmBuffer", - "EventFilter/ShmReader", - "EventFilter/SiStripChannelChargeFilter", - "EventFilter/StorageManager", - "EventFilter/Utilities", - "IORawData/CSCCommissioning", - "IORawData/DaqSource", - ], - "db": [ - "CaloOnlineTools/HcalOnlineDb", - "CommonTools/ConditionDBWriter", - "CondCore/AlignmentPlugins", - "CondCore/BTauPlugins", - "CondCore/BaseKeyedPlugins", - "CondCore/BasicCondPlugins", - "CondCore/BeamSpotPlugins", - "CondCore/CSCPlugins", - "CondCore/CTPPSPlugins", - "CondCore/CalibPlugins", - "CondCore/CastorPlugins", - "CondCore/CondDB", - "CondCore/CondHDF5ESSource", - "CondCore/DBCommon", - "CondCore/DBOutputService", - "CondCore/DQMPlugins", - "CondCore/DTPlugins", - "CondCore/ESPlugins", - "CondCore/ESSources", - "CondCore/EcalPlugins", - "CondCore/EgammaPlugins", - "CondCore/GBRForestPlugins", - "CondCore/GEMPlugins", - "CondCore/GeometryPlugins", - "CondCore/HIPlugins", - "CondCore/HLTPlugins", - "CondCore/HcalPlugins", - "CondCore/IOVService", - "CondCore/JetMETPlugins", - "CondCore/L1TPlugins", - "CondCore/LuminosityPlugins", - "CondCore/MetaDataService", - "CondCore/ORA", - "CondCore/OptAlignPlugins", - "CondCore/PCLConfigPlugins", - "CondCore/PhysicsToolsPlugins", - "CondCore/PopCon", - "CondCore/RPCPlugins", - "CondCore/RecoMuonPlugins", - "CondCore/RegressionTest", - "CondCore/RunInfoPlugins", - "CondCore/SiPhase2TrackerPlugins", - "CondCore/SiPixelPlugins", - "CondCore/SiStripPlugins", - "CondCore/Utilities", - "CondFormats/Alignment", - "CondFormats/AlignmentRecord", - "CondFormats/BTauObjects", - "CondFormats/BeamSpotObjects", - "CondFormats/CSCObjects", - "CondFormats/CTPPSReadoutObjects", - "CondFormats/Calibration", - "CondFormats/CastorObjects", - "CondFormats/Common", - "CondFormats/DQMObjects", - "CondFormats/DTObjects", - "CondFormats/DataRecord", - "CondFormats/ESObjects", - "CondFormats/EcalCorrections", - "CondFormats/EcalObjects", - "CondFormats/EgammaObjects", - "CondFormats/External", - "CondFormats/GBRForest", - "CondFormats/GEMObjects", - "CondFormats/GeometryObjects", - "CondFormats/HIObjects", - "CondFormats/HLTObjects", - "CondFormats/HcalMapping", - 
"CondFormats/HcalObjects", - "CondFormats/HGCalObjects", - "CondFormats/JetMETObjects", - "CondFormats/L1TObjects", - "CondFormats/Luminosity", - "CondFormats/OptAlignObjects", - "CondFormats/PCLConfig", - "CondFormats/PhysicsToolsObjects", - "CondFormats/RPCObjects", - "CondFormats/RecoMuonObjects", - "CondFormats/RunInfo", - "CondFormats/Serialization", - "CondFormats/SerializationHelper", - "CondFormats/SiPhase2TrackerObjects", - "CondFormats/SiPixelObjects", - "CondFormats/SiPixelTransient", - "CondFormats/SiStripObjects", - "CondFormats/TotemReadoutObjects", - "CondTools/BTau", - "CondTools/BeamSpot", - "CondTools/CTPPS", - "CondTools/DQM", - "CondTools/DT", - "CondTools/Ecal", - "CondTools/GEM", - "CondTools/Geometry", - "CondTools/HLT", - "CondTools/Hcal", - "CondTools/JetMET", - "CondTools/IntegrationTest", - "CondTools/L1Trigger", - "CondTools/L1TriggerExt", - "CondTools/O2OFramework", - "CondTools/RPC", - "CondTools/RunInfo", - "CondTools/SiPixel", - "CondTools/SiStrip", - "CondTools/Utilities", - "DQM/BeamMonitor", - "OnlineDB/CSCCondDB", - "OnlineDB/EcalCondDB", - "OnlineDB/HcalCondDB", - "OnlineDB/Oracle", - "OnlineDB/SiStripConfigDb", - "OnlineDB/SiStripESSources", - "OnlineDB/SiStripO2O", - "RecoLuminosity/LumiDB", - ], - "dqm": [ - "CommonTools/TrackerMap", - "CondCore/DQMPlugins", - "CondFormats/DQMObjects", - "CondTools/DQM", - "DQM/BeamMonitor", - "DQM/CSCMonitorModule", - "DQM/CTPPS", - "DQM/CastorMonitor", - "DQM/DTMonitorClient", - "DQM/DTMonitorModule", - "DQM/DataScouting", - "DQM/EcalBarrelMonitorClient", - "DQM/EcalBarrelMonitorDbModule", - "DQM/EcalBarrelMonitorModule", - "DQM/EcalBarrelMonitorTasks", - "DQM/EcalCommon", - "DQM/EcalEndcapMonitorClient", - "DQM/EcalEndcapMonitorDbModule", - "DQM/EcalEndcapMonitorModule", - "DQM/EcalEndcapMonitorTasks", - "DQM/EcalMonitorClient", - "DQM/EcalMonitorDbModule", - "DQM/EcalMonitorTasks", - "DQM/EcalPreshowerMonitorClient", - "DQM/EcalPreshowerMonitorModule", - "DQM/GEM", - "DQM/HLTEvF", - "DQM/HLXMonitor", - "DQM/HcalCommon", - "DQM/HcalMonitorClient", - "DQM/HcalMonitorModule", - "DQM/HcalMonitorTasks", - "DQM/HcalTasks", - "DQM/Integration", - "DQM/L1TMonitor", - "DQM/L1TMonitorClient", - "DQM/MuonMonitor", - "DQM/Phase2OuterTracker", - "DQM/Physics", - "DQM/PhysicsHWW", - "DQM/PhysicsObjectsMonitoring", - "DQM/PixelLumi", - "DQM/RCTMonitor", - "DQM/RPCMonitorClient", - "DQM/RPCMonitorDigi", - "DQM/SiOuterTracker", - "DQM/SiPixelCommon", - "DQM/SiPixelHeterogeneous", - "DQM/SiPixelHistoricInfoClient", - "DQM/SiPixelMonitorClient", - "DQM/SiPixelMonitorCluster", - "DQM/SiPixelMonitorDigi", - "DQM/SiPixelMonitorRawData", - "DQM/SiPixelMonitorRecHit", - "DQM/SiPixelMonitorTrack", - "DQM/SiPixelPhase1Clusters", - "DQM/SiPixelPhase1Common", - "DQM/SiPixelPhase1Config", - "DQM/SiPixelPhase1DeadFEDChannels", - "DQM/SiPixelPhase1Digis", - "DQM/SiPixelPhase1Heterogeneous", - "DQM/SiPixelPhase1RawData", - "DQM/SiPixelPhase1RecHits", - "DQM/SiPixelPhase1Summary", - "DQM/SiPixelPhase1Track", - "DQM/SiPixelPhase1TrackClusters", - "DQM/SiPixelPhase1TrackEfficiency", - "DQM/SiPixelPhase1TrackResiduals", - "DQM/SiStripCommissioningAnalysis", - "DQM/SiStripCommissioningClients", - "DQM/SiStripCommissioningDbClients", - "DQM/SiStripCommissioningSources", - "DQM/SiStripCommissioningSummary", - "DQM/SiStripCommon", - "DQM/SiStripHistoricInfoClient", - "DQM/SiStripMonitorApproximateCluster", - "DQM/SiStripMonitorClient", - "DQM/SiStripMonitorCluster", - "DQM/SiStripMonitorDigi", - "DQM/SiStripMonitorHardware", - 
"DQM/SiStripMonitorPedestals", - "DQM/SiStripMonitorSummary", - "DQM/SiStripMonitorTrack", - "DQM/TrackerCommon", - "DQM/TrackerMonitorTrack", - "DQM/TrackerRemapper", - "DQM/TrackingMonitor", - "DQM/TrackingMonitorClient", - "DQM/TrackingMonitorSource", - "DQM/SiTrackerPhase2", - "DQM/TrigXMonitor", - "DQM/TrigXMonitorClient", - "DQMOffline/Alignment", - "DQMOffline/CalibCalo", - "DQMOffline/CalibMuon", - "DQMOffline/CalibTracker", - "DQMOffline/Configuration", - "DQMOffline/EGamma", - "DQMOffline/Ecal", - "DQMOffline/Hcal", - "DQMOffline/JetMET", - "DQMOffline/L1Trigger", - "DQMOffline/Lumi", - "DQMOffline/Muon", - "DQMOffline/MuonDPG", - "DQMOffline/PFTau", - "DQMOffline/RecoB", - "DQMOffline/Trigger", - "DQMServices/ClientConfig", - "DQMServices/Components", - "DQMServices/Core", - "DQMServices/Demo", - "DQMServices/Diagnostic", - "DQMServices/Examples", - "DQMServices/FileIO", - "DQMServices/FwkIO", - "DQMServices/StreamerIO", - "DQMServices/XdaqCollector", - "DataFormats/Histograms", - "DPGAnalysis/HcalTools", - "HLTriggerOffline/B2G", - "HLTriggerOffline/Btag", - "HLTriggerOffline/Common", - "HLTriggerOffline/Egamma", - "HLTriggerOffline/Exotica", - "HLTriggerOffline/HeavyFlavor", - "HLTriggerOffline/Higgs", - "HLTriggerOffline/JetMET", - "HLTriggerOffline/Muon", - "HLTriggerOffline/SMP", - "HLTriggerOffline/SUSYBSM", - "HLTriggerOffline/Tau", - "HLTriggerOffline/Top", - "Utilities/RelMon", - "Validation/CSCRecHits", - "Validation/CTPPS", - "Validation/CaloTowers", - "Validation/Configuration", - "Validation/DTRecHits", - "Validation/EcalClusters", - "Validation/EcalDigis", - "Validation/EcalHits", - "Validation/EcalRecHits", - "Validation/EventGenerator", - "Validation/Geometry", - "Validation/GlobalDigis", - "Validation/GlobalHits", - "Validation/GlobalRecHits", - "Validation/HGCalTriggerPrimitives", - "Validation/HGCalValidation", - "Validation/HcalDigis", - "Validation/HcalHits", - "Validation/HcalRecHits", - "Validation/HLTrigger", - "Validation/L1T", - "Validation/Mixing", - "Validation/MuonCSCDigis", - "Validation/MuonDTDigis", - "Validation/MuonGEMDigis", - "Validation/MuonGEMHits", - "Validation/MuonGEMRecHits", - "Validation/MuonHits", - "Validation/MuonIdentification", - "Validation/MuonIsolation", - "Validation/MuonME0Digis", - "Validation/MuonME0Hits", - "Validation/MuonME0RecHits", - "Validation/MuonME0Validation", - "Validation/MuonRPCDigis", - "Validation/MtdValidation", - "Validation/RPCRecHits", - "Validation/RecoB", - "Validation/RecoEgamma", - "Validation/RecoHI", - "Validation/RecoJets", - "Validation/RecoMET", - "Validation/RecoMuon", - "Validation/RecoParticleFlow", - "Validation/RecoPixelVertexing", - "Validation/RecoTau", - "Validation/RecoTrack", - "Validation/RecoVertex", - "Validation/SiOuterTrackerV", - "Validation/SiPixelPhase1ConfigV", - "Validation/SiPixelPhase1DigisV", - "Validation/SiPixelPhase1HitsV", - "Validation/SiPixelPhase1RecHitsV", - "Validation/SiPixelPhase1TrackClustersV", - "Validation/SiPixelPhase1TrackingParticleV", - "Validation/SiTrackerPhase2V", - "Validation/TrackerConfiguration", - "Validation/TrackerDigis", - "Validation/TrackerHits", - "Validation/TrackerRecHits", - "Validation/TrackingMCTruth", - ], - "externals": [ - "", - ], - "fastsim": [ - "CommonTools/BaseParticlePropagator", - "FastSimDataFormats/External", - "FastSimDataFormats/L1GlobalMuonTrigger", - "FastSimDataFormats/NuclearInteractions", - "FastSimDataFormats/CTPPSFastSim", - "FastSimulation/BaseParticlePropagator", - "FastSimDataFormats/PileUpEvents", - 
"FastSimulation/CTPPSFastGeometry", - "FastSimulation/CTPPSFastSim", - "FastSimulation/CTPPSFastTrackingProducer", - "FastSimulation/CTPPSRecHitProducer", - "FastSimulation/CTPPSSimHitProducer", - "FastSimulation/CaloGeometryTools", - "FastSimulation/CaloHitMakers", - "FastSimulation/CaloRecHitsProducer", - "FastSimulation/CalorimeterProperties", - "FastSimulation/Calorimetry", - "FastSimulation/Configuration", - "FastSimulation/EgammaElectronAlgos", - "FastSimulation/Event", - "FastSimulation/EventProducer", - "FastSimulation/ForwardDetectors", - "FastSimulation/HighLevelTrigger", - "FastSimulation/L1CaloTriggerProducer", - "FastSimulation/MaterialEffects", - "FastSimulation/MuonSimHitProducer", - "FastSimulation/Muons", - "FastSimulation/Particle", - "FastSimulation/ParticleDecay", - "FastSimulation/ParticleFlow", - "FastSimulation/ParticlePropagator", - "FastSimulation/PileUpProducer", - "FastSimulation/ShowerDevelopment", - "FastSimulation/SimplifiedGeometryPropagator", - "FastSimulation/TrackerSetup", - "FastSimulation/Tracking", - "FastSimulation/TrackingRecHitProducer", - "FastSimulation/TrajectoryManager", - "FastSimulation/Utilities", - "FastSimulation/Validation", - ], - "generators": [ - "Configuration/Generator", - "DataFormats/HepMCCandidate", - "GeneratorInterface/AMPTInterface", - "GeneratorInterface/AlpgenInterface", - "GeneratorInterface/BeamHaloGenerator", - "GeneratorInterface/CascadeInterface", - "GeneratorInterface/Configuration", - "GeneratorInterface/Core", - "GeneratorInterface/CosmicMuonGenerator", - "GeneratorInterface/EvtGenInterface", - "GeneratorInterface/ExhumeInterface", - "GeneratorInterface/ExternalDecays", - "GeneratorInterface/GenExtensions", - "GeneratorInterface/GenFilters", - "GeneratorInterface/Herwig6Interface", - "GeneratorInterface/Herwig7Interface", - "GeneratorInterface/HiGenCommon", - "GeneratorInterface/HijingInterface", - "GeneratorInterface/Hydjet2Interface", - "GeneratorInterface/HydjetInterface", - "GeneratorInterface/LHEInterface", - "GeneratorInterface/MCatNLOInterface", - "GeneratorInterface/PartonShowerVeto", - "GeneratorInterface/PhotosInterface", - "GeneratorInterface/PomwigInterface", - "GeneratorInterface/PyquenInterface", - "GeneratorInterface/Pythia6Interface", - "GeneratorInterface/Pythia8Interface", - "GeneratorInterface/ReggeGribovPartonMCInterface", - "GeneratorInterface/RivetInterface", - "GeneratorInterface/SherpaInterface", - "GeneratorInterface/TauolaInterface", - "GeneratorInterface/ThePEGInterface", - "IOMC/ParticleGuns", - "SimDataFormats/GeneratorProducts", - "SimDataFormats/HTXS", - "Validation/EventGenerator", - ], - "geometry": [ - "Configuration/Geometry", - "DataFormats/CTPPSAlignment", - "DetectorDescription/Algorithm", - "DetectorDescription/Base", - "DetectorDescription/Core", - "DetectorDescription/DDCMS", - "DetectorDescription/DDVecCMS", - "DetectorDescription/ExprAlgo", - "DetectorDescription/OfflineDBLoader", - "DetectorDescription/OnlineDBLoader", - "DetectorDescription/Parser", - "DetectorDescription/RecoGeometry", - "DetectorDescription/RegressionTest", - "DetectorDescription/Schema", - "Geometry/CMSCommonData", - "Geometry/CSCGeometry", - "Geometry/CSCGeometryBuilder", - "Geometry/CaloEventSetup", - "Geometry/CaloGeometry", - "Geometry/CaloTopology", - "Geometry/CommonDetUnit", - "Geometry/CommonTopologies", - "Geometry/DTGeometry", - "Geometry/DTGeometryBuilder", - "Geometry/EcalAlgo", - "Geometry/EcalCommonData", - "Geometry/EcalMapping", - "Geometry/EcalSimData", - "Geometry/EcalTestBeam", - 
"Geometry/FP420CommonData", - "Geometry/FP420SimData", - "Geometry/ForwardCommonData", - "Geometry/ForwardGeometry", - "Geometry/ForwardSimData", - "Geometry/GEMGeometry", - "Geometry/GEMGeometryBuilder", - "Geometry/GlobalTrackingGeometryBuilder", - "Geometry/HGCalCommonData", - "Geometry/HGCalGeometry", - "Geometry/HGCalSimData", - "Geometry/HGCalTBCommonData", - "Geometry/HcalAlgo", - "Geometry/HcalCommonData", - "Geometry/HcalEventSetup", - "Geometry/HcalSimData", - "Geometry/HcalTestBeamData", - "Geometry/HcalTowerAlgo", - "Geometry/MTCCTrackerCommonData", - "Geometry/MTDCommonData", - "Geometry/MTDGeometryBuilder", - "Geometry/MTDNumberingBuilder", - "Geometry/MTDSimData", - "Geometry/MuonCommonData", - "Geometry/MuonNumbering", - "Geometry/MuonSimData", - "Geometry/RPCGeometry", - "Geometry/RPCGeometryBuilder", - "Geometry/Records", - "Geometry/TrackerCommonData", - "Geometry/TrackerGeometryBuilder", - "Geometry/TrackerNumberingBuilder", - "Geometry/TrackerRecoData", - "Geometry/TrackerSimData", - "Geometry/TrackingGeometryAligner", - "Geometry/TwentyFivePercentTrackerCommonData", - "Geometry/VeryForwardData", - "Geometry/VeryForwardGeometry", - "Geometry/VeryForwardGeometryBuilder", - "Geometry/VeryForwardProtonTransport", - "Geometry/VeryForwardRPTopology", - "GeometryReaders/XMLIdealGeometryESSource", - "SLHCUpgradeSimulations/Geometry", - "Validation/CheckOverlap", - "Validation/Geometry", - "Validation/MuonRPCGeometry", - "Validation/Shashlik", - ], - "heterogeneous": [ - "CUDADataFormats/BeamSpot", - "CUDADataFormats/CaloCommon", - "CUDADataFormats/Common", - "CUDADataFormats/EcalDigi", - "CUDADataFormats/EcalRecHitSoA", - "CUDADataFormats/HGCal", - "CUDADataFormats/HcalDigi", - "CUDADataFormats/HcalRecHitSoA", - "CUDADataFormats/PortableTestObjects", - "CUDADataFormats/SiPixelCluster", - "CUDADataFormats/SiPixelDigi", - "CUDADataFormats/SiStripCluster", - "CUDADataFormats/StdDictionaries", - "CUDADataFormats/Track", - "CUDADataFormats/TrackingRecHit", - "CUDADataFormats/Vertex", - "DataFormats/Portable", - "DataFormats/PortableTestObjects", - "DataFormats/SoATemplate", - "HeterogeneousCore/AlpakaCore", - "HeterogeneousCore/AlpakaInterface", - "HeterogeneousCore/AlpakaServices", - "HeterogeneousCore/AlpakaTest", - "HeterogeneousCore/CUDACore", - "HeterogeneousCore/CUDAServices", - "HeterogeneousCore/CUDATest", - "HeterogeneousCore/CUDAUtilities", - "HeterogeneousCore/Common", - "HeterogeneousCore/MPICore", - "HeterogeneousCore/MPIServices", - "HeterogeneousCore/Producer", - "HeterogeneousCore/Product", - "HeterogeneousCore/ROCmCore", - "HeterogeneousCore/ROCmServices", - "HeterogeneousCore/ROCmUtilities", - "HeterogeneousCore/SonicCore", - "HeterogeneousCore/SonicTriton", - "HeterogeneousTest/AlpakaTest", - "HeterogeneousTest/CUDADevice", - "HeterogeneousTest/CUDAKernel", - "HeterogeneousTest/CUDAOpaque", - "HeterogeneousTest/CUDATest", - "HeterogeneousTest/CUDAWrapper", - "HeterogeneousTest/ROCmDevice", - "HeterogeneousTest/ROCmKernel", - "HeterogeneousTest/ROCmOpaque", - "HeterogeneousTest/ROCmWrapper", - ], - "hlt": [ - "CommonTools/TriggerUtils", - "CondCore/HLTPlugins", - "CondFormats/HLTObjects", - "CondTools/HLT", - "Configuration/HLT", - "DQM/HLTEvF", - "DataFormats/HLTReco", - "HLTrigger/Configuration", - "HLTrigger/Egamma", - "HLTrigger/HLTanalyzers", - "HLTrigger/HLTcore", - "HLTrigger/HLTexample", - "HLTrigger/HLTfilters", - "HLTrigger/JSONMonitoring", - "HLTrigger/JetMET", - "HLTrigger/Muon", - "HLTrigger/Timer", - "HLTrigger/Tools", - "HLTrigger/btau", - 
"HLTrigger/special", - "RecoEgamma/EgammaHLTAlgos", - "RecoEgamma/EgammaHLTProducers", - "RecoMuon/L2MuonIsolationProducer", - "RecoMuon/L2MuonProducer", - "RecoMuon/L2MuonSeedGenerator", - "RecoMuon/L3MuonIsolationProducer", - "RecoMuon/L3MuonProducer", - "RecoMuon/L3TrackFinder", - "RecoTauTag/HLTProducers", - ], - "l1": [ - "CalibCalorimetry/CaloTPG", - "CalibCalorimetry/EBPhase2TPGTools", - "CalibCalorimetry/EcalTPGTools", - "CalibCalorimetry/HcalTPGAlgos", - "CalibCalorimetry/HcalTPGEventSetup", - "CalibCalorimetry/HcalTPGIO", - "CommonTools/TriggerUtils", - "CondCore/L1TPlugins", - "CondFormats/L1TObjects", - "CondTools/L1Trigger", - "DQMOffline/L1Trigger", - "DataFormats/L1CSCTrackFinder", - "DataFormats/L1CaloTrigger", - "DataFormats/L1DTTrackFinder", - "DataFormats/L1GlobalCaloTrigger", - "DataFormats/L1GlobalMuonTrigger", - "DataFormats/L1GlobalTrigger", - "DataFormats/L1TCalorimeter", - "DataFormats/L1TCalorimeterPhase2", - "DataFormats/L1TCorrelator", - "DataFormats/L1TGlobal", - "DataFormats/L1THGCal", - "DataFormats/L1TMuon", - "DataFormats/L1TMuonPhase2", - "DataFormats/L1TotemRP", - "DataFormats/L1TrackTrigger", - "DataFormats/L1Trigger", - "DataFormats/L1TParticleFlow", - "DataFormats/Phase2L1Taus", - "DataFormats/LTCDigi", - "DataFormats/Scalers", - "EventFilter/CSCTFRawToDigi", - "EventFilter/DTTFRawToDigi", - "EventFilter/GctRawToDigi", - "EventFilter/L1GlobalTriggerRawToDigi", - "EventFilter/L1TRawToDigi", - "EventFilter/L1TXRawToDigi", - "EventFilter/RctRawToDigi", - "EventFilter/TwinMuxRawToDigi", - "L1Trigger/CSCCommonTrigger", - "L1Trigger/CSCTrackFinder", - "L1Trigger/CSCTriggerPrimitives", - "L1Trigger/Configuration", - "L1Trigger/DTBti", - "L1Trigger/DTPhase2Trigger", - "L1Trigger/DTSectorCollector", - "L1Trigger/DTTrackFinder", - "L1Trigger/DTTraco", - "L1Trigger/DTTrigger", - "L1Trigger/DTTriggerPhase2", - "L1Trigger/DTTriggerServerPhi", - "L1Trigger/DTTriggerServerTheta", - "L1Trigger/DTUtilities", - "L1Trigger/DemonstratorTools", - "L1Trigger/GlobalCaloTrigger", - "L1Trigger/GlobalMuonTrigger", - "L1Trigger/GlobalTrigger", - "L1Trigger/GlobalTriggerAnalyzer", - "L1Trigger/HardwareValidation", - "L1Trigger/L1CaloTrigger", - "L1Trigger/L1ExtraFromDigis", - "L1Trigger/L1GctAnalyzer", - "L1Trigger/L1TCaloLayer1", - "L1Trigger/L1TCalorimeter", - "L1Trigger/L1TCommon", - "L1Trigger/L1TGlobal", - "L1Trigger/L1TGEM", - "L1Trigger/L1THGCal", - "L1Trigger/L1THGCalUtilities", - "L1Trigger/L1TTrackMatch", - "L1Trigger/L1TMuon", - "L1Trigger/L1TMuonBarrel", - "L1Trigger/L1TMuonCPPF", - "L1Trigger/L1TMuonEndCap", - "L1Trigger/L1TMuonOverlap", - "L1Trigger/L1TMuonOverlapPhase1", - "L1Trigger/L1TNtuples", - "L1Trigger/L1TTwinMux", - "L1Trigger/L1TZDC", - "L1Trigger/ME0Trigger", - "L1Trigger/Phase2L1GMT", - "L1Trigger/Phase2L1GT", - "L1Trigger/Phase2L1ParticleFlow", - "L1Trigger/Phase2L1Taus", - "L1Trigger/RPCTechnicalTrigger", - "L1Trigger/RPCTrigger", - "L1Trigger/RPCTriggerPrimitives", - "L1Trigger/RegionalCaloTrigger", - "L1Trigger/Skimmer", - "L1Trigger/TextToDigi", - "L1Trigger/TrackerTFP", - "L1Trigger/TrackFindingAM", - "L1Trigger/TrackFindingTMTT", - "L1Trigger/TrackFindingTracklet", - "L1Trigger/TrackTrigger", - "L1Trigger/TrackerDTC", - "L1Trigger/VertexFinder", - "L1TriggerConfig/CSCTFConfigProducers", - "L1TriggerConfig/DTTPGConfig", - "L1TriggerConfig/DTTPGConfigProducers", - "L1TriggerConfig/DTTrackFinder", - "L1TriggerConfig/GMTConfigProducers", - "L1TriggerConfig/GctConfigProducers", - "L1TriggerConfig/L1CSCTPConfigProducers", - 
"L1TriggerConfig/L1GeometryProducers", - "L1TriggerConfig/L1GtConfigProducers", - "L1TriggerConfig/L1ScalesProducers", - "L1TriggerConfig/L1TConfigProducers", - "L1TriggerConfig/L1TUtmTriggerMenuProducers", - "L1TriggerConfig/RCTConfigProducers", - "L1TriggerConfig/RPCTriggerConfig", - "L1TriggerConfig/Utilities", - "L1TriggerConfig/XmlConfigTools", - "L1TriggerOffline/Configuration", - "L1TriggerOffline/L1Analyzer", - "Phase2L1Trigger/CalibratedDigis", - "SLHCUpgradeSimulations/L1CaloTrigger", - "SimCalorimetry/EcalEBTrigPrimAlgos", - "SimCalorimetry/EcalEBTrigPrimProducers", - "SimCalorimetry/EcalTrigPrimAlgos", - "SimCalorimetry/EcalTrigPrimProducers", - "SimCalorimetry/HcalTrigPrimAlgos", - "SimCalorimetry/HcalTrigPrimProducers", - "SimTracker/TrackTriggerAssociation", - "Validation/EcalTriggerPrimitives", - ], - "operations": [ - "Configuration/Applications", - "Configuration/DataProcessing", - "Configuration/Eras", - "Configuration/EventContent", - "Configuration/GlobalRuns", - "Configuration/ProcessModifiers", - "Configuration/StandardSequences", - ], - "pdmv": [ - "Configuration/PyReleaseValidation", - "Configuration/Skimming", - "DPGAnalysis/Skims", - ], - "reconstruction": [ - "CUDADataFormats/BeamSpot", - "CUDADataFormats/CaloCommon", - "CUDADataFormats/EcalDigi", - "CUDADataFormats/EcalRecHitSoA", - "CUDADataFormats/HcalDigi", - "CUDADataFormats/HcalRecHitSoA", - "CUDADataFormats/HGCal", - "CUDADataFormats/SiPixelCluster", - "CUDADataFormats/SiPixelDigi", - "CUDADataFormats/SiStripCluster", - "CUDADataFormats/Track", - "CUDADataFormats/TrackingRecHit", - "CUDADataFormats/Vertex", - "CommonTools/BaseParticlePropagator", - "CommonTools/CandAlgos", - "CommonTools/CandUtils", - "CommonTools/Clustering1D", - "CommonTools/Egamma", - "CommonTools/MVAUtils", - "CommonTools/ParticleFlow", - "CommonTools/PileupAlgos", - "CommonTools/RecoAlgos", - "CommonTools/Statistics", - "CommonTools/TrackerMap", - "CommonTools/UtilAlgos", - "CommonTools/Utils", - "CondFormats/SiPixelTransient", - "Configuration/EcalTB", - "Configuration/JetMET", - "DataFormats/BTauReco", - "DataFormats/BeamSpot", - "DataFormats/CSCRecHit", - "DataFormats/CTPPSDigi", - "DataFormats/CTPPSReco", - "DataFormats/CaloRecHit", - "DataFormats/CaloTowers", - "DataFormats/Candidate", - "DataFormats/CastorReco", - "DataFormats/DTRecHit", - "DataFormats/EcalRecHit", - "DataFormats/EgammaCandidates", - "DataFormats/EgammaReco", - "DataFormats/EgammaTrackReco", - "DataFormats/FP420Cluster", - "DataFormats/FTLRecHit", - "DataFormats/GEMRecHit", - "DataFormats/GsfTrackReco", - "DataFormats/HGCRecHit", - "DataFormats/HGCalReco", - "DataFormats/HcalIsolatedTrack", - "DataFormats/HcalRecHit", - "DataFormats/HeavyIonEvent", - "DataFormats/JetReco", - "DataFormats/Luminosity", - "DataFormats/METObjects", - "DataFormats/METReco", - "DataFormats/Math", - "DataFormats/MuonData", - "DataFormats/MuonReco", - "DataFormats/MuonSeed", - "DataFormats/OnlineMetaData", - "DataFormats/ParticleFlowCandidate", - "DataFormats/ParticleFlowReco", - "DataFormats/PatCandidates", - "DataFormats/Phase2ITPixelCluster", - "DataFormats/Phase2TrackerCluster", - "DataFormats/Phase2TrackerDigi", - "DataFormats/Phase2TrackerRecHit", - "DataFormats/PixelMatchTrackReco", - "DataFormats/ProtonReco", - "DataFormats/RPCRecHit", - "DataFormats/RecoCandidate", - "DataFormats/Scalers", - "DataFormats/SiPixelCluster", - "DataFormats/SiStripCluster", - "DataFormats/SiStripCommon", - "DataFormats/TCDS", - "DataFormats/TauReco", - "DataFormats/TotemDigi", - 
"DataFormats/TotemReco", - "DataFormats/TrackCandidate", - "DataFormats/TrackReco", - "DataFormats/TrackerCommon", - "DataFormats/TrackerRecHit2D", - "DataFormats/TrackingRecHit", - "DataFormats/TrackingSeed", - "DataFormats/TrajectorySeed", - "DataFormats/TrajectoryState", - "RecoLocalTracker/SiPhase2VectorHitBuilder", - "DataFormats/V0Candidate", - "DataFormats/VZero", - "DataFormats/VertexReco", - "EventFilter/CSCRawToDigi", - "EventFilter/CTPPSRawToDigi", - "EventFilter/CastorRawToDigi", - "EventFilter/DTRawToDigi", - "EventFilter/ESRawToDigi", - "EventFilter/EcalRawToDigi", - "EventFilter/EcalTBRawToDigi", - "EventFilter/GEMRawToDigi", - "EventFilter/HGCalRawToDigi", - "EventFilter/HcalRawToDigi", - "EventFilter/LTCRawToDigi", - "EventFilter/OnlineMetaDataRawToDigi", - "EventFilter/Phase2TrackerRawToDigi", - "EventFilter/RPCRawToDigi", - "EventFilter/ScalersRawToDigi", - "EventFilter/SiPixelRawToDigi", - "EventFilter/SiStripRawToDigi", - "EventFilter/TotemRawToDigi", - "JetMETCorrections/Configuration", - "JetMETCorrections/JetCorrector", - "JetMETCorrections/Modules", - "JetMETCorrections/Objects", - "JetMETCorrections/Type1MET", - "MagneticField/Engine", - "MagneticField/GeomBuilder", - "MagneticField/Interpolation", - "MagneticField/Layers", - "MagneticField/ParametrizedEngine", - "MagneticField/Records", - "MagneticField/UniformEngine", - "MagneticField/VolumeBasedEngine", - "MagneticField/VolumeGeometry", - "PhysicsTools/MXNet", - "PhysicsTools/IsolationAlgos", - "PhysicsTools/ONNXRuntime", - "PhysicsTools/PatAlgos", - "PhysicsTools/PatUtils", - "PhysicsTools/SelectorUtils", - "PhysicsTools/TensorFlow", - "RecoBTag/BTagTools", - "RecoBTag/CTagging", - "RecoBTag/Combined", - "RecoBTag/Configuration", - "RecoBTag/DeepFlavour", - "RecoBTag/FeatureTools", - "RecoBTag/ImpactParameter", - "RecoBTag/ImpactParameterLearning", - "RecoBTag/MXNet", - "RecoBTag/ONNXRuntime", - "RecoBTag/PerformanceDB", - "RecoBTag/PixelCluster", - "RecoBTag/Records", - "RecoBTag/SecondaryVertex", - "RecoBTag/Skimming", - "RecoBTag/SoftLepton", - "RecoBTag/TensorFlow", - "RecoBTag/TrackProbability", - "RecoBTag/XMLCalibration", - "RecoBTau/Configuration", - "RecoBTau/JetCrystalsAssociator", - "RecoBTau/JetTagComputer", - "RecoBTau/JetTagMVALearning", - "RecoCTPPS/Configuration", - "RecoCTPPS/PixelLocal", - "RecoCTPPS/ProtonReconstruction", - "RecoCTPPS/TotemRPLocal", - "RecoCaloTools/EcalChannelKiller", - "RecoCaloTools/MetaCollections", - "RecoCaloTools/Navigation", - "RecoCaloTools/Selectors", - "RecoEcal/Configuration", - "RecoEcal/EgammaClusterAlgos", - "RecoEcal/EgammaClusterProducers", - "RecoEcal/EgammaCoreTools", - "RecoEgamma/Configuration", - "RecoEgamma/EgammaElectronAlgos", - "RecoEgamma/EgammaElectronProducers", - "RecoEgamma/EgammaHFProducers", - "RecoEgamma/EgammaIsolationAlgos", - "RecoEgamma/EgammaMCTools", - "RecoEgamma/EgammaPhotonAlgos", - "RecoEgamma/EgammaPhotonProducers", - "RecoEgamma/EgammaTools", - "RecoEgamma/ElectronIdentification", - "RecoEgamma/Examples", - "RecoEgamma/PhotonIdentification", - "RecoHGCal/Configuration", - "RecoHGCal/TICL", - "RecoHI/Configuration", - "RecoHI/HiCentralityAlgos", - "RecoHI/HiEgammaAlgos", - "RecoHI/HiEvtPlaneAlgos", - "RecoHI/HiJetAlgos", - "RecoHI/HiMuonAlgos", - "RecoHI/HiTracking", - "RecoJets/Configuration", - "RecoJets/FFTJetAlgorithms", - "RecoJets/FFTJetProducers", - "RecoJets/JetAlgorithms", - "RecoJets/JetAnalyzers", - "RecoJets/JetAssociationAlgorithms", - "RecoJets/JetAssociationProducers", - "RecoJets/JetPlusTracks", - 
"RecoJets/JetProducers", - "RecoLocalCalo/CaloRecCandCreator", - "RecoLocalCalo/CaloTowersCreator", - "RecoLocalCalo/Castor", - "RecoLocalCalo/CastorReco", - "RecoLocalCalo/Configuration", - "RecoLocalCalo/EcalDeadChannelRecoveryAlgos", - "RecoLocalCalo/EcalDeadChannelRecoveryProducers", - "RecoLocalCalo/EcalRecAlgos", - "RecoLocalCalo/EcalRecProducers", - "RecoLocalCalo/HGCalRecAlgos", - "RecoLocalCalo/HGCalRecProducers", - "RecoLocalCalo/HcalLaserReco", - "RecoLocalCalo/HcalRecAlgos", - "RecoLocalCalo/HcalRecProducers", - "RecoLocalFastTime/Configuration", - "RecoLocalFastTime/FTLClusterizer", - "RecoLocalFastTime/FTLCommonAlgos", - "RecoLocalFastTime/FTLRecProducers", - "RecoLocalFastTime/Records", - "RecoLocalMuon/CSCEfficiency", - "RecoLocalMuon/CSCRecHitD", - "RecoLocalMuon/CSCSegment", - "RecoLocalMuon/CSCValidation", - "RecoLocalMuon/Configuration", - "RecoLocalMuon/DTRecHit", - "RecoLocalMuon/DTSegment", - "RecoLocalMuon/GEMCSCSegment", - "RecoLocalMuon/GEMRecHit", - "RecoLocalMuon/GEMSegment", - "RecoLocalMuon/RPCRecHit", - "RecoLocalTracker/ClusterParameterEstimator", - "RecoLocalTracker/Configuration", - "RecoLocalTracker/Phase2ITPixelClusterizer", - "RecoLocalTracker/Phase2TrackerRecHits", - "RecoLocalTracker/Records", - "RecoLocalTracker/SiPhase2Clusterizer", - "RecoLocalTracker/SiPixelClusterizer", - "RecoLocalTracker/SiPixelDigiReProducers", - "RecoLocalTracker/SiPixelRecHits", - "RecoLocalTracker/SiStripClusterizer", - "RecoLocalTracker/SiStripRecHitConverter", - "RecoLocalTracker/SiStripZeroSuppression", - "RecoLocalTracker/SubCollectionProducers", - "RecoLuminosity/LumiProducer", - "RecoLuminosity/TCPReceiver", - "RecoMET/Configuration", - "RecoMET/METAlgorithms", - "RecoMET/METFilters", - "RecoMET/METPUSubtraction", - "RecoMET/METProducers", - "RecoMTD/Configuration", - "RecoMTD/DetLayers", - "RecoMTD/MeasurementDet", - "RecoMTD/Navigation", - "RecoMTD/Records", - "RecoMTD/TimingIDTools", - "RecoMTD/TrackExtender", - "RecoMTD/TransientTrackingRecHit", - "RecoMuon/Configuration", - "RecoMuon/CosmicMuonProducer", - "RecoMuon/DetLayers", - "RecoMuon/GlobalMuonProducer", - "RecoMuon/GlobalTrackFinder", - "RecoMuon/GlobalTrackingTools", - "RecoMuon/L2MuonIsolationProducer", - "RecoMuon/L2MuonProducer", - "RecoMuon/L2MuonSeedGenerator", - "RecoMuon/L3MuonIsolationProducer", - "RecoMuon/L3MuonProducer", - "RecoMuon/L3TrackFinder", - "RecoMuon/MeasurementDet", - "RecoMuon/MuonIdentification", - "RecoMuon/MuonIsolation", - "RecoMuon/MuonIsolationProducers", - "RecoMuon/MuonRechitClusterProducer", - "RecoMuon/MuonSeedGenerator", - "RecoMuon/Navigation", - "RecoMuon/Records", - "RecoMuon/StandAloneMuonProducer", - "RecoMuon/StandAloneTrackFinder", - "RecoMuon/TrackerSeedGenerator", - "RecoMuon/TrackingTools", - "RecoMuon/TransientTrackingRecHit", - "RecoParticleFlow/Benchmark", - "RecoParticleFlow/Configuration", - "RecoParticleFlow/PFBlockProducer", - "RecoParticleFlow/PFClusterProducer", - "RecoParticleFlow/PFClusterShapeProducer", - "RecoParticleFlow/PFClusterTools", - "RecoParticleFlow/PFProducer", - "RecoParticleFlow/PFRecHitProducer", - "RecoParticleFlow/PFRootEvent", - "RecoParticleFlow/PFSimProducer", - "RecoParticleFlow/PFTracking", - "RecoPixelVZero/PixelVZeroFinding", - "RecoPixelVertexing/Configuration", - "RecoPixelVertexing/PixelLowPtUtilities", - "RecoPixelVertexing/PixelTrackFitting", - "RecoPixelVertexing/PixelTriplets", - "RecoPixelVertexing/PixelVertexFinding", - "RecoPPS/Configuration", - "RecoPPS/Local", - "RecoPPS/ProtonReconstruction", - 
"RecoRomanPot/Configuration", - "RecoRomanPot/RecoFP420", - "RecoTBCalo/EcalSimpleTBAnalysis", - "RecoTBCalo/EcalTBAnalysisCoreTools", - "RecoTBCalo/EcalTBHodoscopeReconstructor", - "RecoTBCalo/EcalTBRecProducers", - "RecoTBCalo/EcalTBTDCReconstructor", - "RecoTBCalo/HcalPlotter", - "RecoTBCalo/HcalTBObjectUnpacker", - "RecoTBCalo/HcalTBTools", - "RecoTBCalo/ZDCTBAnalysis", - "RecoTauTag/Configuration", - "RecoTauTag/ImpactParameter", - "RecoTauTag/RecoTau", - "RecoTauTag/TauTagTools", - "RecoTauTag/TrainingFiles", - "RecoTracker/CkfPattern", - "RecoTracker/Configuration", - "RecoTracker/ConversionSeedGenerators", - "RecoTracker/DeDx", - "RecoTracker/DebugTools", - "RecoTracker/DisplacedRegionalTracking", - "RecoTracker/FinalTrackSelectors", - "RecoTracker/GeometryESProducer", - "RecoTracker/IterativeTracking", - "RecoTracker/MeasurementDet", - "RecoTracker/MkFit", - "RecoTracker/MkFitCMS", - "RecoTracker/MkFitCore", - "RecoTracker/NuclearSeedGenerator", - "RecoTracker/PixelLowPtUtilities", - "RecoTracker/PixelSeeding", - "RecoTracker/PixelTrackFitting", - "RecoTracker/PixelVertexFinding", - "RecoTracker/Record", - "RecoTracker/SeedingLayerSetsHits", - "RecoTracker/SiTrackerMRHTools", - "RecoTracker/SingleTrackPattern", - "RecoTracker/SpecialSeedGenerators", - "RecoTracker/TkDetLayers", - "RecoTracker/TkHitPairs", - "RecoTracker/TkMSParametrization", - "RecoTracker/TkNavigation", - "RecoTracker/TkSeedGenerator", - "RecoTracker/TkSeedingLayers", - "RecoTracker/TkTrackingRegions", - "RecoTracker/TrackProducer", - "RecoTracker/TransientTrackingRecHit", - "RecoVZero/VZeroFinding", - "RecoVertex/AdaptiveVertexFinder", - "RecoVertex/AdaptiveVertexFit", - "RecoVertex/BeamSpotProducer", - "RecoVertex/ConfigurableVertexReco", - "RecoVertex/Configuration", - "RecoVertex/GaussianSumVertexFit", - "RecoVertex/GhostTrackFitter", - "RecoVertex/KalmanVertexFit", - "RecoVertex/KinematicFit", - "RecoVertex/KinematicFitPrimitives", - "RecoVertex/LinearizationPointFinders", - "RecoVertex/MultiVertexFit", - "RecoVertex/NuclearInteractionProducer", - "RecoVertex/PrimaryVertexProducer", - "RecoVertex/TertiaryTracksVertexFinder", - "RecoVertex/TrimmedKalmanVertexFinder", - "RecoVertex/TrimmedVertexFit", - "RecoVertex/V0Producer", - "RecoVertex/VertexPrimitives", - "RecoVertex/VertexTools", - "TrackPropagation/NavGeometry", - "TrackPropagation/NavPropagator", - "TrackPropagation/RungeKutta", - "TrackPropagation/SteppingHelixPropagator", - "TrackingTools/AnalyticalJacobians", - "TrackingTools/Configuration", - "TrackingTools/DetLayers", - "TrackingTools/GeomPropagators", - "TrackingTools/GsfTools", - "TrackingTools/GsfTracking", - "TrackingTools/IPTools", - "TrackingTools/KalmanUpdators", - "TrackingTools/MaterialEffects", - "TrackingTools/MeasurementDet", - "TrackingTools/PatternTools", - "TrackingTools/Producers", - "TrackingTools/RecoGeometry", - "TrackingTools/Records", - "TrackingTools/TrackAssociator", - "TrackingTools/TrackFitters", - "TrackingTools/TrackRefitter", - "TrackingTools/TrajectoryCleaning", - "TrackingTools/TrajectoryFiltering", - "TrackingTools/TrajectoryParametrization", - "TrackingTools/TrajectoryState", - "TrackingTools/TransientTrack", - "TrackingTools/TransientTrackingRecHit", - ], - "simulation": [ - "BigProducts/Simulation", - "DataFormats/CSCDigi", - "DataFormats/CTPPSDetId", - "DataFormats/CTPPSDigi", - "DataFormats/DTDigi", - "DataFormats/DetId", - "DataFormats/EcalDetId", - "DataFormats/EcalDigi", - "DataFormats/EcalRawData", - "DataFormats/FP420Digi", - "DataFormats/FTLDigi", - 
"DataFormats/ForwardDetId", - "DataFormats/GEMDigi", - "DataFormats/GeometryCommonDetAlgo", - "DataFormats/GeometrySurface", - "DataFormats/GeometryVector", - "DataFormats/HGCDigi", - "DataFormats/HGCalDigi", - "DataFormats/HcalDetId", - "DataFormats/HcalDigi", - "DataFormats/JetMatching", - "DataFormats/MuonDetId", - "DataFormats/RPCDigi", - "DataFormats/SiPixelDetId", - "DataFormats/SiPixelDigi", - "DataFormats/SiPixelRawData", - "DataFormats/SiStripDetId", - "DataFormats/SiStripDigi", - "DataFormats/TotemRPDetId", - "IOMC/EventVertexGenerators", - "Mixing/Base", - "SLHCUpgradeSimulations/Configuration", - "SLHCUpgradeSimulations/L1CaloTrigger", - "SimCalorimetry/CaloSimAlgos", - "SimCalorimetry/CastorSim", - "SimCalorimetry/CastorTechTrigProducer", - "SimCalorimetry/Configuration", - "SimCalorimetry/EcalElectronicsEmulation", - "SimCalorimetry/EcalSelectiveReadoutAlgos", - "SimCalorimetry/EcalSelectiveReadoutProducers", - "SimCalorimetry/EcalSimAlgos", - "SimCalorimetry/EcalSimProducers", - "SimCalorimetry/EcalTestBeam", - "SimCalorimetry/EcalTestBeamAlgos", - "SimCalorimetry/EcalZeroSuppressionAlgos", - "SimCalorimetry/EcalZeroSuppressionProducers", - "SimCalorimetry/HGCalAssociatorProducers", - "SimCalorimetry/HGCalSimProducers", - "SimCalorimetry/HGCalSimAlgos", - "SimCalorimetry/HcalSimAlgos", - "SimCalorimetry/HcalSimProducers", - "SimCalorimetry/HcalTestBeam", - "SimCalorimetry/HcalZeroSuppressionProducers", - "SimDataFormats/Associations", - "SimDataFormats/CaloAnalysis", - "SimDataFormats/CaloHit", - "SimDataFormats/CaloTest", - "SimDataFormats/CrossingFrame", - "SimDataFormats/DigiSimLinks", - "SimDataFormats/EcalTestBeam", - "SimDataFormats/EncodedEventId", - "SimDataFormats/Forward", - "SimDataFormats/GEMDigiSimLink", - "SimDataFormats/HcalTestBeam", - "SimDataFormats/HiGenData", - "SimDataFormats/JetMatching", - "SimDataFormats/PileupSummaryInfo", - "SimDataFormats/RPCDigiSimLink", - "SimDataFormats/RandomEngine", - "SimDataFormats/SimHitMaker", - "SimDataFormats/Track", - "SimDataFormats/TrackerDigiSimLink", - "SimDataFormats/TrackingAnalysis", - "SimDataFormats/TrackingHit", - "SimDataFormats/ValidationFormats", - "SimDataFormats/Vertex", - "SimFastTiming/Configuration", - "SimFastTiming/FastTimingCommon", - "SimG4CMS/Calo", - "SimG4CMS/CherenkovAnalysis", - "SimG4CMS/EcalTestBeam", - "SimG4CMS/FP420", - "SimG4CMS/Forward", - "SimG4CMS/HGCalTestBeam", - "SimG4CMS/HcalTestBeam", - "SimG4CMS/Muon", - "SimG4CMS/PPS", - "SimG4CMS/ShowerLibraryProducer", - "SimG4CMS/Tracker", - "SimG4Core/Application", - "SimG4Core/CheckSecondary", - "SimG4Core/Configuration", - "SimG4Core/CountProcesses", - "SimG4Core/CustomPhysics", - "SimG4Core/DD4hepGeometry", - "SimG4Core/GFlash", - "SimG4Core/Generators", - "SimG4Core/Geometry", - "SimG4Core/GeometryProducer", - "SimG4Core/HelpfulWatchers", - "SimG4Core/KillSecondaries", - "SimG4Core/MagneticField", - "SimG4Core/Notification", - "SimG4Core/Physics", - "SimG4Core/PhysicsLists", - "SimG4Core/PrintGeomInfo", - "SimG4Core/PrintTrackNumber", - "SimG4Core/SaveSimTrackAction", - "SimG4Core/SensitiveDetector", - "SimG4Core/TrackingVerbose", - "SimG4Core/Watcher", - "SimGeneral/CaloAnalysis", - "SimGeneral/Configuration", - "SimGeneral/DataMixingModule", - "SimGeneral/Debugging", - "SimGeneral/GFlash", - "SimGeneral/HepPDTESSource", - "SimGeneral/HepPDTRecord", - "SimGeneral/MixingModule", - "SimGeneral/NoiseGenerators", - "SimGeneral/PileupInformation", - "SimGeneral/PreMixingModule", - "SimGeneral/TrackingAnalysis", - "SimMuon/CSCDigitizer", - 
"SimMuon/Configuration", - "SimMuon/DTDigitizer", - "SimMuon/GEMDigitizer", - "SimMuon/MCTruth", - "SimMuon/Neutron", - "SimMuon/RPCDigitizer", - "SimPPS/Configuration", - "SimPPS/DirectSimProducer", - "SimPPS/PPSPixelDigiProducer", - "SimPPS/PPSSimTrackProducer", - "SimPPS/RPDigiProducer", - "SimRomanPot/Configuration", - "SimRomanPot/SimFP420", - "SimTracker/Common", - "SimTracker/Configuration", - "SimTracker/Records", - "SimTracker/SiPhase2Digitizer", - "SimTracker/SiPixelDigitizer", - "SimTracker/SiStripDigitizer", - "SimTracker/TrackAssociation", - "SimTracker/TrackAssociatorESProducer", - "SimTracker/TrackAssociatorProducers", - "SimTracker/TrackHistory", - "SimTracker/TrackTriggerAssociation", - "SimTracker/TrackerFilters", - "SimTracker/TrackerHitAssociation", - "SimTracker/TrackerMaterialAnalysis", - "SimTracker/VertexAssociation", - "SimTracker/VertexAssociatorESProducer", - "SimTransport/HectorProducer", - "SimTransport/PPSProtonTransport", - "SimTransport/TotemRPProtonTransportParametrization", - "TauAnalysis/MCEmbeddingTools", - "TrackPropagation/Geant4e", - "Utilities/PPS", - "Validation/Configuration", - ], - "upgrade": [ - "CalibTracker/SiPhase2TrackerESProducers", - "CondFormats/HGCalObjects", - "CondTools/SiPhase2Tracker", - "Configuration/Geometry", - "Configuration/PyReleaseValidation", - "CUDADataFormats/HGCal", - "DataFormats/FTLDigi", - "DataFormats/FTLRecHit", - "DataFormats/ForwardDetId", - "DataFormats/GEMDigi", - "DataFormats/GEMRecHit", - "DataFormats/HGCDigi", - "DataFormats/HGCalDigi", - "DataFormats/HGCRecHit", - "DataFormats/HGCalReco", - "DataFormats/L1TCalorimeterPhase2", - "DataFormats/L1TCorrelator", - "DataFormats/L1THGCal", - "DataFormats/L1TrackTrigger", - "DataFormats/L1TParticleFlow", - "DataFormats/Phase2ITPixelCluster", - "DataFormats/Phase2TrackerCluster", - "DataFormats/Phase2TrackerDigi", - "EventFilter/HGCalRawToDigi", - "Geometry/CMSCommonData", - "Geometry/GEMGeometry", - "Geometry/GEMGeometryBuilder", - "Geometry/HGCalCommonData", - "Geometry/HGCalGeometry", - "Geometry/HGCalSimData", - "Geometry/MTDCommonData", - "Geometry/MTDGeometryBuilder", - "Geometry/MTDNumberingBuilder", - "Geometry/MTDSimData", - "L1Trigger/L1CaloTrigger", - "L1Trigger/DTTriggerPhase2", - "L1Trigger/L1THGCal", - "L1Trigger/L1THGCalUtilities", - "L1Trigger/L1TTrackMatch", - "L1Trigger/ME0Trigger", - "L1Trigger/Phase2L1GT", - "L1Trigger/Phase2L1ParticleFlow", - "L1Trigger/TrackTrigger", - "L1Trigger/TrackerDTC", - "RecoHGCal/Configuration", - "RecoHGCal/TICL", - "RecoLocalCalo/HGCalRecAlgos", - "RecoLocalCalo/HGCalRecProducers", - "RecoLocalFastTime/Configuration", - "RecoLocalFastTime/FTLClusterizer", - "RecoLocalFastTime/FTLCommonAlgos", - "RecoLocalFastTime/FTLRecProducers", - "RecoLocalFastTime/Records", - "RecoLocalMuon/GEMCSCSegment", - "RecoLocalMuon/GEMRecHit", - "RecoLocalMuon/GEMSegment", - "RecoLocalTracker/Phase2ITPixelClusterizer", - "RecoLocalTracker/Phase2TrackerRecHits", - "RecoLocalTracker/SiPhase2Clusterizer", - "RecoLocalTracker/SiPhase2VectorHitBuilder", - "RecoMTD/Configuration", - "RecoMTD/DetLayers", - "RecoMTD/MeasurementDet", - "RecoMTD/Navigation", - "RecoMTD/Records", - "RecoMTD/TimingIDTools", - "RecoMTD/TrackExtender", - "RecoMTD/TransientTrackingRecHit", - "SLHCUpgradeSimulations/Configuration", - "SLHCUpgradeSimulations/Geometry", - "SimCalorimetry/EcalEBTrigPrimAlgos", - "SimCalorimetry/EcalEBTrigPrimProducers", - "SimCalorimetry/HGCalAssociatorProducers", - "SimCalorimetry/HGCalSimProducers", - "SimCalorimetry/HGCalSimAlgos", - 
"SimDataFormats/GEMDigiSimLink", - "SimFastTiming/Configuration", - "SimFastTiming/FastTimingCommon", - "SimG4CMS/HGCalTestBeam", - "SimMuon/GEMDigitizer", - "SimTracker/SiPhase2Digitizer", - ], - "visualization": [ - "Fireworks/Calo", - "Fireworks/Candidates", - "Fireworks/Core", - "Fireworks/Electrons", - "Fireworks/Eve", - "Fireworks/FWInterface", - "Fireworks/GenParticle", - "Fireworks/Geometry", - "Fireworks/Macros", - "Fireworks/Muons", - "Fireworks/MTD", - "Fireworks/ParticleFlow", - "Fireworks/SimData", - "Fireworks/TableWidget", - "Fireworks/Tracks", - "Fireworks/Vertices", - ], - "xpog": [ - "DataFormats/NanoAOD", - "DataFormats/PatCandidates", - "DPGAnalysis/HcalNanoAOD", - "DPGAnalysis/MuonTools", - "PhysicsTools/NanoAOD", - "PhysicsTools/NanoAODTools", - "PhysicsTools/PatAlgos", - "PhysicsTools/PatUtils", - "PhysicsTools/Scouting", - ], + "alca": [ + "Alignment/APEEstimation", + "Alignment/CTPPS", + "Alignment/CocoaAnalysis", + "Alignment/CocoaApplication", + "Alignment/CocoaDDLObjects", + "Alignment/CocoaDaq", + "Alignment/CocoaFit", + "Alignment/CocoaModel", + "Alignment/CocoaToDDL", + "Alignment/CocoaUtilities", + "Alignment/CommonAlignment", + "Alignment/CommonAlignmentAlgorithm", + "Alignment/CommonAlignmentMonitor", + "Alignment/CommonAlignmentParametrization", + "Alignment/CommonAlignmentProducer", + "Alignment/Geners", + "Alignment/HIPAlignmentAlgorithm", + "Alignment/KalmanAlignmentAlgorithm", + "Alignment/LaserAlignment", + "Alignment/LaserAlignmentSimulation", + "Alignment/LaserDQM", + "Alignment/MillePedeAlignmentAlgorithm", + "Alignment/MuonAlignment", + "Alignment/MuonAlignmentAlgorithms", + "Alignment/OfflineValidation", + "Alignment/ReferenceTrajectories", + "Alignment/SurveyAnalysis", + "Alignment/TrackerAlignment", + "Alignment/TwoBodyDecay", + "CalibCalorimetry/CaloMiscalibTools", + "CalibCalorimetry/CaloTPG", + "CalibCalorimetry/CastorCalib", + "CalibCalorimetry/Configuration", + "CalibCalorimetry/EBPhase2TPGTools", + "CalibCalorimetry/EcalCorrectionModules", + "CalibCalorimetry/EcalCorrelatedNoiseAnalysisAlgos", + "CalibCalorimetry/EcalCorrelatedNoiseAnalysisModules", + "CalibCalorimetry/EcalLaserAnalyzer", + "CalibCalorimetry/EcalLaserCorrection", + "CalibCalorimetry/EcalLaserSorting", + "CalibCalorimetry/EcalPedestalOffsets", + "CalibCalorimetry/EcalSRTools", + "CalibCalorimetry/EcalTBCondTools", + "CalibCalorimetry/EcalTPGTools", + "CalibCalorimetry/EcalTrivialCondModules", + "CalibCalorimetry/HcalAlgos", + "CalibCalorimetry/HcalPlugins", + "CalibCalorimetry/HcalStandardModules", + "CalibCalorimetry/HcalTPGAlgos", + "CalibCalorimetry/HcalTPGEventSetup", + "CalibCalorimetry/HcalTPGIO", + "CalibFormats/CaloObjects", + "CalibFormats/CaloTPG", + "CalibFormats/CastorObjects", + "CalibFormats/HcalObjects", + "CalibFormats/SiPixelObjects", + "CalibFormats/SiStripObjects", + "CalibMuon/CSCCalibration", + "CalibMuon/Configuration", + "CalibMuon/DTCalibration", + "CalibMuon/DTDigiSync", + "CalibMuon/RPCCalibration", + "CalibPPS/AlignmentRelative", + "CalibPPS/AlignmentGlobal", + "CalibPPS/ESProducers", + "CalibPPS/TimingCalibration", + "CalibTracker/Configuration", + "CalibTracker/Records", + "CalibTracker/SiPhase2TrackerESProducers", + "CalibTracker/SiPixelConnectivity", + "CalibTracker/SiPixelESProducers", + "CalibTracker/SiPixelErrorEstimation", + "CalibTracker/SiPixelGainCalibration", + "CalibTracker/SiPixelIsAliveCalibration", + "CalibTracker/SiPixelLorentzAngle", + "CalibTracker/SiPixelQuality", + "CalibTracker/SiPixelSCurveCalibration", + 
"CalibTracker/SiPixelTools", + "CalibTracker/SiStripAPVAnalysis", + "CalibTracker/SiStripChannelGain", + "CalibTracker/SiStripCommon", + "CalibTracker/SiStripDCS", + "CalibTracker/SiStripESProducers", + "CalibTracker/SiStripHitEfficiency", + "CalibTracker/SiStripHitResolution", + "CalibTracker/SiStripLorentzAngle", + "CalibTracker/SiStripQuality", + "CalibTracker/StandaloneTrackerTopology", + "Calibration/EcalAlCaRecoProducers", + "Calibration/EcalCalibAlgos", + "Calibration/EcalTBTools", + "Calibration/HcalAlCaRecoProducers", + "Calibration/HcalCalibAlgos", + "Calibration/HcalConnectivity", + "Calibration/HcalIsolatedTrackReco", + "Calibration/Hotline", + "Calibration/IsolatedParticles", + "Calibration/LumiAlCaRecoProducers", + "Calibration/PPSAlCaRecoProducer", + "Calibration/TkAlCaRecoProducers", + "Calibration/Tools", + "CaloOnlineTools/EcalTools", + "CondCore/DBCommon", + "CondCore/DBOutputService", + "CondCore/ESSources", + "CondCore/IOVService", + "CondCore/MetaDataService", + "CondCore/Modules", + "CondCore/PluginSystem", + "CondCore/PopCon", + "CondCore/TagCollection", + "CondFormats/Alignment", + "CondFormats/AlignmentRecord", + "CondFormats/BTauObjects", + "CondFormats/BeamSpotObjects", + "CondFormats/CSCObjects", + "CondFormats/CTPPSReadoutObjects", + "CondFormats/Calibration", + "CondFormats/CastorObjects", + "CondFormats/Common", + "CondFormats/DTObjects", + "CondFormats/DataRecord", + "CondFormats/ESObjects", + "CondFormats/EcalCorrections", + "CondFormats/EcalObjects", + "CondFormats/EgammaObjects", + "CondFormats/GBRForest", + "CondFormats/GeometryObjects", + "CondFormats/HGCalObjects", + "CondFormats/HIObjects", + "CondFormats/HLTObjects", + "CondFormats/HcalMapping", + "CondFormats/HcalObjects", + "CondFormats/L1TObjects", + "CondFormats/Luminosity", + "CondFormats/MFObjects", + "CondFormats/OptAlignObjects", + "CondFormats/PCLConfig", + "CondFormats/PhysicsToolsObjects", + "CondFormats/PPSObjects", + "CondFormats/RPCObjects", + "CondFormats/RecoMuonObjects", + "CondFormats/RunInfo", + "CondFormats/SiPhase2TrackerObjects", + "CondFormats/SiPixelObjects", + "CondFormats/SiStripObjects", + "CondFormats/TotemReadoutObjects", + "CondTools/BeamSpot", + "CondTools/SiPhase2Tracker", + "Configuration/AlCa", + "DataFormats/Alignment", + "DataFormats/HcalCalibObjects", + "IORawData/CaloPatterns", + "IORawData/DTCommissioning", + "IORawData/HcalTBInputService", + "IORawData/SiPixelInputSources", + "MuonAnalysis/MomentumScaleCalibration", + "RecoVertex/BeamSpotProducer", + ], + "analysis": [ + "AnalysisAlgos/SiStripClusterInfoProducer", + "AnalysisAlgos/TrackInfoProducer", + "AnalysisDataFormats/EWK", + "AnalysisDataFormats/Egamma", + "AnalysisDataFormats/SUSYBSMObjects", + "AnalysisDataFormats/SiStripClusterInfo", + "AnalysisDataFormats/TopObjects", + "AnalysisDataFormats/TrackInfo", + "CommonTools/RecoUtils", + "DPGAnalysis/SiStripTools", + "DiffractiveForwardAnalysis/Configuration", + "DiffractiveForwardAnalysis/Skimming", + "EgammaAnalysis/CSA07Skims", + "EgammaAnalysis/Configuration", + "EgammaAnalysis/ElectronIDESSources", + "EgammaAnalysis/ElectronTools", + "EgammaAnalysis/PhotonIDProducers", + "ElectroWeakAnalysis/Configuration", + "ElectroWeakAnalysis/Skimming", + "ElectroWeakAnalysis/Utilities", + "ElectroWeakAnalysis/WENu", + "ElectroWeakAnalysis/WMuNu", + "ElectroWeakAnalysis/ZEE", + "ElectroWeakAnalysis/ZMuMu", + "HeavyFlavorAnalysis/Configuration", + "HeavyFlavorAnalysis/Onia2MuMu", + "HeavyFlavorAnalysis/RecoDecay", + "HeavyFlavorAnalysis/Skimming", + 
"HeavyFlavorAnalysis/SpecificDecay", + "HeavyIonsAnalysis/Configuration", + "HiggsAnalysis/CombinedLimit", + "HiggsAnalysis/Configuration", + "HiggsAnalysis/HiggsToGammaGamma", + "HiggsAnalysis/Skimming", + "JetMETAnalysis/Configuration", + "JetMETAnalysis/METSkims", + "JetMETCorrections/Algorithms", + "JetMETCorrections/FFTJetModules", + "JetMETCorrections/FFTJetObjects", + "JetMETCorrections/InterpolationTables", + "JetMETCorrections/IsolatedParticles", + "JetMETCorrections/JetParton", + "JetMETCorrections/JetVertexAssociation", + "JetMETCorrections/MCJet", + "JetMETCorrections/MinBias", + "JetMETCorrections/TauJet", + "MuonAnalysis/Configuration", + "MuonAnalysis/MuonAssociators", + "PhysicsTools/CandAlgos", + "PhysicsTools/CandUtils", + "PhysicsTools/CondLiteIO", + "PhysicsTools/Configuration", + "PhysicsTools/FWLite", + "PhysicsTools/HepMCCandAlgos", + "PhysicsTools/Heppy", + "PhysicsTools/HeppyCore", + "PhysicsTools/IsolationUtils", + "PhysicsTools/JetCharge", + "PhysicsTools/JetExamples", + "PhysicsTools/JetMCAlgos", + "PhysicsTools/JetMCUtils", + "PhysicsTools/KinFitter", + "PhysicsTools/MVAComputer", + "PhysicsTools/MVATrainer", + "PhysicsTools/ParallelAnalysis", + "PhysicsTools/PatExamples", + "PhysicsTools/PythonAnalysis", + "PhysicsTools/RecoAlgos", + "PhysicsTools/RecoUtils", + "PhysicsTools/RooStatsCms", + "PhysicsTools/TagAndProbe", + "PhysicsTools/UtilAlgos", + "PhysicsTools/Utilities", + "QCDAnalysis/ChargedHadronSpectra", + "QCDAnalysis/Configuration", + "QCDAnalysis/Skimming", + "QCDAnalysis/UEAnalysis", + "SUSYBSMAnalysis/Configuration", + "SUSYBSMAnalysis/HSCP", + "SUSYBSMAnalysis/Skimming", + "TBDataFormats/EcalTBObjects", + "TBDataFormats/HcalTBObjects", + "TopQuarkAnalysis/Configuration", + "TopQuarkAnalysis/Examples", + "TopQuarkAnalysis/TopEventProducers", + "TopQuarkAnalysis/TopEventSelection", + "TopQuarkAnalysis/TopHitFit", + "TopQuarkAnalysis/TopJetCombination", + "TopQuarkAnalysis/TopKinFitter", + "TopQuarkAnalysis/TopObjectResolutions", + "TopQuarkAnalysis/TopPairBSM", + "TopQuarkAnalysis/TopSkimming", + "TopQuarkAnalysis/TopTools", + "Utilities/BinningTools", + ], + "core": [ + ".clang-format", + ".clang-tidy", + ".gitignore", + "BigProducts/Simulation", + "Configuration/SVSuite", + "DataFormats/CLHEP", + "DataFormats/Common", + "DataFormats/FWLite", + "DataFormats/Histograms", + "DataFormats/Provenance", + "DataFormats/Scouting", + "DataFormats/StdDictionaries", + "DataFormats/Streamer", + "DataFormats/TestObjects", + "DataFormats/WrappedStdDictionaries", + "Documentation/CodingRules", + "Documentation/DataFormats", + "Documentation/PhysicsTools", + "Documentation/ReferenceManualScripts", + "FWCore/Catalog", + "FWCore/Common", + "FWCore/Concurrency", + "FWCore/FWLite", + "FWCore/Framework", + "FWCore/GuiBrowsers", + "FWCore/Integration", + "FWCore/MessageLogger", + "FWCore/MessageService", + "FWCore/Modules", + "FWCore/ParameterSet", + "FWCore/ParameterSetReader", + "FWCore/PluginManager", + "FWCore/PrescaleService", + "FWCore/PyDevParameterSet", + "FWCore/Python", + "FWCore/PythonFramework", + "FWCore/PythonParameterSet", + "FWCore/PythonUtilities", + "FWCore/ROOTTests", + "FWCore/Reflection", + "FWCore/RootAutoLibraryLoader", + "FWCore/SOA", + "FWCore/ServiceRegistry", + "FWCore/Services", + "FWCore/SharedMemory", + "FWCore/Skeletons", + "FWCore/Sources", + "FWCore/TFWLiteSelector", + "FWCore/TFWLiteSelectorTest", + "FWCore/TestProcessor", + "FWCore/Utilities", + "FWCore/Version", + "IOMC/Input", + "IOMC/RandomEngine", + "IOPool/Common", + 
"IOPool/Input", + "IOPool/Output", + "IOPool/Provenance", + "IOPool/SecondaryInput", + "IOPool/Streamer", + "IOPool/TFileAdaptor", + "IgTools/IgProf", + "LICENSE", + "NOTICE", + "PerfTools/AllocMonitor", + "PerfTools/AllocMonitorPreload", + "PerfTools/Callgrind", + "PerfTools/EdmEvent", + "PerfTools/JeProf", + "PerfTools/MaxMemoryPreload", + "Utilities/DCacheAdaptor", + "Utilities/DavixAdaptor", + "Utilities/General", + "Utilities/LStoreAdaptor", + "Utilities/OpenSSL", + "Utilities/RFIOAdaptor", + "Utilities/ReleaseScripts", + "Utilities/StaticAnalyzers", + "Utilities/StorageFactory", + "Utilities/Testing", + "Utilities/Timing", + "Utilities/Xerces", + "Utilities/XrdAdaptor", + "Validation/Performance", + "Validation/Tools", + "pull_request_template.md", + ], + "daq": [ + "Configuration/SiStripDAQ", + "DataFormats/FEDRawData", + "DataFormats/OnlineMetaData", + "DataFormats/Scalers", + "DataFormats/TCDS", + "EventFilter/AutoBU", + "EventFilter/Configuration", + "EventFilter/Cosmics", + "EventFilter/ESDigiToRaw", + "EventFilter/EcalDigiToRaw", + "EventFilter/FEDInterface", + "EventFilter/Goodies", + "EventFilter/Message2log4cplus", + "EventFilter/Modules", + "EventFilter/OnlineMetaDataRawToDigi", + "EventFilter/Phase2TrackerRawToDigi", + "EventFilter/Playback", + "EventFilter/Processor", + "EventFilter/RawDataCollector", + "EventFilter/ResourceBroker", + "EventFilter/SMProxyServer", + "EventFilter/ShmBuffer", + "EventFilter/ShmReader", + "EventFilter/SiStripChannelChargeFilter", + "EventFilter/StorageManager", + "EventFilter/Utilities", + "IORawData/CSCCommissioning", + "IORawData/DaqSource", + ], + "db": [ + "CaloOnlineTools/HcalOnlineDb", + "CommonTools/ConditionDBWriter", + "CondCore/AlignmentPlugins", + "CondCore/BTauPlugins", + "CondCore/BaseKeyedPlugins", + "CondCore/BasicCondPlugins", + "CondCore/BeamSpotPlugins", + "CondCore/CSCPlugins", + "CondCore/CTPPSPlugins", + "CondCore/CalibPlugins", + "CondCore/CastorPlugins", + "CondCore/CondDB", + "CondCore/CondHDF5ESSource", + "CondCore/DBCommon", + "CondCore/DBOutputService", + "CondCore/DQMPlugins", + "CondCore/DTPlugins", + "CondCore/ESPlugins", + "CondCore/ESSources", + "CondCore/EcalPlugins", + "CondCore/EgammaPlugins", + "CondCore/GBRForestPlugins", + "CondCore/GEMPlugins", + "CondCore/GeometryPlugins", + "CondCore/HIPlugins", + "CondCore/HLTPlugins", + "CondCore/HcalPlugins", + "CondCore/IOVService", + "CondCore/JetMETPlugins", + "CondCore/L1TPlugins", + "CondCore/LuminosityPlugins", + "CondCore/MetaDataService", + "CondCore/ORA", + "CondCore/OptAlignPlugins", + "CondCore/PCLConfigPlugins", + "CondCore/PhysicsToolsPlugins", + "CondCore/PopCon", + "CondCore/RPCPlugins", + "CondCore/RecoMuonPlugins", + "CondCore/RegressionTest", + "CondCore/RunInfoPlugins", + "CondCore/SiPhase2TrackerPlugins", + "CondCore/SiPixelPlugins", + "CondCore/SiStripPlugins", + "CondCore/Utilities", + "CondFormats/Alignment", + "CondFormats/AlignmentRecord", + "CondFormats/BTauObjects", + "CondFormats/BeamSpotObjects", + "CondFormats/CSCObjects", + "CondFormats/CTPPSReadoutObjects", + "CondFormats/Calibration", + "CondFormats/CastorObjects", + "CondFormats/Common", + "CondFormats/DQMObjects", + "CondFormats/DTObjects", + "CondFormats/DataRecord", + "CondFormats/ESObjects", + "CondFormats/EcalCorrections", + "CondFormats/EcalObjects", + "CondFormats/EgammaObjects", + "CondFormats/External", + "CondFormats/GBRForest", + "CondFormats/GEMObjects", + "CondFormats/GeometryObjects", + "CondFormats/HIObjects", + "CondFormats/HLTObjects", + "CondFormats/HcalMapping", + 
"CondFormats/HcalObjects", + "CondFormats/HGCalObjects", + "CondFormats/JetMETObjects", + "CondFormats/L1TObjects", + "CondFormats/Luminosity", + "CondFormats/OptAlignObjects", + "CondFormats/PCLConfig", + "CondFormats/PhysicsToolsObjects", + "CondFormats/RPCObjects", + "CondFormats/RecoMuonObjects", + "CondFormats/RunInfo", + "CondFormats/Serialization", + "CondFormats/SerializationHelper", + "CondFormats/SiPhase2TrackerObjects", + "CondFormats/SiPixelObjects", + "CondFormats/SiPixelTransient", + "CondFormats/SiStripObjects", + "CondFormats/TotemReadoutObjects", + "CondTools/BTau", + "CondTools/BeamSpot", + "CondTools/CTPPS", + "CondTools/DQM", + "CondTools/DT", + "CondTools/Ecal", + "CondTools/GEM", + "CondTools/Geometry", + "CondTools/HLT", + "CondTools/Hcal", + "CondTools/JetMET", + "CondTools/IntegrationTest", + "CondTools/L1Trigger", + "CondTools/L1TriggerExt", + "CondTools/O2OFramework", + "CondTools/RPC", + "CondTools/RunInfo", + "CondTools/SiPixel", + "CondTools/SiStrip", + "CondTools/Utilities", + "DQM/BeamMonitor", + "OnlineDB/CSCCondDB", + "OnlineDB/EcalCondDB", + "OnlineDB/HcalCondDB", + "OnlineDB/Oracle", + "OnlineDB/SiStripConfigDb", + "OnlineDB/SiStripESSources", + "OnlineDB/SiStripO2O", + "RecoLuminosity/LumiDB", + ], + "dqm": [ + "CommonTools/TrackerMap", + "CondCore/DQMPlugins", + "CondFormats/DQMObjects", + "CondTools/DQM", + "DQM/BeamMonitor", + "DQM/CSCMonitorModule", + "DQM/CTPPS", + "DQM/CastorMonitor", + "DQM/DTMonitorClient", + "DQM/DTMonitorModule", + "DQM/DataScouting", + "DQM/EcalBarrelMonitorClient", + "DQM/EcalBarrelMonitorDbModule", + "DQM/EcalBarrelMonitorModule", + "DQM/EcalBarrelMonitorTasks", + "DQM/EcalCommon", + "DQM/EcalEndcapMonitorClient", + "DQM/EcalEndcapMonitorDbModule", + "DQM/EcalEndcapMonitorModule", + "DQM/EcalEndcapMonitorTasks", + "DQM/EcalMonitorClient", + "DQM/EcalMonitorDbModule", + "DQM/EcalMonitorTasks", + "DQM/EcalPreshowerMonitorClient", + "DQM/EcalPreshowerMonitorModule", + "DQM/GEM", + "DQM/HLTEvF", + "DQM/HLXMonitor", + "DQM/HcalCommon", + "DQM/HcalMonitorClient", + "DQM/HcalMonitorModule", + "DQM/HcalMonitorTasks", + "DQM/HcalTasks", + "DQM/Integration", + "DQM/L1TMonitor", + "DQM/L1TMonitorClient", + "DQM/MuonMonitor", + "DQM/Phase2OuterTracker", + "DQM/Physics", + "DQM/PhysicsHWW", + "DQM/PhysicsObjectsMonitoring", + "DQM/PixelLumi", + "DQM/RCTMonitor", + "DQM/RPCMonitorClient", + "DQM/RPCMonitorDigi", + "DQM/SiOuterTracker", + "DQM/SiPixelCommon", + "DQM/SiPixelHeterogeneous", + "DQM/SiPixelHistoricInfoClient", + "DQM/SiPixelMonitorClient", + "DQM/SiPixelMonitorCluster", + "DQM/SiPixelMonitorDigi", + "DQM/SiPixelMonitorRawData", + "DQM/SiPixelMonitorRecHit", + "DQM/SiPixelMonitorTrack", + "DQM/SiPixelPhase1Clusters", + "DQM/SiPixelPhase1Common", + "DQM/SiPixelPhase1Config", + "DQM/SiPixelPhase1DeadFEDChannels", + "DQM/SiPixelPhase1Digis", + "DQM/SiPixelPhase1Heterogeneous", + "DQM/SiPixelPhase1RawData", + "DQM/SiPixelPhase1RecHits", + "DQM/SiPixelPhase1Summary", + "DQM/SiPixelPhase1Track", + "DQM/SiPixelPhase1TrackClusters", + "DQM/SiPixelPhase1TrackEfficiency", + "DQM/SiPixelPhase1TrackResiduals", + "DQM/SiStripCommissioningAnalysis", + "DQM/SiStripCommissioningClients", + "DQM/SiStripCommissioningDbClients", + "DQM/SiStripCommissioningSources", + "DQM/SiStripCommissioningSummary", + "DQM/SiStripCommon", + "DQM/SiStripHistoricInfoClient", + "DQM/SiStripMonitorApproximateCluster", + "DQM/SiStripMonitorClient", + "DQM/SiStripMonitorCluster", + "DQM/SiStripMonitorDigi", + "DQM/SiStripMonitorHardware", + 
"DQM/SiStripMonitorPedestals", + "DQM/SiStripMonitorSummary", + "DQM/SiStripMonitorTrack", + "DQM/TrackerCommon", + "DQM/TrackerMonitorTrack", + "DQM/TrackerRemapper", + "DQM/TrackingMonitor", + "DQM/TrackingMonitorClient", + "DQM/TrackingMonitorSource", + "DQM/SiTrackerPhase2", + "DQM/TrigXMonitor", + "DQM/TrigXMonitorClient", + "DQMOffline/Alignment", + "DQMOffline/CalibCalo", + "DQMOffline/CalibMuon", + "DQMOffline/CalibTracker", + "DQMOffline/Configuration", + "DQMOffline/EGamma", + "DQMOffline/Ecal", + "DQMOffline/Hcal", + "DQMOffline/JetMET", + "DQMOffline/L1Trigger", + "DQMOffline/Lumi", + "DQMOffline/Muon", + "DQMOffline/MuonDPG", + "DQMOffline/PFTau", + "DQMOffline/RecoB", + "DQMOffline/Trigger", + "DQMServices/ClientConfig", + "DQMServices/Components", + "DQMServices/Core", + "DQMServices/Demo", + "DQMServices/Diagnostic", + "DQMServices/Examples", + "DQMServices/FileIO", + "DQMServices/FwkIO", + "DQMServices/StreamerIO", + "DQMServices/XdaqCollector", + "DataFormats/Histograms", + "DPGAnalysis/HcalTools", + "HLTriggerOffline/B2G", + "HLTriggerOffline/Btag", + "HLTriggerOffline/Common", + "HLTriggerOffline/Egamma", + "HLTriggerOffline/Exotica", + "HLTriggerOffline/HeavyFlavor", + "HLTriggerOffline/Higgs", + "HLTriggerOffline/JetMET", + "HLTriggerOffline/Muon", + "HLTriggerOffline/SMP", + "HLTriggerOffline/SUSYBSM", + "HLTriggerOffline/Tau", + "HLTriggerOffline/Top", + "Utilities/RelMon", + "Validation/CSCRecHits", + "Validation/CTPPS", + "Validation/CaloTowers", + "Validation/Configuration", + "Validation/DTRecHits", + "Validation/EcalClusters", + "Validation/EcalDigis", + "Validation/EcalHits", + "Validation/EcalRecHits", + "Validation/EventGenerator", + "Validation/Geometry", + "Validation/GlobalDigis", + "Validation/GlobalHits", + "Validation/GlobalRecHits", + "Validation/HGCalTriggerPrimitives", + "Validation/HGCalValidation", + "Validation/HcalDigis", + "Validation/HcalHits", + "Validation/HcalRecHits", + "Validation/HLTrigger", + "Validation/L1T", + "Validation/Mixing", + "Validation/MuonCSCDigis", + "Validation/MuonDTDigis", + "Validation/MuonGEMDigis", + "Validation/MuonGEMHits", + "Validation/MuonGEMRecHits", + "Validation/MuonHits", + "Validation/MuonIdentification", + "Validation/MuonIsolation", + "Validation/MuonME0Digis", + "Validation/MuonME0Hits", + "Validation/MuonME0RecHits", + "Validation/MuonME0Validation", + "Validation/MuonRPCDigis", + "Validation/MtdValidation", + "Validation/RPCRecHits", + "Validation/RecoB", + "Validation/RecoEgamma", + "Validation/RecoHI", + "Validation/RecoJets", + "Validation/RecoMET", + "Validation/RecoMuon", + "Validation/RecoParticleFlow", + "Validation/RecoPixelVertexing", + "Validation/RecoTau", + "Validation/RecoTrack", + "Validation/RecoVertex", + "Validation/SiOuterTrackerV", + "Validation/SiPixelPhase1ConfigV", + "Validation/SiPixelPhase1DigisV", + "Validation/SiPixelPhase1HitsV", + "Validation/SiPixelPhase1RecHitsV", + "Validation/SiPixelPhase1TrackClustersV", + "Validation/SiPixelPhase1TrackingParticleV", + "Validation/SiTrackerPhase2V", + "Validation/TrackerConfiguration", + "Validation/TrackerDigis", + "Validation/TrackerHits", + "Validation/TrackerRecHits", + "Validation/TrackingMCTruth", + ], + "externals": [ + "", + ], + "fastsim": [ + "CommonTools/BaseParticlePropagator", + "FastSimDataFormats/External", + "FastSimDataFormats/L1GlobalMuonTrigger", + "FastSimDataFormats/NuclearInteractions", + "FastSimDataFormats/CTPPSFastSim", + "FastSimulation/BaseParticlePropagator", + "FastSimDataFormats/PileUpEvents", + 
"FastSimulation/CTPPSFastGeometry", + "FastSimulation/CTPPSFastSim", + "FastSimulation/CTPPSFastTrackingProducer", + "FastSimulation/CTPPSRecHitProducer", + "FastSimulation/CTPPSSimHitProducer", + "FastSimulation/CaloGeometryTools", + "FastSimulation/CaloHitMakers", + "FastSimulation/CaloRecHitsProducer", + "FastSimulation/CalorimeterProperties", + "FastSimulation/Calorimetry", + "FastSimulation/Configuration", + "FastSimulation/EgammaElectronAlgos", + "FastSimulation/Event", + "FastSimulation/EventProducer", + "FastSimulation/ForwardDetectors", + "FastSimulation/HighLevelTrigger", + "FastSimulation/L1CaloTriggerProducer", + "FastSimulation/MaterialEffects", + "FastSimulation/MuonSimHitProducer", + "FastSimulation/Muons", + "FastSimulation/Particle", + "FastSimulation/ParticleDecay", + "FastSimulation/ParticleFlow", + "FastSimulation/ParticlePropagator", + "FastSimulation/PileUpProducer", + "FastSimulation/ShowerDevelopment", + "FastSimulation/SimplifiedGeometryPropagator", + "FastSimulation/TrackerSetup", + "FastSimulation/Tracking", + "FastSimulation/TrackingRecHitProducer", + "FastSimulation/TrajectoryManager", + "FastSimulation/Utilities", + "FastSimulation/Validation", + ], + "generators": [ + "Configuration/Generator", + "DataFormats/HepMCCandidate", + "GeneratorInterface/AMPTInterface", + "GeneratorInterface/AlpgenInterface", + "GeneratorInterface/BeamHaloGenerator", + "GeneratorInterface/CascadeInterface", + "GeneratorInterface/Configuration", + "GeneratorInterface/Core", + "GeneratorInterface/CosmicMuonGenerator", + "GeneratorInterface/EvtGenInterface", + "GeneratorInterface/ExhumeInterface", + "GeneratorInterface/ExternalDecays", + "GeneratorInterface/GenExtensions", + "GeneratorInterface/GenFilters", + "GeneratorInterface/Herwig6Interface", + "GeneratorInterface/Herwig7Interface", + "GeneratorInterface/HiGenCommon", + "GeneratorInterface/HijingInterface", + "GeneratorInterface/Hydjet2Interface", + "GeneratorInterface/HydjetInterface", + "GeneratorInterface/LHEInterface", + "GeneratorInterface/MCatNLOInterface", + "GeneratorInterface/PartonShowerVeto", + "GeneratorInterface/PhotosInterface", + "GeneratorInterface/PomwigInterface", + "GeneratorInterface/PyquenInterface", + "GeneratorInterface/Pythia6Interface", + "GeneratorInterface/Pythia8Interface", + "GeneratorInterface/ReggeGribovPartonMCInterface", + "GeneratorInterface/RivetInterface", + "GeneratorInterface/SherpaInterface", + "GeneratorInterface/TauolaInterface", + "GeneratorInterface/ThePEGInterface", + "IOMC/ParticleGuns", + "SimDataFormats/GeneratorProducts", + "SimDataFormats/HTXS", + "Validation/EventGenerator", + ], + "geometry": [ + "Configuration/Geometry", + "DataFormats/CTPPSAlignment", + "DetectorDescription/Algorithm", + "DetectorDescription/Base", + "DetectorDescription/Core", + "DetectorDescription/DDCMS", + "DetectorDescription/DDVecCMS", + "DetectorDescription/ExprAlgo", + "DetectorDescription/OfflineDBLoader", + "DetectorDescription/OnlineDBLoader", + "DetectorDescription/Parser", + "DetectorDescription/RecoGeometry", + "DetectorDescription/RegressionTest", + "DetectorDescription/Schema", + "Geometry/CMSCommonData", + "Geometry/CSCGeometry", + "Geometry/CSCGeometryBuilder", + "Geometry/CaloEventSetup", + "Geometry/CaloGeometry", + "Geometry/CaloTopology", + "Geometry/CommonDetUnit", + "Geometry/CommonTopologies", + "Geometry/DTGeometry", + "Geometry/DTGeometryBuilder", + "Geometry/EcalAlgo", + "Geometry/EcalCommonData", + "Geometry/EcalMapping", + "Geometry/EcalSimData", + "Geometry/EcalTestBeam", + 
"Geometry/FP420CommonData", + "Geometry/FP420SimData", + "Geometry/ForwardCommonData", + "Geometry/ForwardGeometry", + "Geometry/ForwardSimData", + "Geometry/GEMGeometry", + "Geometry/GEMGeometryBuilder", + "Geometry/GlobalTrackingGeometryBuilder", + "Geometry/HGCalCommonData", + "Geometry/HGCalGeometry", + "Geometry/HGCalSimData", + "Geometry/HGCalTBCommonData", + "Geometry/HcalAlgo", + "Geometry/HcalCommonData", + "Geometry/HcalEventSetup", + "Geometry/HcalSimData", + "Geometry/HcalTestBeamData", + "Geometry/HcalTowerAlgo", + "Geometry/MTCCTrackerCommonData", + "Geometry/MTDCommonData", + "Geometry/MTDGeometryBuilder", + "Geometry/MTDNumberingBuilder", + "Geometry/MTDSimData", + "Geometry/MuonCommonData", + "Geometry/MuonNumbering", + "Geometry/MuonSimData", + "Geometry/RPCGeometry", + "Geometry/RPCGeometryBuilder", + "Geometry/Records", + "Geometry/TrackerCommonData", + "Geometry/TrackerGeometryBuilder", + "Geometry/TrackerNumberingBuilder", + "Geometry/TrackerRecoData", + "Geometry/TrackerSimData", + "Geometry/TrackingGeometryAligner", + "Geometry/TwentyFivePercentTrackerCommonData", + "Geometry/VeryForwardData", + "Geometry/VeryForwardGeometry", + "Geometry/VeryForwardGeometryBuilder", + "Geometry/VeryForwardProtonTransport", + "Geometry/VeryForwardRPTopology", + "GeometryReaders/XMLIdealGeometryESSource", + "SLHCUpgradeSimulations/Geometry", + "Validation/CheckOverlap", + "Validation/Geometry", + "Validation/MuonRPCGeometry", + "Validation/Shashlik", + ], + "heterogeneous": [ + "CUDADataFormats/BeamSpot", + "CUDADataFormats/CaloCommon", + "CUDADataFormats/Common", + "CUDADataFormats/EcalDigi", + "CUDADataFormats/EcalRecHitSoA", + "CUDADataFormats/HGCal", + "CUDADataFormats/HcalDigi", + "CUDADataFormats/HcalRecHitSoA", + "CUDADataFormats/PortableTestObjects", + "CUDADataFormats/SiPixelCluster", + "CUDADataFormats/SiPixelDigi", + "CUDADataFormats/SiStripCluster", + "CUDADataFormats/StdDictionaries", + "CUDADataFormats/Track", + "CUDADataFormats/TrackingRecHit", + "CUDADataFormats/Vertex", + "DataFormats/Portable", + "DataFormats/PortableTestObjects", + "DataFormats/SoATemplate", + "HeterogeneousCore/AlpakaCore", + "HeterogeneousCore/AlpakaInterface", + "HeterogeneousCore/AlpakaServices", + "HeterogeneousCore/AlpakaTest", + "HeterogeneousCore/CUDACore", + "HeterogeneousCore/CUDAServices", + "HeterogeneousCore/CUDATest", + "HeterogeneousCore/CUDAUtilities", + "HeterogeneousCore/Common", + "HeterogeneousCore/MPICore", + "HeterogeneousCore/MPIServices", + "HeterogeneousCore/Producer", + "HeterogeneousCore/Product", + "HeterogeneousCore/ROCmCore", + "HeterogeneousCore/ROCmServices", + "HeterogeneousCore/ROCmUtilities", + "HeterogeneousCore/SonicCore", + "HeterogeneousCore/SonicTriton", + "HeterogeneousTest/AlpakaTest", + "HeterogeneousTest/CUDADevice", + "HeterogeneousTest/CUDAKernel", + "HeterogeneousTest/CUDAOpaque", + "HeterogeneousTest/CUDATest", + "HeterogeneousTest/CUDAWrapper", + "HeterogeneousTest/ROCmDevice", + "HeterogeneousTest/ROCmKernel", + "HeterogeneousTest/ROCmOpaque", + "HeterogeneousTest/ROCmWrapper", + ], + "hlt": [ + "CommonTools/TriggerUtils", + "CondCore/HLTPlugins", + "CondFormats/HLTObjects", + "CondTools/HLT", + "Configuration/HLT", + "DQM/HLTEvF", + "DataFormats/HLTReco", + "HLTrigger/Configuration", + "HLTrigger/Egamma", + "HLTrigger/HLTanalyzers", + "HLTrigger/HLTcore", + "HLTrigger/HLTexample", + "HLTrigger/HLTfilters", + "HLTrigger/JSONMonitoring", + "HLTrigger/JetMET", + "HLTrigger/Muon", + "HLTrigger/Timer", + "HLTrigger/Tools", + "HLTrigger/btau", + 
"HLTrigger/special", + "RecoEgamma/EgammaHLTAlgos", + "RecoEgamma/EgammaHLTProducers", + "RecoMuon/L2MuonIsolationProducer", + "RecoMuon/L2MuonProducer", + "RecoMuon/L2MuonSeedGenerator", + "RecoMuon/L3MuonIsolationProducer", + "RecoMuon/L3MuonProducer", + "RecoMuon/L3TrackFinder", + "RecoTauTag/HLTProducers", + ], + "l1": [ + "CalibCalorimetry/CaloTPG", + "CalibCalorimetry/EBPhase2TPGTools", + "CalibCalorimetry/EcalTPGTools", + "CalibCalorimetry/HcalTPGAlgos", + "CalibCalorimetry/HcalTPGEventSetup", + "CalibCalorimetry/HcalTPGIO", + "CommonTools/TriggerUtils", + "CondCore/L1TPlugins", + "CondFormats/L1TObjects", + "CondTools/L1Trigger", + "DQMOffline/L1Trigger", + "DataFormats/L1CSCTrackFinder", + "DataFormats/L1CaloTrigger", + "DataFormats/L1DTTrackFinder", + "DataFormats/L1GlobalCaloTrigger", + "DataFormats/L1GlobalMuonTrigger", + "DataFormats/L1GlobalTrigger", + "DataFormats/L1TCalorimeter", + "DataFormats/L1TCalorimeterPhase2", + "DataFormats/L1TCorrelator", + "DataFormats/L1TGlobal", + "DataFormats/L1THGCal", + "DataFormats/L1TMuon", + "DataFormats/L1TMuonPhase2", + "DataFormats/L1TotemRP", + "DataFormats/L1TrackTrigger", + "DataFormats/L1Trigger", + "DataFormats/L1TParticleFlow", + "DataFormats/Phase2L1Taus", + "DataFormats/LTCDigi", + "DataFormats/Scalers", + "EventFilter/CSCTFRawToDigi", + "EventFilter/DTTFRawToDigi", + "EventFilter/GctRawToDigi", + "EventFilter/L1GlobalTriggerRawToDigi", + "EventFilter/L1TRawToDigi", + "EventFilter/L1TXRawToDigi", + "EventFilter/RctRawToDigi", + "EventFilter/TwinMuxRawToDigi", + "L1Trigger/CSCCommonTrigger", + "L1Trigger/CSCTrackFinder", + "L1Trigger/CSCTriggerPrimitives", + "L1Trigger/Configuration", + "L1Trigger/DTBti", + "L1Trigger/DTPhase2Trigger", + "L1Trigger/DTSectorCollector", + "L1Trigger/DTTrackFinder", + "L1Trigger/DTTraco", + "L1Trigger/DTTrigger", + "L1Trigger/DTTriggerPhase2", + "L1Trigger/DTTriggerServerPhi", + "L1Trigger/DTTriggerServerTheta", + "L1Trigger/DTUtilities", + "L1Trigger/DemonstratorTools", + "L1Trigger/GlobalCaloTrigger", + "L1Trigger/GlobalMuonTrigger", + "L1Trigger/GlobalTrigger", + "L1Trigger/GlobalTriggerAnalyzer", + "L1Trigger/HardwareValidation", + "L1Trigger/L1CaloTrigger", + "L1Trigger/L1ExtraFromDigis", + "L1Trigger/L1GctAnalyzer", + "L1Trigger/L1TCaloLayer1", + "L1Trigger/L1TCalorimeter", + "L1Trigger/L1TCommon", + "L1Trigger/L1TGlobal", + "L1Trigger/L1TGEM", + "L1Trigger/L1THGCal", + "L1Trigger/L1THGCalUtilities", + "L1Trigger/L1TTrackMatch", + "L1Trigger/L1TMuon", + "L1Trigger/L1TMuonBarrel", + "L1Trigger/L1TMuonCPPF", + "L1Trigger/L1TMuonEndCap", + "L1Trigger/L1TMuonOverlap", + "L1Trigger/L1TMuonOverlapPhase1", + "L1Trigger/L1TNtuples", + "L1Trigger/L1TTwinMux", + "L1Trigger/L1TZDC", + "L1Trigger/ME0Trigger", + "L1Trigger/Phase2L1GMT", + "L1Trigger/Phase2L1GT", + "L1Trigger/Phase2L1ParticleFlow", + "L1Trigger/Phase2L1Taus", + "L1Trigger/RPCTechnicalTrigger", + "L1Trigger/RPCTrigger", + "L1Trigger/RPCTriggerPrimitives", + "L1Trigger/RegionalCaloTrigger", + "L1Trigger/Skimmer", + "L1Trigger/TextToDigi", + "L1Trigger/TrackerTFP", + "L1Trigger/TrackFindingAM", + "L1Trigger/TrackFindingTMTT", + "L1Trigger/TrackFindingTracklet", + "L1Trigger/TrackTrigger", + "L1Trigger/TrackerDTC", + "L1Trigger/VertexFinder", + "L1TriggerConfig/CSCTFConfigProducers", + "L1TriggerConfig/DTTPGConfig", + "L1TriggerConfig/DTTPGConfigProducers", + "L1TriggerConfig/DTTrackFinder", + "L1TriggerConfig/GMTConfigProducers", + "L1TriggerConfig/GctConfigProducers", + "L1TriggerConfig/L1CSCTPConfigProducers", + 
"L1TriggerConfig/L1GeometryProducers", + "L1TriggerConfig/L1GtConfigProducers", + "L1TriggerConfig/L1ScalesProducers", + "L1TriggerConfig/L1TConfigProducers", + "L1TriggerConfig/L1TUtmTriggerMenuProducers", + "L1TriggerConfig/RCTConfigProducers", + "L1TriggerConfig/RPCTriggerConfig", + "L1TriggerConfig/Utilities", + "L1TriggerConfig/XmlConfigTools", + "L1TriggerOffline/Configuration", + "L1TriggerOffline/L1Analyzer", + "Phase2L1Trigger/CalibratedDigis", + "SLHCUpgradeSimulations/L1CaloTrigger", + "SimCalorimetry/EcalEBTrigPrimAlgos", + "SimCalorimetry/EcalEBTrigPrimProducers", + "SimCalorimetry/EcalTrigPrimAlgos", + "SimCalorimetry/EcalTrigPrimProducers", + "SimCalorimetry/HcalTrigPrimAlgos", + "SimCalorimetry/HcalTrigPrimProducers", + "SimTracker/TrackTriggerAssociation", + "Validation/EcalTriggerPrimitives", + ], + "operations": [ + "Configuration/Applications", + "Configuration/DataProcessing", + "Configuration/Eras", + "Configuration/EventContent", + "Configuration/GlobalRuns", + "Configuration/ProcessModifiers", + "Configuration/StandardSequences", + ], + "pdmv": [ + "Configuration/PyReleaseValidation", + "Configuration/Skimming", + "DPGAnalysis/Skims", + ], + "reconstruction": [ + "CUDADataFormats/BeamSpot", + "CUDADataFormats/CaloCommon", + "CUDADataFormats/EcalDigi", + "CUDADataFormats/EcalRecHitSoA", + "CUDADataFormats/HcalDigi", + "CUDADataFormats/HcalRecHitSoA", + "CUDADataFormats/HGCal", + "CUDADataFormats/SiPixelCluster", + "CUDADataFormats/SiPixelDigi", + "CUDADataFormats/SiStripCluster", + "CUDADataFormats/Track", + "CUDADataFormats/TrackingRecHit", + "CUDADataFormats/Vertex", + "CommonTools/BaseParticlePropagator", + "CommonTools/CandAlgos", + "CommonTools/CandUtils", + "CommonTools/Clustering1D", + "CommonTools/Egamma", + "CommonTools/MVAUtils", + "CommonTools/ParticleFlow", + "CommonTools/PileupAlgos", + "CommonTools/RecoAlgos", + "CommonTools/Statistics", + "CommonTools/TrackerMap", + "CommonTools/UtilAlgos", + "CommonTools/Utils", + "CondFormats/SiPixelTransient", + "Configuration/EcalTB", + "Configuration/JetMET", + "DataFormats/BTauReco", + "DataFormats/BeamSpot", + "DataFormats/CSCRecHit", + "DataFormats/CTPPSDigi", + "DataFormats/CTPPSReco", + "DataFormats/CaloRecHit", + "DataFormats/CaloTowers", + "DataFormats/Candidate", + "DataFormats/CastorReco", + "DataFormats/DTRecHit", + "DataFormats/EcalRecHit", + "DataFormats/EgammaCandidates", + "DataFormats/EgammaReco", + "DataFormats/EgammaTrackReco", + "DataFormats/FP420Cluster", + "DataFormats/FTLRecHit", + "DataFormats/GEMRecHit", + "DataFormats/GsfTrackReco", + "DataFormats/HGCRecHit", + "DataFormats/HGCalReco", + "DataFormats/HcalIsolatedTrack", + "DataFormats/HcalRecHit", + "DataFormats/HeavyIonEvent", + "DataFormats/JetReco", + "DataFormats/Luminosity", + "DataFormats/METObjects", + "DataFormats/METReco", + "DataFormats/Math", + "DataFormats/MuonData", + "DataFormats/MuonReco", + "DataFormats/MuonSeed", + "DataFormats/OnlineMetaData", + "DataFormats/ParticleFlowCandidate", + "DataFormats/ParticleFlowReco", + "DataFormats/PatCandidates", + "DataFormats/Phase2ITPixelCluster", + "DataFormats/Phase2TrackerCluster", + "DataFormats/Phase2TrackerDigi", + "DataFormats/Phase2TrackerRecHit", + "DataFormats/PixelMatchTrackReco", + "DataFormats/ProtonReco", + "DataFormats/RPCRecHit", + "DataFormats/RecoCandidate", + "DataFormats/Scalers", + "DataFormats/SiPixelCluster", + "DataFormats/SiStripCluster", + "DataFormats/SiStripCommon", + "DataFormats/TCDS", + "DataFormats/TauReco", + "DataFormats/TotemDigi", + 
"DataFormats/TotemReco", + "DataFormats/TrackCandidate", + "DataFormats/TrackReco", + "DataFormats/TrackerCommon", + "DataFormats/TrackerRecHit2D", + "DataFormats/TrackingRecHit", + "DataFormats/TrackingSeed", + "DataFormats/TrajectorySeed", + "DataFormats/TrajectoryState", + "RecoLocalTracker/SiPhase2VectorHitBuilder", + "DataFormats/V0Candidate", + "DataFormats/VZero", + "DataFormats/VertexReco", + "EventFilter/CSCRawToDigi", + "EventFilter/CTPPSRawToDigi", + "EventFilter/CastorRawToDigi", + "EventFilter/DTRawToDigi", + "EventFilter/ESRawToDigi", + "EventFilter/EcalRawToDigi", + "EventFilter/EcalTBRawToDigi", + "EventFilter/GEMRawToDigi", + "EventFilter/HGCalRawToDigi", + "EventFilter/HcalRawToDigi", + "EventFilter/LTCRawToDigi", + "EventFilter/OnlineMetaDataRawToDigi", + "EventFilter/Phase2TrackerRawToDigi", + "EventFilter/RPCRawToDigi", + "EventFilter/ScalersRawToDigi", + "EventFilter/SiPixelRawToDigi", + "EventFilter/SiStripRawToDigi", + "EventFilter/TotemRawToDigi", + "JetMETCorrections/Configuration", + "JetMETCorrections/JetCorrector", + "JetMETCorrections/Modules", + "JetMETCorrections/Objects", + "JetMETCorrections/Type1MET", + "MagneticField/Engine", + "MagneticField/GeomBuilder", + "MagneticField/Interpolation", + "MagneticField/Layers", + "MagneticField/ParametrizedEngine", + "MagneticField/Records", + "MagneticField/UniformEngine", + "MagneticField/VolumeBasedEngine", + "MagneticField/VolumeGeometry", + "PhysicsTools/MXNet", + "PhysicsTools/IsolationAlgos", + "PhysicsTools/ONNXRuntime", + "PhysicsTools/PatAlgos", + "PhysicsTools/PatUtils", + "PhysicsTools/SelectorUtils", + "PhysicsTools/TensorFlow", + "RecoBTag/BTagTools", + "RecoBTag/CTagging", + "RecoBTag/Combined", + "RecoBTag/Configuration", + "RecoBTag/DeepFlavour", + "RecoBTag/FeatureTools", + "RecoBTag/ImpactParameter", + "RecoBTag/ImpactParameterLearning", + "RecoBTag/MXNet", + "RecoBTag/ONNXRuntime", + "RecoBTag/PerformanceDB", + "RecoBTag/PixelCluster", + "RecoBTag/Records", + "RecoBTag/SecondaryVertex", + "RecoBTag/Skimming", + "RecoBTag/SoftLepton", + "RecoBTag/TensorFlow", + "RecoBTag/TrackProbability", + "RecoBTag/XMLCalibration", + "RecoBTau/Configuration", + "RecoBTau/JetCrystalsAssociator", + "RecoBTau/JetTagComputer", + "RecoBTau/JetTagMVALearning", + "RecoCTPPS/Configuration", + "RecoCTPPS/PixelLocal", + "RecoCTPPS/ProtonReconstruction", + "RecoCTPPS/TotemRPLocal", + "RecoCaloTools/EcalChannelKiller", + "RecoCaloTools/MetaCollections", + "RecoCaloTools/Navigation", + "RecoCaloTools/Selectors", + "RecoEcal/Configuration", + "RecoEcal/EgammaClusterAlgos", + "RecoEcal/EgammaClusterProducers", + "RecoEcal/EgammaCoreTools", + "RecoEgamma/Configuration", + "RecoEgamma/EgammaElectronAlgos", + "RecoEgamma/EgammaElectronProducers", + "RecoEgamma/EgammaHFProducers", + "RecoEgamma/EgammaIsolationAlgos", + "RecoEgamma/EgammaMCTools", + "RecoEgamma/EgammaPhotonAlgos", + "RecoEgamma/EgammaPhotonProducers", + "RecoEgamma/EgammaTools", + "RecoEgamma/ElectronIdentification", + "RecoEgamma/Examples", + "RecoEgamma/PhotonIdentification", + "RecoHGCal/Configuration", + "RecoHGCal/TICL", + "RecoHI/Configuration", + "RecoHI/HiCentralityAlgos", + "RecoHI/HiEgammaAlgos", + "RecoHI/HiEvtPlaneAlgos", + "RecoHI/HiJetAlgos", + "RecoHI/HiMuonAlgos", + "RecoHI/HiTracking", + "RecoJets/Configuration", + "RecoJets/FFTJetAlgorithms", + "RecoJets/FFTJetProducers", + "RecoJets/JetAlgorithms", + "RecoJets/JetAnalyzers", + "RecoJets/JetAssociationAlgorithms", + "RecoJets/JetAssociationProducers", + "RecoJets/JetPlusTracks", + 
"RecoJets/JetProducers", + "RecoLocalCalo/CaloRecCandCreator", + "RecoLocalCalo/CaloTowersCreator", + "RecoLocalCalo/Castor", + "RecoLocalCalo/CastorReco", + "RecoLocalCalo/Configuration", + "RecoLocalCalo/EcalDeadChannelRecoveryAlgos", + "RecoLocalCalo/EcalDeadChannelRecoveryProducers", + "RecoLocalCalo/EcalRecAlgos", + "RecoLocalCalo/EcalRecProducers", + "RecoLocalCalo/HGCalRecAlgos", + "RecoLocalCalo/HGCalRecProducers", + "RecoLocalCalo/HcalLaserReco", + "RecoLocalCalo/HcalRecAlgos", + "RecoLocalCalo/HcalRecProducers", + "RecoLocalFastTime/Configuration", + "RecoLocalFastTime/FTLClusterizer", + "RecoLocalFastTime/FTLCommonAlgos", + "RecoLocalFastTime/FTLRecProducers", + "RecoLocalFastTime/Records", + "RecoLocalMuon/CSCEfficiency", + "RecoLocalMuon/CSCRecHitD", + "RecoLocalMuon/CSCSegment", + "RecoLocalMuon/CSCValidation", + "RecoLocalMuon/Configuration", + "RecoLocalMuon/DTRecHit", + "RecoLocalMuon/DTSegment", + "RecoLocalMuon/GEMCSCSegment", + "RecoLocalMuon/GEMRecHit", + "RecoLocalMuon/GEMSegment", + "RecoLocalMuon/RPCRecHit", + "RecoLocalTracker/ClusterParameterEstimator", + "RecoLocalTracker/Configuration", + "RecoLocalTracker/Phase2ITPixelClusterizer", + "RecoLocalTracker/Phase2TrackerRecHits", + "RecoLocalTracker/Records", + "RecoLocalTracker/SiPhase2Clusterizer", + "RecoLocalTracker/SiPixelClusterizer", + "RecoLocalTracker/SiPixelDigiReProducers", + "RecoLocalTracker/SiPixelRecHits", + "RecoLocalTracker/SiStripClusterizer", + "RecoLocalTracker/SiStripRecHitConverter", + "RecoLocalTracker/SiStripZeroSuppression", + "RecoLocalTracker/SubCollectionProducers", + "RecoLuminosity/LumiProducer", + "RecoLuminosity/TCPReceiver", + "RecoMET/Configuration", + "RecoMET/METAlgorithms", + "RecoMET/METFilters", + "RecoMET/METPUSubtraction", + "RecoMET/METProducers", + "RecoMTD/Configuration", + "RecoMTD/DetLayers", + "RecoMTD/MeasurementDet", + "RecoMTD/Navigation", + "RecoMTD/Records", + "RecoMTD/TimingIDTools", + "RecoMTD/TrackExtender", + "RecoMTD/TransientTrackingRecHit", + "RecoMuon/Configuration", + "RecoMuon/CosmicMuonProducer", + "RecoMuon/DetLayers", + "RecoMuon/GlobalMuonProducer", + "RecoMuon/GlobalTrackFinder", + "RecoMuon/GlobalTrackingTools", + "RecoMuon/L2MuonIsolationProducer", + "RecoMuon/L2MuonProducer", + "RecoMuon/L2MuonSeedGenerator", + "RecoMuon/L3MuonIsolationProducer", + "RecoMuon/L3MuonProducer", + "RecoMuon/L3TrackFinder", + "RecoMuon/MeasurementDet", + "RecoMuon/MuonIdentification", + "RecoMuon/MuonIsolation", + "RecoMuon/MuonIsolationProducers", + "RecoMuon/MuonRechitClusterProducer", + "RecoMuon/MuonSeedGenerator", + "RecoMuon/Navigation", + "RecoMuon/Records", + "RecoMuon/StandAloneMuonProducer", + "RecoMuon/StandAloneTrackFinder", + "RecoMuon/TrackerSeedGenerator", + "RecoMuon/TrackingTools", + "RecoMuon/TransientTrackingRecHit", + "RecoParticleFlow/Benchmark", + "RecoParticleFlow/Configuration", + "RecoParticleFlow/PFBlockProducer", + "RecoParticleFlow/PFClusterProducer", + "RecoParticleFlow/PFClusterShapeProducer", + "RecoParticleFlow/PFClusterTools", + "RecoParticleFlow/PFProducer", + "RecoParticleFlow/PFRecHitProducer", + "RecoParticleFlow/PFRootEvent", + "RecoParticleFlow/PFSimProducer", + "RecoParticleFlow/PFTracking", + "RecoPixelVZero/PixelVZeroFinding", + "RecoPixelVertexing/Configuration", + "RecoPixelVertexing/PixelLowPtUtilities", + "RecoPixelVertexing/PixelTrackFitting", + "RecoPixelVertexing/PixelTriplets", + "RecoPixelVertexing/PixelVertexFinding", + "RecoPPS/Configuration", + "RecoPPS/Local", + "RecoPPS/ProtonReconstruction", + 
"RecoRomanPot/Configuration", + "RecoRomanPot/RecoFP420", + "RecoTBCalo/EcalSimpleTBAnalysis", + "RecoTBCalo/EcalTBAnalysisCoreTools", + "RecoTBCalo/EcalTBHodoscopeReconstructor", + "RecoTBCalo/EcalTBRecProducers", + "RecoTBCalo/EcalTBTDCReconstructor", + "RecoTBCalo/HcalPlotter", + "RecoTBCalo/HcalTBObjectUnpacker", + "RecoTBCalo/HcalTBTools", + "RecoTBCalo/ZDCTBAnalysis", + "RecoTauTag/Configuration", + "RecoTauTag/ImpactParameter", + "RecoTauTag/RecoTau", + "RecoTauTag/TauTagTools", + "RecoTauTag/TrainingFiles", + "RecoTracker/CkfPattern", + "RecoTracker/Configuration", + "RecoTracker/ConversionSeedGenerators", + "RecoTracker/DeDx", + "RecoTracker/DebugTools", + "RecoTracker/DisplacedRegionalTracking", + "RecoTracker/FinalTrackSelectors", + "RecoTracker/GeometryESProducer", + "RecoTracker/IterativeTracking", + "RecoTracker/MeasurementDet", + "RecoTracker/MkFit", + "RecoTracker/MkFitCMS", + "RecoTracker/MkFitCore", + "RecoTracker/NuclearSeedGenerator", + "RecoTracker/PixelLowPtUtilities", + "RecoTracker/PixelSeeding", + "RecoTracker/PixelTrackFitting", + "RecoTracker/PixelVertexFinding", + "RecoTracker/Record", + "RecoTracker/SeedingLayerSetsHits", + "RecoTracker/SiTrackerMRHTools", + "RecoTracker/SingleTrackPattern", + "RecoTracker/SpecialSeedGenerators", + "RecoTracker/TkDetLayers", + "RecoTracker/TkHitPairs", + "RecoTracker/TkMSParametrization", + "RecoTracker/TkNavigation", + "RecoTracker/TkSeedGenerator", + "RecoTracker/TkSeedingLayers", + "RecoTracker/TkTrackingRegions", + "RecoTracker/TrackProducer", + "RecoTracker/TransientTrackingRecHit", + "RecoVZero/VZeroFinding", + "RecoVertex/AdaptiveVertexFinder", + "RecoVertex/AdaptiveVertexFit", + "RecoVertex/BeamSpotProducer", + "RecoVertex/ConfigurableVertexReco", + "RecoVertex/Configuration", + "RecoVertex/GaussianSumVertexFit", + "RecoVertex/GhostTrackFitter", + "RecoVertex/KalmanVertexFit", + "RecoVertex/KinematicFit", + "RecoVertex/KinematicFitPrimitives", + "RecoVertex/LinearizationPointFinders", + "RecoVertex/MultiVertexFit", + "RecoVertex/NuclearInteractionProducer", + "RecoVertex/PrimaryVertexProducer", + "RecoVertex/TertiaryTracksVertexFinder", + "RecoVertex/TrimmedKalmanVertexFinder", + "RecoVertex/TrimmedVertexFit", + "RecoVertex/V0Producer", + "RecoVertex/VertexPrimitives", + "RecoVertex/VertexTools", + "TrackPropagation/NavGeometry", + "TrackPropagation/NavPropagator", + "TrackPropagation/RungeKutta", + "TrackPropagation/SteppingHelixPropagator", + "TrackingTools/AnalyticalJacobians", + "TrackingTools/Configuration", + "TrackingTools/DetLayers", + "TrackingTools/GeomPropagators", + "TrackingTools/GsfTools", + "TrackingTools/GsfTracking", + "TrackingTools/IPTools", + "TrackingTools/KalmanUpdators", + "TrackingTools/MaterialEffects", + "TrackingTools/MeasurementDet", + "TrackingTools/PatternTools", + "TrackingTools/Producers", + "TrackingTools/RecoGeometry", + "TrackingTools/Records", + "TrackingTools/TrackAssociator", + "TrackingTools/TrackFitters", + "TrackingTools/TrackRefitter", + "TrackingTools/TrajectoryCleaning", + "TrackingTools/TrajectoryFiltering", + "TrackingTools/TrajectoryParametrization", + "TrackingTools/TrajectoryState", + "TrackingTools/TransientTrack", + "TrackingTools/TransientTrackingRecHit", + ], + "simulation": [ + "BigProducts/Simulation", + "DataFormats/CSCDigi", + "DataFormats/CTPPSDetId", + "DataFormats/CTPPSDigi", + "DataFormats/DTDigi", + "DataFormats/DetId", + "DataFormats/EcalDetId", + "DataFormats/EcalDigi", + "DataFormats/EcalRawData", + "DataFormats/FP420Digi", + "DataFormats/FTLDigi", + 
"DataFormats/ForwardDetId", + "DataFormats/GEMDigi", + "DataFormats/GeometryCommonDetAlgo", + "DataFormats/GeometrySurface", + "DataFormats/GeometryVector", + "DataFormats/HGCDigi", + "DataFormats/HGCalDigi", + "DataFormats/HcalDetId", + "DataFormats/HcalDigi", + "DataFormats/JetMatching", + "DataFormats/MuonDetId", + "DataFormats/RPCDigi", + "DataFormats/SiPixelDetId", + "DataFormats/SiPixelDigi", + "DataFormats/SiPixelRawData", + "DataFormats/SiStripDetId", + "DataFormats/SiStripDigi", + "DataFormats/TotemRPDetId", + "IOMC/EventVertexGenerators", + "Mixing/Base", + "SLHCUpgradeSimulations/Configuration", + "SLHCUpgradeSimulations/L1CaloTrigger", + "SimCalorimetry/CaloSimAlgos", + "SimCalorimetry/CastorSim", + "SimCalorimetry/CastorTechTrigProducer", + "SimCalorimetry/Configuration", + "SimCalorimetry/EcalElectronicsEmulation", + "SimCalorimetry/EcalSelectiveReadoutAlgos", + "SimCalorimetry/EcalSelectiveReadoutProducers", + "SimCalorimetry/EcalSimAlgos", + "SimCalorimetry/EcalSimProducers", + "SimCalorimetry/EcalTestBeam", + "SimCalorimetry/EcalTestBeamAlgos", + "SimCalorimetry/EcalZeroSuppressionAlgos", + "SimCalorimetry/EcalZeroSuppressionProducers", + "SimCalorimetry/HGCalAssociatorProducers", + "SimCalorimetry/HGCalSimProducers", + "SimCalorimetry/HGCalSimAlgos", + "SimCalorimetry/HcalSimAlgos", + "SimCalorimetry/HcalSimProducers", + "SimCalorimetry/HcalTestBeam", + "SimCalorimetry/HcalZeroSuppressionProducers", + "SimDataFormats/Associations", + "SimDataFormats/CaloAnalysis", + "SimDataFormats/CaloHit", + "SimDataFormats/CaloTest", + "SimDataFormats/CrossingFrame", + "SimDataFormats/DigiSimLinks", + "SimDataFormats/EcalTestBeam", + "SimDataFormats/EncodedEventId", + "SimDataFormats/Forward", + "SimDataFormats/GEMDigiSimLink", + "SimDataFormats/HcalTestBeam", + "SimDataFormats/HiGenData", + "SimDataFormats/JetMatching", + "SimDataFormats/PileupSummaryInfo", + "SimDataFormats/RPCDigiSimLink", + "SimDataFormats/RandomEngine", + "SimDataFormats/SimHitMaker", + "SimDataFormats/Track", + "SimDataFormats/TrackerDigiSimLink", + "SimDataFormats/TrackingAnalysis", + "SimDataFormats/TrackingHit", + "SimDataFormats/ValidationFormats", + "SimDataFormats/Vertex", + "SimFastTiming/Configuration", + "SimFastTiming/FastTimingCommon", + "SimG4CMS/Calo", + "SimG4CMS/CherenkovAnalysis", + "SimG4CMS/EcalTestBeam", + "SimG4CMS/FP420", + "SimG4CMS/Forward", + "SimG4CMS/HGCalTestBeam", + "SimG4CMS/HcalTestBeam", + "SimG4CMS/Muon", + "SimG4CMS/PPS", + "SimG4CMS/ShowerLibraryProducer", + "SimG4CMS/Tracker", + "SimG4Core/Application", + "SimG4Core/CheckSecondary", + "SimG4Core/Configuration", + "SimG4Core/CountProcesses", + "SimG4Core/CustomPhysics", + "SimG4Core/DD4hepGeometry", + "SimG4Core/GFlash", + "SimG4Core/Generators", + "SimG4Core/Geometry", + "SimG4Core/GeometryProducer", + "SimG4Core/HelpfulWatchers", + "SimG4Core/KillSecondaries", + "SimG4Core/MagneticField", + "SimG4Core/Notification", + "SimG4Core/Physics", + "SimG4Core/PhysicsLists", + "SimG4Core/PrintGeomInfo", + "SimG4Core/PrintTrackNumber", + "SimG4Core/SaveSimTrackAction", + "SimG4Core/SensitiveDetector", + "SimG4Core/TrackingVerbose", + "SimG4Core/Watcher", + "SimGeneral/CaloAnalysis", + "SimGeneral/Configuration", + "SimGeneral/DataMixingModule", + "SimGeneral/Debugging", + "SimGeneral/GFlash", + "SimGeneral/HepPDTESSource", + "SimGeneral/HepPDTRecord", + "SimGeneral/MixingModule", + "SimGeneral/NoiseGenerators", + "SimGeneral/PileupInformation", + "SimGeneral/PreMixingModule", + "SimGeneral/TrackingAnalysis", + "SimMuon/CSCDigitizer", + 
"SimMuon/Configuration", + "SimMuon/DTDigitizer", + "SimMuon/GEMDigitizer", + "SimMuon/MCTruth", + "SimMuon/Neutron", + "SimMuon/RPCDigitizer", + "SimPPS/Configuration", + "SimPPS/DirectSimProducer", + "SimPPS/PPSPixelDigiProducer", + "SimPPS/PPSSimTrackProducer", + "SimPPS/RPDigiProducer", + "SimRomanPot/Configuration", + "SimRomanPot/SimFP420", + "SimTracker/Common", + "SimTracker/Configuration", + "SimTracker/Records", + "SimTracker/SiPhase2Digitizer", + "SimTracker/SiPixelDigitizer", + "SimTracker/SiStripDigitizer", + "SimTracker/TrackAssociation", + "SimTracker/TrackAssociatorESProducer", + "SimTracker/TrackAssociatorProducers", + "SimTracker/TrackHistory", + "SimTracker/TrackTriggerAssociation", + "SimTracker/TrackerFilters", + "SimTracker/TrackerHitAssociation", + "SimTracker/TrackerMaterialAnalysis", + "SimTracker/VertexAssociation", + "SimTracker/VertexAssociatorESProducer", + "SimTransport/HectorProducer", + "SimTransport/PPSProtonTransport", + "SimTransport/TotemRPProtonTransportParametrization", + "TauAnalysis/MCEmbeddingTools", + "TrackPropagation/Geant4e", + "Utilities/PPS", + "Validation/Configuration", + ], + "upgrade": [ + "CalibTracker/SiPhase2TrackerESProducers", + "CondFormats/HGCalObjects", + "CondTools/SiPhase2Tracker", + "Configuration/Geometry", + "Configuration/PyReleaseValidation", + "CUDADataFormats/HGCal", + "DataFormats/FTLDigi", + "DataFormats/FTLRecHit", + "DataFormats/ForwardDetId", + "DataFormats/GEMDigi", + "DataFormats/GEMRecHit", + "DataFormats/HGCDigi", + "DataFormats/HGCalDigi", + "DataFormats/HGCRecHit", + "DataFormats/HGCalReco", + "DataFormats/L1TCalorimeterPhase2", + "DataFormats/L1TCorrelator", + "DataFormats/L1THGCal", + "DataFormats/L1TrackTrigger", + "DataFormats/L1TParticleFlow", + "DataFormats/Phase2ITPixelCluster", + "DataFormats/Phase2TrackerCluster", + "DataFormats/Phase2TrackerDigi", + "EventFilter/HGCalRawToDigi", + "Geometry/CMSCommonData", + "Geometry/GEMGeometry", + "Geometry/GEMGeometryBuilder", + "Geometry/HGCalCommonData", + "Geometry/HGCalGeometry", + "Geometry/HGCalSimData", + "Geometry/MTDCommonData", + "Geometry/MTDGeometryBuilder", + "Geometry/MTDNumberingBuilder", + "Geometry/MTDSimData", + "L1Trigger/L1CaloTrigger", + "L1Trigger/DTTriggerPhase2", + "L1Trigger/L1THGCal", + "L1Trigger/L1THGCalUtilities", + "L1Trigger/L1TTrackMatch", + "L1Trigger/ME0Trigger", + "L1Trigger/Phase2L1GT", + "L1Trigger/Phase2L1ParticleFlow", + "L1Trigger/TrackTrigger", + "L1Trigger/TrackerDTC", + "RecoHGCal/Configuration", + "RecoHGCal/TICL", + "RecoLocalCalo/HGCalRecAlgos", + "RecoLocalCalo/HGCalRecProducers", + "RecoLocalFastTime/Configuration", + "RecoLocalFastTime/FTLClusterizer", + "RecoLocalFastTime/FTLCommonAlgos", + "RecoLocalFastTime/FTLRecProducers", + "RecoLocalFastTime/Records", + "RecoLocalMuon/GEMCSCSegment", + "RecoLocalMuon/GEMRecHit", + "RecoLocalMuon/GEMSegment", + "RecoLocalTracker/Phase2ITPixelClusterizer", + "RecoLocalTracker/Phase2TrackerRecHits", + "RecoLocalTracker/SiPhase2Clusterizer", + "RecoLocalTracker/SiPhase2VectorHitBuilder", + "RecoMTD/Configuration", + "RecoMTD/DetLayers", + "RecoMTD/MeasurementDet", + "RecoMTD/Navigation", + "RecoMTD/Records", + "RecoMTD/TimingIDTools", + "RecoMTD/TrackExtender", + "RecoMTD/TransientTrackingRecHit", + "SLHCUpgradeSimulations/Configuration", + "SLHCUpgradeSimulations/Geometry", + "SimCalorimetry/EcalEBTrigPrimAlgos", + "SimCalorimetry/EcalEBTrigPrimProducers", + "SimCalorimetry/HGCalAssociatorProducers", + "SimCalorimetry/HGCalSimProducers", + "SimCalorimetry/HGCalSimAlgos", + 
"SimDataFormats/GEMDigiSimLink", + "SimFastTiming/Configuration", + "SimFastTiming/FastTimingCommon", + "SimG4CMS/HGCalTestBeam", + "SimMuon/GEMDigitizer", + "SimTracker/SiPhase2Digitizer", + ], + "visualization": [ + "Fireworks/Calo", + "Fireworks/Candidates", + "Fireworks/Core", + "Fireworks/Electrons", + "Fireworks/Eve", + "Fireworks/FWInterface", + "Fireworks/GenParticle", + "Fireworks/Geometry", + "Fireworks/Macros", + "Fireworks/Muons", + "Fireworks/MTD", + "Fireworks/ParticleFlow", + "Fireworks/SimData", + "Fireworks/TableWidget", + "Fireworks/Tracks", + "Fireworks/Vertices", + ], + "xpog": [ + "DataFormats/NanoAOD", + "DataFormats/PatCandidates", + "DPGAnalysis/HcalNanoAOD", + "DPGAnalysis/MuonTools", + "PhysicsTools/NanoAOD", + "PhysicsTools/NanoAODTools", + "PhysicsTools/PatAlgos", + "PhysicsTools/PatUtils", + "PhysicsTools/Scouting", + ], } -#Just like CMSSW_CATEGORIES but it creates non-blocking (no signature needed) github labels -#This can be used to automatically create non-blocking labels for selected packages instead of -#explicit `type label`. Only valid dpg/pog groups can be added as labels -#Format: label = [regexp1, regexp2] -#where regexpX can be part of cmssw package e.g SiStrip will match all cmssw packages with SiStrip in them +# Just like CMSSW_CATEGORIES but it creates non-blocking (no signature needed) github labels +# This can be used to automatically create non-blocking labels for selected packages instead of +# explicit `type label`. Only valid dpg/pog groups can be added as labels +# Format: label = [regexp1, regexp2] +# where regexpX can be part of cmssw package e.g SiStrip will match all cmssw packages with SiStrip in them # "jetmet": [ "SubSystem1/", "SubSystem2/Package"] CMSSW_LABELS = { - "trk": [ - "Alignment/APEEstimation", - "Alignment/HIPAlignmentAlgorithm", - "Alignment/KalmanAlignmentAlgorithm", - "Alignment/Laser", - "Alignment/MillePedeAlignmentAlgorithm", - "Alignment/OfflineValidation", - "Alignment/ReferenceTrajectories", - "Alignment/SurveyAnalysis", - "Alignment/TrackerAlignment", - "Alignment/TwoBodyDecay", - "AnalysisAlgos/SiStripClusterInfoProducer", - "CalibFormats/SiPixel", - "CalibFormats/SiStrip", - "CalibTracker/", - "Calibration/TkAlCaRecoProducers", - "CommonTools/TrackerMap", - "CondCore/SiPhase2", - "CondCore/SiPixel", - "CondCore/SiStrip", - "CondFormats/SiPhase2", - "CondFormats/SiPixel", - "CondFormats/SiStrip" - "CondTools/SiPhase2", - "CondTools/SiPixel", - "CondTools/SiStrip", - "CUDADataFormats/SiPixel", - "CUDADataFormats/SiStrip", - "DPGAnalysis/SiStripTools", - "DataFormats/Phase2ITPixelCluster", - "DataFormats/Phase2Tracker", - "DataFormats/PixelMatchTrackReco", - "DataFormats/SiPixel", - "DataFormats/SiStrip", - "DQM/SiOuterTracker", - "DQM/SiPixel", - "DQM/SiStrip", - "DQM/SiTracker", - "DQM/Tracker", - "DQMOffline/CalibTracker", - "EventFilter/Phase2TrackerRawToDigi", - "EventFilter/SiPixelRawToDigi", - "EventFilter/SiStripRawToDigi", - "FastSimulation/TrackerSetup", - "Geometry/Tracker", - "L1Trigger/Tracker", - "OnlineDB/SiStrip", - "RecoLocalTracker/", - "SimTracker/", - "Validation/Si", - "Validation/Tracker", - ], - "tracking": [ - "AnalysisAlgos/TrackInfoProducer", - "AnalysisDataFormats/TrackInfo", - "CondCore/BeamSpot", - "CondFormats/BeamSpot", - "CondTools/BeamSpot", - "CUDADataFormats/Track", - "CUDADataFormats/Vertex", - "DataFormats/Track", - "DataFormats/VertexReco", - "DataFormats/V0Candidate", - "DataFormats/VZero", - "DQM/BeamMonitor", - "DQMOffline/RecoB", - "DQM/Tracking", - 
"FastSimulation/Tracking", - "Geometry/GlobalTracking", - "RecoHI/HiTracking", - "RecoPixelVertexing/", - "RecoTracker/", - "RecoVertex/", - "SimGeneral/TrackingAnalysis", - "TrackPropagation/", - "TrackingTools/", - "Validation/RecoPixelVertexing", - "Validation/RecoTrack", - "Validation/RecoVertex", - "Validation/Tracking" - ], - "hcal": [ - "DPGAnalysis/HcalNanoAOD" - ] + "trk": [ + "Alignment/APEEstimation", + "Alignment/HIPAlignmentAlgorithm", + "Alignment/KalmanAlignmentAlgorithm", + "Alignment/Laser", + "Alignment/MillePedeAlignmentAlgorithm", + "Alignment/OfflineValidation", + "Alignment/ReferenceTrajectories", + "Alignment/SurveyAnalysis", + "Alignment/TrackerAlignment", + "Alignment/TwoBodyDecay", + "AnalysisAlgos/SiStripClusterInfoProducer", + "CalibFormats/SiPixel", + "CalibFormats/SiStrip", + "CalibTracker/", + "Calibration/TkAlCaRecoProducers", + "CommonTools/TrackerMap", + "CondCore/SiPhase2", + "CondCore/SiPixel", + "CondCore/SiStrip", + "CondFormats/SiPhase2", + "CondFormats/SiPixel", + "CondFormats/SiStrip" "CondTools/SiPhase2", + "CondTools/SiPixel", + "CondTools/SiStrip", + "CUDADataFormats/SiPixel", + "CUDADataFormats/SiStrip", + "DPGAnalysis/SiStripTools", + "DataFormats/Phase2ITPixelCluster", + "DataFormats/Phase2Tracker", + "DataFormats/PixelMatchTrackReco", + "DataFormats/SiPixel", + "DataFormats/SiStrip", + "DQM/SiOuterTracker", + "DQM/SiPixel", + "DQM/SiStrip", + "DQM/SiTracker", + "DQM/Tracker", + "DQMOffline/CalibTracker", + "EventFilter/Phase2TrackerRawToDigi", + "EventFilter/SiPixelRawToDigi", + "EventFilter/SiStripRawToDigi", + "FastSimulation/TrackerSetup", + "Geometry/Tracker", + "L1Trigger/Tracker", + "OnlineDB/SiStrip", + "RecoLocalTracker/", + "SimTracker/", + "Validation/Si", + "Validation/Tracker", + ], + "tracking": [ + "AnalysisAlgos/TrackInfoProducer", + "AnalysisDataFormats/TrackInfo", + "CondCore/BeamSpot", + "CondFormats/BeamSpot", + "CondTools/BeamSpot", + "CUDADataFormats/Track", + "CUDADataFormats/Vertex", + "DataFormats/Track", + "DataFormats/VertexReco", + "DataFormats/V0Candidate", + "DataFormats/VZero", + "DQM/BeamMonitor", + "DQMOffline/RecoB", + "DQM/Tracking", + "FastSimulation/Tracking", + "Geometry/GlobalTracking", + "RecoHI/HiTracking", + "RecoPixelVertexing/", + "RecoTracker/", + "RecoVertex/", + "SimGeneral/TrackingAnalysis", + "TrackPropagation/", + "TrackingTools/", + "Validation/RecoPixelVertexing", + "Validation/RecoTrack", + "Validation/RecoVertex", + "Validation/Tracking", + ], + "hcal": ["DPGAnalysis/HcalNanoAOD"], } diff --git a/check-future-commits-prs.py b/check-future-commits-prs.py index ddd5e074451c..c4ff2af4197f 100755 --- a/check-future-commits-prs.py +++ b/check-future-commits-prs.py @@ -4,34 +4,55 @@ from optparse import OptionParser from socket import setdefaulttimeout from github_utils import api_rate_limits + setdefaulttimeout(120) import sys + SCRIPT_DIR = dirname(abspath(sys.argv[0])) parser = OptionParser(usage="%prog") -parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) -parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") +parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, +) +parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", +) opts, args = parser.parse_args() -if len(args) != 0: parser.error("Too many/few arguments") +if len(args) != 0: + parser.error("Too many/few arguments") -repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) -if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) +repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) +if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) import repo_config from process_pr import get_last_commit - + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) api_rate_limits(gh) repo = gh.get_repo(opts.repository) -label = [ repo.get_label("future-commit") ] -cnt=0 +label = [repo.get_label("future-commit")] +cnt = 0 for issue in repo.get_issues(state="open", sort="updated", labels=label): - if not issue.pull_request: continue - last_commit = get_last_commit(pr) - if last_commit is None: continue - if last_commit.commit.committer.date>datetime.utcnow(): continue - cnt += 1 - with open("cms-bot-%s-%s.txt" % (repo.name, cnt),"w") as prop: - prop.write("FORCE_PULL_REQUEST=%s\n" % issue.number) - prop.write("REPOSITORY=%s\n" % opts.repository) + if not issue.pull_request: + continue + last_commit = get_last_commit(pr) + if last_commit is None: + continue + if last_commit.commit.committer.date > datetime.utcnow(): + continue + cnt += 1 + with open("cms-bot-%s-%s.txt" % (repo.name, cnt), "w") as prop: + prop.write("FORCE_PULL_REQUEST=%s\n" % issue.number) + prop.write("REPOSITORY=%s\n" % opts.repository) api_rate_limits(gh) diff --git a/checkDirSizes.py b/checkDirSizes.py index dde332fee777..55a06734b943 100755 --- a/checkDirSizes.py +++ b/checkDirSizes.py @@ -8,18 +8,19 @@ def doDu(what): - error, out = run_cmd('du -k -s %s' % what) + error, out = run_cmd("du -k -s %s" % what) if error: print("Error while getting directory size.") sys.exit(1) results = [l.split() for l in out.split("\n")] - return dict([(pkg.strip().replace("src/", ''), int(sz.strip() * 1024)) - for (sz, pkg) in results]) + return dict( + [(pkg.strip().replace("src/", ""), int(sz.strip() * 1024)) for (sz, pkg) in results] + ) -if __name__ == '__main__': +if __name__ == "__main__": try: - f = open('dirSizeInfo.pkl', 'wb') + f = open("dirSizeInfo.pkl", "wb") pklr = Pickler(f, protocol=2) pklr.dump(doDu("src lib bin")) pklr.dump(doDu("src/*/*")) diff --git a/checkLibDeps.py b/checkLibDeps.py index ade009af059b..80076061c262 100755 --- a/checkLibDeps.py +++ b/checkLibDeps.py @@ -13,11 +13,11 @@ if scriptPath not in sys.path: sys.path.append(scriptPath) -sys.path.append(os.path.join(scriptPath,"python")) +sys.path.append(os.path.join(scriptPath, "python")) class LibDepChecker(object): - def __init__(self, startDir=None, plat='slc6_amd64_gcc493'): + def __init__(self, startDir=None, plat="slc6_amd64_gcc493"): self.plat = plat if not startDir: startDir = os.getcwd() @@ -25,29 +25,31 @@ def __init__(self, startDir=None, plat='slc6_amd64_gcc493'): def doCheck(self): import glob - pkgDirList = glob.glob(self.startDir + '/src/[A-Z]*/*') + + pkgDirList = glob.glob(self.startDir + "/src/[A-Z]*/*") errMap = {} for pkg in pkgDirList: if not os.path.isdir(pkg): continue - pkg = re.sub('^' + self.startDir + '/src/', '', pkg) + pkg = re.sub("^" + self.startDir + "/src/", "", pkg) missing = self.checkPkg(pkg) if missing: errMap[pkg] = missing from pickle import Pickler - summFile = open('libchk.pkl', 'wb') + + summFile = open("libchk.pkl", "wb") pklr = 
Pickler(summFile, protocol=2) pklr.dump(errMap) summFile.close() def checkPkg(self, pkg): - libName = 'lib' + pkg.replace('/', '') + '.so' - libPathName = os.path.join(self.startDir, 'lib', self.plat, libName) + libName = "lib" + pkg.replace("/", "") + ".so" + libPathName = os.path.join(self.startDir, "lib", self.plat, libName) if not os.path.exists(libPathName): return [] - cmd = '(cd ' + self.startDir + '/lib/' + self.plat + ';' - cmd += 'libchecker.pl ' + libName + ' )' + cmd = "(cd " + self.startDir + "/lib/" + self.plat + ";" + cmd += "libchecker.pl " + libName + " )" print("in ", os.getcwd(), " executing :'" + cmd + "'") log = os.popen(cmd).readlines() return log @@ -59,16 +61,16 @@ def main(): except ImportError: import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('-p', '--platform', default=None) - parser.add_argument('-n', '--dryRun', default=False, action='store_true') - parser.add_argument('-d', '--startDir', default=None) + parser.add_argument("-p", "--platform", default=None) + parser.add_argument("-n", "--dryRun", default=False, action="store_true") + parser.add_argument("-d", "--startDir", default=None) args = parser.parse_args() # Keeping it for interface compatibility reasons # noinspection PyUnusedLocal dryRun = args.dryRun - plat = args.platform or os.environ['SCRAM_ARCH'] - startDir = args.startDir or '.' + plat = args.platform or os.environ["SCRAM_ARCH"] + startDir = args.startDir or "." ldc = LibDepChecker(startDir, plat) ldc.doCheck() diff --git a/checkLogFile.py b/checkLogFile.py index e142be711aa1..67a9bdee8340 100755 --- a/checkLogFile.py +++ b/checkLogFile.py @@ -14,7 +14,7 @@ if scriptPath not in sys.path: sys.path.append(scriptPath) -sys.path.append(os.path.join(scriptPath,"python")) +sys.path.append(os.path.join(scriptPath, "python")) class LogChecker(object): @@ -40,10 +40,10 @@ def setHtml(self, html): # -------------------------------------------------------------------------------- def getTags(self): try: - prepFile = open('nohup.out', 'r') + prepFile = open("nohup.out", "r") except IOError: try: - prepFile = open('prebuild.log', 'r') + prepFile = open("prebuild.log", "r") except IOError: print("no nohup.out or prebuild.log found in . 
") raise @@ -51,7 +51,7 @@ def getTags(self): lines = prepFile.readlines() prepFile.close() - pkgTagRe = re.compile(r'Package\s*([a-zA-Z].*)\s*version\s*(.*)\s*checkout\s*(.*)') + pkgTagRe = re.compile(r"Package\s*([a-zA-Z].*)\s*version\s*(.*)\s*checkout\s*(.*)") for line in lines: pkgTagMatch = pkgTagRe.match(line) @@ -61,7 +61,7 @@ def getTags(self): stat = pkgTagMatch.group(3).strip() self.pkgVers[pkg] = vers if stat.lower() != "successful": - print("WARNING: problems checking out ", pkg, vers, stat, "(" + line + ')') + print("WARNING: problems checking out ", pkg, vers, stat, "(" + line + ")") # print "found ", str(len(self.pkgVers.keys())), 'tags' # for key, val in self.pkgVers.items(): @@ -71,12 +71,19 @@ def getTags(self): # -------------------------------------------------------------------------------- def checkLog(self, logFileName): - plat = os.environ['SCRAM_ARCH'] + plat = os.environ["SCRAM_ARCH"] pkgName = "" import re - gmakeCompRe = re.compile("gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/src/([a-zA-Z].*)\.o.*") - gmakeTestRe = re.compile("gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/test/([a-zA-Z].*)\.o.*") - gmakeLinkRe = re.compile("gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/(lib[a-zA-Z].*\.so).*") + + gmakeCompRe = re.compile( + "gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/src/([a-zA-Z].*)\.o.*" + ) + gmakeTestRe = re.compile( + "gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/test/([a-zA-Z].*)\.o.*" + ) + gmakeLinkRe = re.compile( + "gmake:.*/" + plat + r"/src/([a-zA-Z].*)/([a-zA-Z].*)/(lib[a-zA-Z].*\.so).*" + ) gmakeGeneric = re.compile("gmake:.*") self.fileIndex += 1 @@ -85,12 +92,19 @@ def checkLog(self, logFileName): print("in :", os.getcwd()) print("checking file ", logFileName) print("================================================================================\n") - htmlFileName = logFileName.replace('/', '_').replace(".", '-') + ".html" + htmlFileName = logFileName.replace("/", "_").replace(".", "-") + ".html" if self.htmlOut and self.sumLog: self.sumLog.write( - '\n') - self.sumLog.write('

Checking log file ' + logFileName + '

\n') - self.sumLog.write('\n') + '\n' + ) + self.sumLog.write( + '

Checking log file ' + + logFileName + + "

\n" + ) + self.sumLog.write("\n") self.sumLog.write("

\n") self.getTags() @@ -103,11 +117,13 @@ def checkLog(self, logFileName): if len(lines) < 200: if self.htmlOut and self.sumLog: - self.sumLog.write('Warning: suspiciously short log file!\n') + self.sumLog.write( + 'Warning: suspiciously short log file!\n' + ) nErr = 0 nWarn = 0 - errorList = {'make': []} + errorList = {"make": []} subSysCompErr = {} subSysTestErr = {} @@ -183,7 +199,7 @@ def checkLog(self, logFileName): if item[0] not in compErrPkg: compErrPkg.append(item[0]) startIndex = len(key) + len(item[0]) + 1 - print(" " + item[0] + ' (' + str(item[1])[startIndex:] + ')') + print(" " + item[0] + " (" + str(item[1])[startIndex:] + ")") print("--------------------------------------------------------------------------------") testErrPkg = [] for key, val in subSysTestErr.items(): @@ -191,18 +207,16 @@ def checkLog(self, logFileName): for item in val: if item[0] not in testErrPkg: testErrPkg.append(item[0]) - print(" " + item[0] + ' (' + str(item[1]) + ')') + print(" " + item[0] + " (" + str(item[1]) + ")") print("--------------------------------------------------------------------------------") nLinkErr = 0 for key, val in subSysLinkErr.items(): - subSys = key.split("/")[0] - if ((subSys not in subSysCompErr.keys()) and - (subSys not in subSysTestErr.keys())): + if (subSys not in subSysCompErr.keys()) and (subSys not in subSysTestErr.keys()): nLinkErr += 1 print(str(len(val)) + " ERRORs in link-step found for subsystem", subSys) for item in val: - print(" " + item[0] + ' (' + str(item[1]) + ')') + print(" " + item[0] + " (" + str(item[1]) + ")") print("--------------------------------------------------------------------------------") genErrPkg = [] for key, val in list(subSysGenErr.items()): @@ -210,7 +224,7 @@ def checkLog(self, logFileName): for item in val: if item[0] not in genErrPkg: genErrPkg.append(item[0]) - print(" " + item[0] + ' (' + str(item[3]) + ')') + print(" " + item[0] + " (" + str(item[3]) + ")") print("--------------------------------------------------------------------------------") print("\nA total of ", len(compErrPkg), " packages failed compilation.") print("\nA total of ", len(testErrPkg), " packages failed compiling tetsts.") @@ -228,65 +242,103 @@ def checkLog(self, logFileName): errLines.append(int(i)) try: - htmlFile = open(os.path.join(self.logDir, htmlFileName), 'w') + htmlFile = open(os.path.join(self.logDir, htmlFileName), "w") except IOError: print("ERROR opening htmlFile ", os.path.join(self.logDir, htmlFileName)) raise - htmlFile.write("\nLogCheck for " + logFileName + "\n\n") + htmlFile.write( + "\nLogCheck for " + logFileName + "\n\n" + ) htmlFile.write("

LogCheck for " + logFileName + "

\n") htmlFile.write("

Analysis from " + time.asctime() + "

\n") for key, val in subSysCompErr.items(): - htmlFile.write('

\n') - htmlFile.write('' + str( - len(val)) + ' Compile ERRORs found for subsystem ' + key) - htmlFile.write('
\n') - htmlFile.write('

\n') + htmlFile.write("

\n") + htmlFile.write( + '' + + str(len(val)) + + " Compile ERRORs found for subsystem " + + key + ) + htmlFile.write("
\n") + htmlFile.write("

\n") for key, val in subSysLinkErr.items(): - htmlFile.write('

\n') - htmlFile.write('' + str( - len(val)) + ' Linker ERRORs found for subsystem ' + key) - htmlFile.write('
\n') - htmlFile.write('

\n') + htmlFile.write("

\n") + htmlFile.write( + '' + + str(len(val)) + + " Linker ERRORs found for subsystem " + + key + ) + htmlFile.write("
\n") + htmlFile.write("

\n") for key, val in subSysCompErr.items(): - htmlFile.write('

\n') - htmlFile.write('\n') - htmlFile.write(str(len(val)) + ' ERRORs found for subsystem ' + key + '
\n') - htmlFile.write('

    \n') + htmlFile.write("

    \n") + htmlFile.write("\n") + htmlFile.write(str(len(val)) + " ERRORs found for subsystem " + key + "
    \n") + htmlFile.write("

      \n") for item in val: pkg = item[0] try: - htmlFile.write('
    package ' + pkg + ' file ' + item[1] + ' Tag: ' + self.pkgVers[ - key + "/" + pkg] + '
      \n') + htmlFile.write( + "
    package " + + pkg + + " file " + + item[1] + + " Tag: " + + self.pkgVers[key + "/" + pkg] + + "
      \n" + ) except KeyError: - htmlFile.write('
    package ' + pkg + ' file ' + item[1] + ' Tag: ???
      \n') + htmlFile.write( + "
    package " + + pkg + + " file " + + item[1] + + " Tag: ???
      \n" + ) index = item[2] - htmlFile.write('
      \n') - htmlFile.write('
      \n')
      +                    htmlFile.write('\n')
      +                    htmlFile.write("
      \n")
                           try:
                               for delta in range(-5, 1):
                                   if self.htmlOut:
                                       htmlFile.write(str(index + delta) + " : " + lines[index + delta])
      -                            # print " ", index+delta, ":", lines[index+delta],                            
      +                            # print " ", index+delta, ":", lines[index+delta],
                           except IndexError:
                               pass
      -                    htmlFile.write('
      \n') - htmlFile.write('
    5. \n') + htmlFile.write("\n") + htmlFile.write("\n") htmlFile.write("
      \n") - htmlFile.write('
    \n') - htmlFile.write('

    \n') + htmlFile.write("
\n") + htmlFile.write("

\n") for key, val in subSysGenErr.items(): - htmlFile.write('

\n') + htmlFile.write("

\n") htmlFile.write( - '' + str(len(val)) + ' UNKNOWN ERRORs found ') - htmlFile.write('
\n') - htmlFile.write('

\n') - - htmlFile.write('
\n')
+                    ''
+                    + str(len(val))
+                    + " UNKNOWN ERRORs found "
+                )
+                htmlFile.write(" 
\n") + htmlFile.write("

\n") + + htmlFile.write("
\n")
             for index in range(len(lines)):
                 # html-ify:
                 line = lines[index]
@@ -298,8 +350,8 @@ def checkLog(self, logFileName):
                     htmlFile.write('')
                     htmlFile.write('\n')
                     htmlFile.write(line)
-                    htmlFile.write('')
-                    htmlFile.write(' \n')
+                    htmlFile.write("")
+                    htmlFile.write(" \n")
                 else:
                     htmlFile.write(line)
             htmlFile.write("
\n\n") @@ -313,8 +365,10 @@ def checkLog(self, logFileName): if len(errorList.items()) > 500: if self.htmlOut and self.sumLog: self.sumLog.write( - 'Caution: Too many errors found (' + - str(len(errorList.items())) + '), printout suppressed !!\n') + 'Caution: Too many errors found (' + + str(len(errorList.items())) + + "), printout suppressed !!\n" + ) errLimit = True if self.verbose > 0 and not errLimit: @@ -324,17 +378,25 @@ def checkLog(self, logFileName): if self.htmlOut and self.sumLog: self.sumLog.write('\n') self.sumLog.write("
\n") - self.sumLog.write('\n') + self.sumLog.write( + '\n' + ) try: print("------------------------------------------") for delta in range(-2, 2): if self.htmlOut and self.sumLog: - self.sumLog.write(str(index + delta) + " : " + lines[index + delta]) - print(" ", index + delta, ":", lines[index + delta], end=' ') + self.sumLog.write( + str(index + delta) + " : " + lines[index + delta] + ) + print(" ", index + delta, ":", lines[index + delta], end=" ") except IndexError: pass if self.htmlOut and self.sumLog: - self.sumLog.write('\n') + self.sumLog.write("\n") msg = "In total: " if nErr == 0: @@ -353,7 +415,7 @@ def checkLog(self, logFileName): print(msg) if self.htmlOut and self.sumLog: self.sumLog.write(msg + "\n") - self.sumLog.write('

\n') + self.sumLog.write("

\n") return nErr, nWarn @@ -362,18 +424,22 @@ def checkLog(self, logFileName): def checkFiles(self, fileList=None): if fileList is None: fileList = [] - print("going to check ", len(fileList), ' files:', fileList) + print("going to check ", len(fileList), " files:", fileList) import socket + hostName = socket.gethostname().lower() import time + date = time.ctime() if self.htmlOut and not self.sumLog: - self.sumLog = open(os.path.join(self.logDir, "CheckLog-summary.html"), 'w') - self.sumLog.write("\nSummary of logfiles\n\n
\n")
-            self.sumLog.write('

Check of logfiles

\n') + self.sumLog = open(os.path.join(self.logDir, "CheckLog-summary.html"), "w") + self.sumLog.write( + "\nSummary of logfiles\n\n
\n"
+            )
+            self.sumLog.write("

Check of logfiles

\n") self.sumLog.write("

\n") self.sumLog.write("Checking done on " + hostName + " at " + date) self.sumLog.write("

\n") @@ -389,7 +455,7 @@ def checkFiles(self, fileList=None): nFilWarn = 0 for fileName in fileList: file = fileName - if fileName[:2] == './': + if fileName[:2] == "./": file = fileName[2:] try: nErr, nWarn = self.checkLog(file) @@ -405,12 +471,14 @@ def checkFiles(self, fileList=None): if nWarn > 0: nFilWarn += 1 - print("\n================================================================================\n") + print( + "\n================================================================================\n" + ) if nFiles > 0: print("Checked a total of ", nFiles, "log files.") - print("A total of ", totErr, "errors in ", nFilErr, 'files.') - print("A total of ", totWarn, "warnings in ", nFilWarn, 'files.') + print("A total of ", totErr, "errors in ", nFilErr, "files.") + print("A total of ", totWarn, "warnings in ", nFilWarn, "files.") # print "Files with errors: " # for f in self.errFiles: # print "\t", f @@ -422,13 +490,17 @@ def checkFiles(self, fileList=None): if self.htmlOut and self.sumLog: self.sumLog.write('
\n') self.sumLog.write("Checked a total of " + str(nFiles) + " log files.\n") - self.sumLog.write("A total of " + str(totErr) + " errors in " + str(nFilErr) + ' files.\n') - self.sumLog.write("A total of " + str(totWarn) + " warnings in " + str(nFilWarn) + ' files.\n') + self.sumLog.write( + "A total of " + str(totErr) + " errors in " + str(nFilErr) + " files.\n" + ) + self.sumLog.write( + "A total of " + str(totWarn) + " warnings in " + str(nFilWarn) + " files.\n" + ) self.sumLog.write("Files with errors: \n") self.sumLog.write("
") - htmlFile.write('status') + htmlFile.write("status") htmlFile.write("") - htmlFile.write('subsystem/package') + htmlFile.write("subsystem/package") htmlFile.write("") @@ -245,25 +294,36 @@ def makeHTMLSummaryPage(self): pkgList = sorted(self.errMap[key], key=lambda x: x.name()) for pkg in pkgList: - if not pkg.name() in self.tagList: continue - styleClass = 'ok' - for cKey in self.errorKeys : - if styleClass == 'ok' and cKey in pkg.errSummary.keys(): styleClass = self.styleClass[cKey] - htmlFile.write('
 ') - link = ' '+pkg.name()+' '+self.tagList[pkg.name()]+' ' + if not pkg.name() in self.tagList: + continue + styleClass = "ok" + for cKey in self.errorKeys: + if styleClass == "ok" and cKey in pkg.errSummary.keys(): + styleClass = self.styleClass[cKey] + htmlFile.write("
 ") + link = ( + ' ' + + pkg.name() + + " " + + self.tagList[pkg.name()] + + " " + ) htmlFile.write(link) htmlFile.write("") if pKey in pkg.errSummary.keys(): - if sys.version_info[0]<3: - htmlFile.write( str(pkg.errSummary[pKey]).decode('ascii','ignore') ) + if sys.version_info[0] < 3: + htmlFile.write(str(pkg.errSummary[pKey]).decode("ascii", "ignore")) else: - htmlFile.write( str(pkg.errSummary[pKey])) + htmlFile.write(str(pkg.errSummary[pKey])) else: - htmlFile.write(' - ') + htmlFile.write(" - ") htmlFile.write("
 ') - link = ' '+pkg.name()+' '+self.tagList[pkg.name()]+' ' + htmlFile.write("") + link = ( + ' ' + + pkg.name() + + " " + + self.tagList[pkg.name()] + + " " + ) htmlFile.write(link) htmlFile.write("") - htmlFile.write(' - ') + htmlFile.write(" - ") htmlFile.write("
\n") for f in self.errFiles: - self.sumLog.write('\n') + self.sumLog.write('\n") self.sumLog.write("
' + f + '
' + f + "
\n") self.sumLog.write("\n") @@ -446,9 +518,9 @@ def main(): import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('--html', default=False, action='store_true') - parser.add_argument('--verbose', default=0) - parser.add_argument('files', nargs='+') + parser.add_argument("--html", default=False, action="store_true") + parser.add_argument("--verbose", default=0) + parser.add_argument("files", nargs="+") args = parser.parse_args() html = args.html diff --git a/checkPyConfigs.py b/checkPyConfigs.py index b923099a1690..6908ef34ad00 100755 --- a/checkPyConfigs.py +++ b/checkPyConfigs.py @@ -12,7 +12,7 @@ def pythonNameFromCfgName(cfgName): return newName.replace("/data/", "/python/").replace(".cf", "_cf") + ".py" -releaseBase = os.path.expandvars("$CMSSW_RELEASE_BASE/src") + '/' +releaseBase = os.path.expandvars("$CMSSW_RELEASE_BASE/src") + "/" files = glob.glob(releaseBase + "*/*/data/*cf[fi]") # give 'em two hours @@ -25,8 +25,8 @@ def pythonNameFromCfgName(cfgName): pythonFile = pythonNameFromCfgName(f) if os.path.exists(pythonFile): if os.path.getmtime(f) > os.path.getmtime(pythonNameFromCfgName(f)) + gracePeriod: - subsys, pkg, pydir, fname = pythonFile.split('/') - pkgName = subsys + '_' + pkg + subsys, pkg, pydir, fname = pythonFile.split("/") + pkgName = subsys + "_" + pkg if pkgName in pkgInfo: pkgInfo[pkgName].append(pythonFile) else: @@ -36,8 +36,8 @@ def pythonNameFromCfgName(cfgName): # print f else: missingFiles.append(pythonFile) - subsys, pkg, pydir, fname = pythonFile.split('/') - pkgName = subsys + '_' + pkg + subsys, pkg, pydir, fname = pythonFile.split("/") + pkgName = subsys + "_" + pkg if pkgName in pkgInfo: pkgInfo[pkgName].append(pythonFile) else: @@ -49,7 +49,7 @@ def pythonNameFromCfgName(cfgName): nFiles = 0 pkgList.sort() for pkg in pkgList: - print('-' * 80) + print("-" * 80) print("Package:", pkg) for fName in pkgInfo[pkg]: status = "update needed :" @@ -58,4 +58,4 @@ def pythonNameFromCfgName(cfgName): print(" ", status, fName) nFiles += 1 -print("\nFound a total of ", len(pkgList), "problematic packages and ", nFiles, 'files.') +print("\nFound a total of ", len(pkgList), "problematic packages and ", nFiles, "files.") diff --git a/checkTestLog.py b/checkTestLog.py index b25ea5c130b6..f6c7cb255f82 100755 --- a/checkTestLog.py +++ b/checkTestLog.py @@ -14,17 +14,17 @@ if scriptPath not in sys.path: sys.path.append(scriptPath) -sys.path.append(os.path.join(scriptPath,"python")) +sys.path.append(os.path.join(scriptPath, "python")) # TODO is this file used? 
+ class TestLogChecker(object): def __init__(self, outFileIn=None, verbIn=False): - self.outFile = sys.stdout if outFileIn: print("Summary file:", outFileIn) - self.outFile = open(outFileIn, 'w') + self.outFile = open(outFileIn, "w") self.verbose = verbIn @@ -39,8 +39,7 @@ def setVerbose(self, verbIn=False): return def checkScramWarnings(self, logFile, verbose=False): - - self.outFile.write("going to check " + logFile + ' for scram warnings\n') + self.outFile.write("going to check " + logFile + " for scram warnings\n") # """ # WARNING: Unable to find package/tool called Geometry/CommonDetAlgo @@ -48,14 +47,14 @@ def checkScramWarnings(self, logFile, verbose=False): # # """" - exprNoPkg = '^WARNING: Unable to find package/tool called ([A-Za-z].*/[A-Za-z].*)' - exprNoPkg += r'\s*in current project area \(declared at src/([A-Za-z].*)\)' + exprNoPkg = "^WARNING: Unable to find package/tool called ([A-Za-z].*/[A-Za-z].*)" + exprNoPkg += r"\s*in current project area \(declared at src/([A-Za-z].*)\)" noPkgRe = re.compile(exprNoPkg) # WARNING: PhysicsTools/RecoAlgos/BuildFile does not export anything: - noExportRe = re.compile('^WARNING: ([A-Za-z].*)/BuildFile does not export anything:') + noExportRe = re.compile("^WARNING: ([A-Za-z].*)/BuildFile does not export anything:") - lf = open(logFile, 'r') + lf = open(logFile, "r") nLines = 0 nNoPkg = 0 @@ -76,7 +75,7 @@ def checkScramWarnings(self, logFile, verbose=False): nNoPkg += 1 pkg = noPkgMatch.group(2).strip() tool = noPkgMatch.group(1).strip() - tp = pkg + '--' + tool + tp = pkg + "--" + tool if tp not in noToolPkgs: noToolPkgs.append(tp) @@ -88,34 +87,37 @@ def checkScramWarnings(self, logFile, verbose=False): lf.close() - self.outFile.write('found ' + str(nNoPkg) + ' scram-warnings in ' + str(nLines) + ' lines of log file.\n') - self.outFile.write('found ' + str(len(noToolPkgs)) + ' BuildFiles with tool problems\n') + self.outFile.write( + "found " + str(nNoPkg) + " scram-warnings in " + str(nLines) + " lines of log file.\n" + ) + self.outFile.write("found " + str(len(noToolPkgs)) + " BuildFiles with tool problems\n") if verbose: for p in noToolPkgs: - self.outFile.write(" " + p + '\n') - self.outFile.write('\n') + self.outFile.write(" " + p + "\n") + self.outFile.write("\n") - self.outFile.write('found ' + str(len(noExport)) + ' BuildFiles without exporting anything:\n') + self.outFile.write( + "found " + str(len(noExport)) + " BuildFiles without exporting anything:\n" + ) if verbose: for p in noExport: - self.outFile.write(" " + p + '\n') - self.outFile.write('\n') + self.outFile.write(" " + p + "\n") + self.outFile.write("\n") - self.outFile.write('\n') + self.outFile.write("\n") return # -------------------------------------------------------------------------------- def check(self, logFile): + self.outFile.write("going to check " + logFile + "\n") - self.outFile.write("going to check " + logFile + '\n') + subsysRe = re.compile("^>> Tests for package ([A-Za-z].*/[A-Za-z].*) ran.") - subsysRe = re.compile('^>> Tests for package ([A-Za-z].*/[A-Za-z].*) ran.') - - pkgTestStartRe = re.compile('^===== Test \"(.*)\" ====') - pkgTestEndRe = re.compile(r'^\^\^\^\^ End Test (.*) \^\^\^\^') - pkgTestResultRe = re.compile('.*---> test ([^ ]+) (had ERRORS|succeeded)') + pkgTestStartRe = re.compile('^===== Test "(.*)" ====') + pkgTestEndRe = re.compile(r"^\^\^\^\^ End Test (.*) \^\^\^\^") + pkgTestResultRe = re.compile(".*---> test ([^ ]+) (had ERRORS|succeeded)") pkgStartRe = re.compile("^>> Entering Package (.*)") pkgEndRe = re.compile("^>> Leaving 
Package (.*)") @@ -123,7 +125,7 @@ def check(self, logFile): pkgSubsysMap = {} subsysPkgMap = {} - lf = open(logFile, 'r') + lf = open(logFile, "r") startTime = time.time() nLines = 0 @@ -143,7 +145,7 @@ def check(self, logFile): actPkgLines += 1 subsysMatch = subsysRe.match(line) if subsysMatch: - subsys, pkg = subsysMatch.group(1).split('/') + subsys, pkg = subsysMatch.group(1).split("/") if pkg not in pkgSubsysMap: pkgSubsysMap[pkg] = subsys if subsys in subsysPkgMap: @@ -164,7 +166,13 @@ def check(self, logFile): if pkgEndMatch: pkg = pkgEndMatch.group(1) if actPkg != pkg: - self.outFile.write("pkgEndMatch> package mismatch: pkg found " + pkg + ' actPkg=' + actPkg + '\n') + self.outFile.write( + "pkgEndMatch> package mismatch: pkg found " + + pkg + + " actPkg=" + + actPkg + + "\n" + ) pkgLines[pkg] = actPkgLines pkgTestResultMatch = pkgTestResultRe.match(line) @@ -190,26 +198,31 @@ def check(self, logFile): tst = pkgTestEndMatch.group(1) if actTest != tst: self.outFile.write( - "pkgTestEndMatch> test mismatch: pkg found " + tst + ' actPkg=' + actTest + '\n') + "pkgTestEndMatch> test mismatch: pkg found " + + tst + + " actPkg=" + + actTest + + "\n" + ) testLines[tst] = actTstLines stopTime = time.time() lf.close() - self.outFile.write("found a total of " + str(nLines) + ' lines in logfile.\n') - self.outFile.write("analysis took " + str(stopTime - startTime) + ' sec.\n') + self.outFile.write("found a total of " + str(nLines) + " lines in logfile.\n") + self.outFile.write("analysis took " + str(stopTime - startTime) + " sec.\n") - self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + '\n') + self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + "\n") nMax = 1000 self.outFile.write("tests with more than " + str(nMax) + " lines of logs:\n") for pkg, lines in testLines.items(): if lines > nMax: - self.outFile.write(" " + pkg + ' : ' + str(lines) + '\n') + self.outFile.write(" " + pkg + " : " + str(lines) + "\n") self.outFile.write("Number of tests for packages: \n") noTests = 0 nrTests = 0 - indent = ' ' + indent = " " totalOK = 0 totalFail = 0 for pkg, nTst in pkgTests.items(): @@ -218,8 +231,8 @@ def check(self, logFile): else: nrTests += 1 if self.verbose: - self.outFile.write('-' * 80 + '\n') - self.outFile.write(indent + pkg + ' : ') + self.outFile.write("-" * 80 + "\n") + self.outFile.write(indent + pkg + " : ") nOK = 0 if self.verbose: self.outFile.write("\n") @@ -230,18 +243,41 @@ def check(self, logFile): else: totalFail += 1 if self.verbose: - self.outFile.write(indent * 2 + tNam + ' ' + results[tNam] + '\n') + self.outFile.write(indent * 2 + tNam + " " + results[tNam] + "\n") if self.verbose: self.outFile.write(indent + pkg + " : ") self.outFile.write( - indent + str(len(testNames[pkg])) + ' tests in total, OK:' + str(nOK) + ' fail:' + str( - len(testNames[pkg]) - nOK) + '\n') - - self.outFile.write(indent + str(nrTests) + " packages with tests (" + str( - float(nrTests) / float(len(pkgTests.keys()))) + ")\n") - self.outFile.write(indent + str(noTests) + " packages without tests (" + str( - float(noTests) / float(len(pkgTests.keys()))) + ")\n") - self.outFile.write(indent + "in total: tests OK : " + str(totalOK) + ' tests FAIL : ' + str(totalFail) + '\n') + indent + + str(len(testNames[pkg])) + + " tests in total, OK:" + + str(nOK) + + " fail:" + + str(len(testNames[pkg]) - nOK) + + "\n" + ) + + self.outFile.write( + indent + + str(nrTests) + + " packages with tests (" + + str(float(nrTests) / float(len(pkgTests.keys()))) + + 
")\n" + ) + self.outFile.write( + indent + + str(noTests) + + " packages without tests (" + + str(float(noTests) / float(len(pkgTests.keys()))) + + ")\n" + ) + self.outFile.write( + indent + + "in total: tests OK : " + + str(totalOK) + + " tests FAIL : " + + str(totalFail) + + "\n" + ) return @@ -255,10 +291,10 @@ def main(): import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('-l', '--logFile', dest='logFile', required=True) - parser.add_argument('-s', '--scram', default=False, action='store_true') - parser.add_argument('-v', '--verbose', default=False, action='store_true') - parser.add_argument('-l', '--outFile', dest='outFile') + parser.add_argument("-l", "--logFile", dest="logFile", required=True) + parser.add_argument("-s", "--scram", default=False, action="store_true") + parser.add_argument("-v", "--verbose", default=False, action="store_true") + parser.add_argument("-l", "--outFile", dest="outFile") args = parser.parse_args() logFile = args.logFile @@ -272,5 +308,5 @@ def main(): tlc.check(logFile) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/chk-invalid-headers.py b/chk-invalid-headers.py index 006911c733ea..9f5c931ef74b 100755 --- a/chk-invalid-headers.py +++ b/chk-invalid-headers.py @@ -11,10 +11,10 @@ def hasInclude(inc, src, cache): if src not in cache: cache[src] = {} - for e in ['CMSSW_BASE', 'CMSSW_RELEASE_BASE', 'CMSSW_FULL_RELEASE_BASE']: + for e in ["CMSSW_BASE", "CMSSW_RELEASE_BASE", "CMSSW_FULL_RELEASE_BASE"]: if (e not in environ) or (not environ[e]): continue - src_file = join(environ[e], 'src', src) + src_file = join(environ[e], "src", src) if not exists(src_file): continue exp = re.compile(r'^\s*#\s*include\s*([<"])([^<"]+)([<"])\s*$') @@ -30,9 +30,9 @@ def hasInclude(inc, src, cache): def readDeps(cache, depFile): - with gzip.open(depFile, 'rt') as ref: + with gzip.open(depFile, "rt") as ref: for line in ref.readlines(): - data = line.strip().split(' ', 1) + data = line.strip().split(" ", 1) if len(data) < 2: continue cache[data[0]] = data[1].strip() @@ -43,17 +43,17 @@ def main(): includes = {} uses = {} usedby = {} - readDeps(uses, join(environ['CMSSW_RELEASE_BASE'], 'etc', 'dependencies', 'uses.out.gz')) - readDeps(usedby, join(environ['CMSSW_RELEASE_BASE'], 'etc', 'dependencies', 'usedby.out.gz')) + readDeps(uses, join(environ["CMSSW_RELEASE_BASE"], "etc", "dependencies", "uses.out.gz")) + readDeps(usedby, join(environ["CMSSW_RELEASE_BASE"], "etc", "dependencies", "usedby.out.gz")) errs = {} checked = {} for inc in usedby: - items = inc.split('/') - if items[2] == 'interface': + items = inc.split("/") + if items[2] == "interface": continue - for src in usedby[inc].split(' '): - sitems = src.split('/') + for src in usedby[inc].split(" "): + sitems = src.split("/") if (items[0] == sitems[0]) and (items[1] == sitems[1]) and (items[2] == sitems[2]): continue if hasInclude(inc, src, includes): @@ -61,8 +61,8 @@ def main(): errs[src] = {} errs[src][inc] = includes[src][inc] if src in uses: - for isrc in uses[src].strip().split(' '): - xchk = '%s:%s' % (src, inc) + for isrc in uses[src].strip().split(" "): + xchk = "%s:%s" % (src, inc) if xchk in checked: continue checked[xchk] = 1 @@ -80,26 +80,36 @@ def main(): pkg_errs = {} for e in errs: - pkg = '/'.join(e.split('/')[:2]) + pkg = "/".join(e.split("/")[:2]) if pkg not in pkg_errs: pkg_errs[pkg] = {} pkg_errs[pkg][e] = errs[e] - outdir = 'invalid-includes' - run_cmd('rm -f %s; mkdir %s' % (outdir, outdir)) + outdir = "invalid-includes" + 
run_cmd("rm -f %s; mkdir %s" % (outdir, outdir)) all_count = {} for p in sorted(pkg_errs): all_count[p] = len(pkg_errs[p]) pdir = join(outdir, p) - run_cmd('mkdir -p %s' % pdir) - with open(join(pdir, 'index.html'), 'w') as ref: + run_cmd("mkdir -p %s" % pdir) + with open(join(pdir, "index.html"), "w") as ref: ref.write("\n") for e in sorted(pkg_errs[p]): ref.write("

%s:

\n" % e) for inc in sorted(errs[e].keys()): - url = 'https://github.com/cms-sw/cmssw/blob/%s/%s#L%s' % (environ['CMSSW_VERSION'], e, errs[e][inc]) + url = "https://github.com/cms-sw/cmssw/blob/%s/%s#L%s" % ( + environ["CMSSW_VERSION"], + e, + errs[e][inc], + ) ref.write('%s
\n' % (url, inc)) ref.write("
\n") ref.write("\n") - dump(all_count, open(outdir + '/summary.json', 'w'), indent=2, sort_keys=True, separators=(',', ': ')) + dump( + all_count, + open(outdir + "/summary.json", "w"), + indent=2, + sort_keys=True, + separators=(",", ": "), + ) diff --git a/cms-filename-checks.py b/cms-filename-checks.py index 5ce5f3d110f3..507927109389 100755 --- a/cms-filename-checks.py +++ b/cms-filename-checks.py @@ -2,18 +2,23 @@ from __future__ import print_function from sys import argv from os.path import join, exists + exceptions_regexp = [] uniq_paths = [] -for file_path in [ f.strip("\n").strip("/") for f in open(argv[1]).readlines()]: - if not file_path or [ r for r in exceptions_regexp if r.match(file_path) ] : continue - xpath = "" - for sub_path in file_path.split('/'): - xpath=join(xpath,sub_path) - if not sub_path[:1].isdigit(): continue - #If it exists then we allow to have files with [0-9] under it - if exists(join(argv[2],xpath)): break - if not xpath in uniq_paths: uniq_paths.append(xpath) - break +for file_path in [f.strip("\n").strip("/") for f in open(argv[1]).readlines()]: + if not file_path or [r for r in exceptions_regexp if r.match(file_path)]: + continue + xpath = "" + for sub_path in file_path.split("/"): + xpath = join(xpath, sub_path) + if not sub_path[:1].isdigit(): + continue + # If it exists then we allow to have files with [0-9] under it + if exists(join(argv[2], xpath)): + break + if not xpath in uniq_paths: + uniq_paths.append(xpath) + break if uniq_paths: - print("\n".join(uniq_paths)) + print("\n".join(uniq_paths)) diff --git a/cms-jenkins-api b/cms-jenkins-api deleted file mode 100755 index 3cbf0a4b3a61..000000000000 --- a/cms-jenkins-api +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python -# A script to communicate with jenkins in a secure way. 
-from __future__ import print_function -import json -import sys -import os -from _py2with3compatibility import run_cmd -from argparse import ArgumentParser - -COOKIE_JAR="~/private/ssocookie.txt" -JENKINS_URL="https://cmssdt.cern.ch/jenkins" -TOKEN_CMD="cern-get-sso-cookie --krb -u %(url)s -o %(jar)s" -API_CMD="curl -k -L --cookie %(jar)s --cookie-jar %(jar)s -X POST %(url)s/%(api)s --data-urlencode json='%(json)s' --user cmsbuild:%(token)s" - -def format(s, **kwds): - return s % kwds - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("api", nargs=1, help="The api call to make.") - parser.add_argument("--url", dest="url", default=JENKINS_URL, help="The jenkins server.") - parser.add_argument("--cookie-jar", dest="jar", default=COOKIE_JAR, help="Where to find the cookie jar.") - parser.add_argument("args", nargs="*", help="Key value pair arguments") - args = parser.parse_args() - err, out = run_cmd(format(TOKEN_CMD, url=JENKINS_URL, jar=COOKIE_JAR)) - if err: - parser.error("Unable to get token") - - print(args.api[0]) - json = json.dumps({"parameter": [dict(list(zip(["name", "value"], x.split("=")))) for x in args.args]}) - print(json) - cmd = format(API_CMD, url=args.url, jar=args.jar, api=args.api[0], json=json, token=os.getenv("HUBOT_JENKINS_TOKEN")) - print(cmd) - err, out = run_cmd(cmd) - if err: - print(out) - sys.exit(1) - print(out) - sys.exit(0) diff --git a/cms-jenkins-api b/cms-jenkins-api new file mode 120000 index 000000000000..45c70288c5be --- /dev/null +++ b/cms-jenkins-api @@ -0,0 +1 @@ +cms-jenkins-api.py \ No newline at end of file diff --git a/cms-jenkins-api.py b/cms-jenkins-api.py new file mode 100755 index 000000000000..8a952cc58371 --- /dev/null +++ b/cms-jenkins-api.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# A script to communicate with jenkins in a secure way. +from __future__ import print_function +import json +import sys +import os +from _py2with3compatibility import run_cmd +from argparse import ArgumentParser + +COOKIE_JAR = "~/private/ssocookie.txt" +JENKINS_URL = "https://cmssdt.cern.ch/jenkins" +TOKEN_CMD = "cern-get-sso-cookie --krb -u %(url)s -o %(jar)s" +API_CMD = "curl -k -L --cookie %(jar)s --cookie-jar %(jar)s -X POST %(url)s/%(api)s --data-urlencode json='%(json)s' --user cmsbuild:%(token)s" + + +def format(s, **kwds): + return s % kwds + + +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument("api", nargs=1, help="The api call to make.") + parser.add_argument("--url", dest="url", default=JENKINS_URL, help="The jenkins server.") + parser.add_argument( + "--cookie-jar", dest="jar", default=COOKIE_JAR, help="Where to find the cookie jar." 
+ ) + parser.add_argument("args", nargs="*", help="Key value pair arguments") + args = parser.parse_args() + err, out = run_cmd(format(TOKEN_CMD, url=JENKINS_URL, jar=COOKIE_JAR)) + if err: + parser.error("Unable to get token") + + print(args.api[0]) + json = json.dumps( + {"parameter": [dict(list(zip(["name", "value"], x.split("=")))) for x in args.args]} + ) + print(json) + cmd = format( + API_CMD, + url=args.url, + jar=args.jar, + api=args.api[0], + json=json, + token=os.getenv("HUBOT_JENKINS_TOKEN"), + ) + print(cmd) + err, out = run_cmd(cmd) + if err: + print(out) + sys.exit(1) + print(out) + sys.exit(0) diff --git a/cms_static.py b/cms_static.py index e3bb77e0b5dc..63036c44467c 100644 --- a/cms_static.py +++ b/cms_static.py @@ -1,16 +1,28 @@ -GH_CMSSW_ORGANIZATION = 'cms-sw' -GH_CMSSW_REPO = 'cmssw' -GH_CMSDIST_REPO = 'cmsdist' -BUILD_REL = '^[Bb]uild[ ]+(CMSSW_[^ ]+)' -CREATE_REPO = '^[Cc]reate[ ]+repository[ ]+([A-Z][0-9A-Za-z]+)[-/]([a-zA-Z][0-9A-Za-z]+)' -NEW_ISSUE_PREFIX = 'A new Issue was created by ' -NEW_PR_PREFIX = 'A new Pull Request was created by ' -ISSUE_SEEN_MSG = '^A new (Pull Request|Issue) was created by ' +GH_CMSSW_ORGANIZATION = "cms-sw" +GH_CMSSW_REPO = "cmssw" +GH_CMSDIST_REPO = "cmsdist" +BUILD_REL = "^[Bb]uild[ ]+(CMSSW_[^ ]+)" +CREATE_REPO = "^[Cc]reate[ ]+repository[ ]+([A-Z][0-9A-Za-z]+)[-/]([a-zA-Z][0-9A-Za-z]+)" +NEW_ISSUE_PREFIX = "A new Issue was created by " +NEW_PR_PREFIX = "A new Pull Request was created by " +ISSUE_SEEN_MSG = "^A new (Pull Request|Issue) was created by " VALID_CMSDIST_BRANCHES = "^IB/CMSSW_.+$" -BACKPORT_STR ="- Backported from #" -CMSBUILD_GH_USER ="cmsbuild" -CMSBOT_IGNORE_MSG= "\s*" -CMSBOT_NO_NOTIFY_MSG= "\s*" -VALID_CMS_SW_REPOS_FOR_TESTS = ["cmssw", "cmsdist", "cms-bot","root", "cmssw-config", - "pkgtools", "SCRAM", "cmssw-osenv", "cms-git-tools", - "cms-common","cms_oracleocci_abi_hack","siteconf", 'scram-tools'] +BACKPORT_STR = "- Backported from #" +CMSBUILD_GH_USER = "cmsbuild" +CMSBOT_IGNORE_MSG = "\s*" +CMSBOT_NO_NOTIFY_MSG = "\s*" +VALID_CMS_SW_REPOS_FOR_TESTS = [ + "cmssw", + "cmsdist", + "cms-bot", + "root", + "cmssw-config", + "pkgtools", + "SCRAM", + "cmssw-osenv", + "cms-git-tools", + "cms-common", + "cms_oracleocci_abi_hack", + "siteconf", + "scram-tools", +] diff --git a/cmsdist-comp-pr-process.py b/cmsdist-comp-pr-process.py index 2f903e9c6530..7c70668cc283 100755 --- a/cmsdist-comp-pr-process.py +++ b/cmsdist-comp-pr-process.py @@ -1,90 +1,118 @@ #!/usr/bin/env python3 import sys -from sys import exit,argv +from sys import exit, argv from re import match from github import Github from os.path import expanduser, dirname, abspath, join, exists from optparse import OptionParser from socket import setdefaulttimeout + setdefaulttimeout(120) SCRIPT_DIR = dirname(abspath(argv[0])) + def mark_pr_ready_for_test(repo, pr, context="cmsbot/test"): try: latest_commit = pr.get_commits().reversed[0].sha # Get the latest commit SHA repo.get_commit(latest_commit).create_status( - state="success", - context=context, - description="Ready for testing" + state="success", context=context, description="Ready for testing" ) print("Commit status marked as 'Ready for testing'") except GithubException as e: print(f"Failed to mark commit status: {e}") + def process_pr(gh, repo, issue, dryRun): - from cmsdist_merge_permissions import USERS_TO_TRIGGER_HOOKS, getCommentCommand, hasRights - print("Issue state:", issue.state) - prId = issue.number - pr = None - branch = None - cmdType = None - chg_files= [] - if issue.pull_request: - pr = 
repo.get_pull(prId) - branch = pr.base.ref - print("PR merged:", pr.merged) - if pr.merged: return True - from process_pr import get_changed_files - chg_files = get_changed_files(repo, pr) - USERS_TO_TRIGGER_HOOKS.add("cmsbuild") - for comment in issue.get_comments(): - commenter = comment.user.login - if not commenter in USERS_TO_TRIGGER_HOOKS: continue - comment_msg = comment.body.encode("ascii", "ignore").decode() - comment_lines = [ l.strip() for l in comment_msg.split("\n") if l.strip() ][0:1] - print("Comment first line: %s => %s" % (commenter, comment_lines)) - if not comment_lines: continue - first_line = comment_lines[0] - if commenter == "cmsbuild": - if not cmdType: continue - if match("^Command\s+"+cmdType+"\s+acknowledged.$",first_line): - print("Acknowledged ",cmdType) - cmdType = None - continue - cmd = getCommentCommand(first_line) - if not cmd: continue - if (cmd == "ping") and cmdType: continue - if (cmd == "test") and cmdType: continue - if cmd == "merge" and not pr: continue - if not hasRights (commenter, branch, cmd, chg_files): continue - cmdType = cmd - print("Found: Command %s issued by %s" % (cmdType, commenter)) - if not cmdType: return True - print("Processing ",cmdType) - if dryRun: return True - if issue.state == "open": - if cmdType == "merge": pr.merge() - if cmdType == "close": issue.edit(state="closed") - if cmdType == "test": mark_pr_ready_for_test(repo, pr) - elif cmdType == "open": issue.edit(state="open") - issue.create_comment("Command "+cmdType+" acknowledged.") - return True + from cmsdist_merge_permissions import USERS_TO_TRIGGER_HOOKS, getCommentCommand, hasRights + + print("Issue state:", issue.state) + prId = issue.number + pr = None + branch = None + cmdType = None + chg_files = [] + if issue.pull_request: + pr = repo.get_pull(prId) + branch = pr.base.ref + print("PR merged:", pr.merged) + if pr.merged: + return True + from process_pr import get_changed_files + + chg_files = get_changed_files(repo, pr) + USERS_TO_TRIGGER_HOOKS.add("cmsbuild") + for comment in issue.get_comments(): + commenter = comment.user.login + if not commenter in USERS_TO_TRIGGER_HOOKS: + continue + comment_msg = comment.body.encode("ascii", "ignore").decode() + comment_lines = [l.strip() for l in comment_msg.split("\n") if l.strip()][0:1] + print("Comment first line: %s => %s" % (commenter, comment_lines)) + if not comment_lines: + continue + first_line = comment_lines[0] + if commenter == "cmsbuild": + if not cmdType: + continue + if match("^Command\s+" + cmdType + "\s+acknowledged.$", first_line): + print("Acknowledged ", cmdType) + cmdType = None + continue + cmd = getCommentCommand(first_line) + if not cmd: + continue + if (cmd == "ping") and cmdType: + continue + if (cmd == "test") and cmdType: + continue + if cmd == "merge" and not pr: + continue + if not hasRights(commenter, branch, cmd, chg_files): + continue + cmdType = cmd + print("Found: Command %s issued by %s" % (cmdType, commenter)) + if not cmdType: + return True + print("Processing ", cmdType) + if dryRun: + return True + if issue.state == "open": + if cmdType == "merge": + pr.merge() + if cmdType == "close": + issue.edit(state="closed") + if cmdType == "test": + mark_pr_ready_for_test(repo, pr) + elif cmdType == "open": + issue.edit(state="open") + issue.create_comment("Command " + cmdType + " acknowledged.") + return True + if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", 
default=False) - #parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmsdist.", type=str, default="cms-sw/cmsdist") - opts, args = parser.parse_args() + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + # parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmsdist.", type=str, default="cms-sw/cmsdist") + opts, args = parser.parse_args() + + if len(args) != 1: + parser.error("Too many/few arguments") + prId = int(args[0]) - if len(args) != 1: - parser.error("Too many/few arguments") - prId = int(args[0]) - - repo_dir = join(SCRIPT_DIR,'repos',"cms-sw/cmsdist".replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config + repo_dir = join(SCRIPT_DIR, "repos", "cms-sw/cmsdist".replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import repo_config - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - repo = gh.get_repo("cms-sw/cmsdist") - if not process_pr(gh, repo, repo.get_issue(prId), opts.dryRun): exit(1) - exit (0) + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + repo = gh.get_repo("cms-sw/cmsdist") + if not process_pr(gh, repo, repo.get_issue(prId), opts.dryRun): + exit(1) + exit(0) diff --git a/cmsdist_merge_permissions.py b/cmsdist_merge_permissions.py index 424424ce348c..535c7352804b 100644 --- a/cmsdist_merge_permissions.py +++ b/cmsdist_merge_permissions.py @@ -1,53 +1,70 @@ -#cmsdist/comp rules -from re import match,IGNORECASE +# cmsdist/comp rules +from re import match, IGNORECASE -CMSSW_BRANCHES = "^IB/CMSSW_.+$" -ALL_BRANCHES = ".+" +CMSSW_BRANCHES = "^IB/CMSSW_.+$" +ALL_BRANCHES = ".+" COMP_BRANCHES = "^comp_gcc.+$" -#CMSDIST_PERMISSIONS: format -#gh-user: [command-type, regexp-valid-branch, regexp-invalid-branch, regexp-changed-files-for-merge-command] +# CMSDIST_PERMISSIONS: format +# gh-user: [command-type, regexp-valid-branch, regexp-invalid-branch, regexp-changed-files-for-merge-command] CMSDIST_PERMISSIONS = { - "muhammadimranfarooqi" : [ ".+", ALL_BRANCHES , CMSSW_BRANCHES, ".+" ], - "arooshap" : [ ".+", ALL_BRANCHES , CMSSW_BRANCHES, ".+" ], - "amaltaro" : [ ".+", COMP_BRANCHES , CMSSW_BRANCHES, ".+" ], - "todor-ivanov" : [ ".+", COMP_BRANCHES , CMSSW_BRANCHES, ".+" ], - "belforte" : [ ".+", COMP_BRANCHES , CMSSW_BRANCHES, ".+" ], - "mapellidario" : [ ".+", COMP_BRANCHES , CMSSW_BRANCHES, ".+" ], - "germanfgv" : [ ".+", COMP_BRANCHES , CMSSW_BRANCHES, ".+" ], + "muhammadimranfarooqi": [".+", ALL_BRANCHES, CMSSW_BRANCHES, ".+"], + "arooshap": [".+", ALL_BRANCHES, CMSSW_BRANCHES, ".+"], + "amaltaro": [".+", COMP_BRANCHES, CMSSW_BRANCHES, ".+"], + "todor-ivanov": [".+", COMP_BRANCHES, CMSSW_BRANCHES, ".+"], + "belforte": [".+", COMP_BRANCHES, CMSSW_BRANCHES, ".+"], + "mapellidario": [".+", COMP_BRANCHES, CMSSW_BRANCHES, ".+"], + "germanfgv": [".+", COMP_BRANCHES, CMSSW_BRANCHES, ".+"], } VALID_COMMENTS = { - "^(please(\s*,|)\s+|)merge$" : "merge", - "^(please(\s*,|)\s+|)close$" : "close", - "^(please(\s*,|)\s+|)(re|)open$": "open", - "^ping$" : "ping", - "^(please(\s*,|)\s+|)test$" : "test", + "^(please(\s*,|)\s+|)merge$": "merge", + "^(please(\s*,|)\s+|)close$": "close", + "^(please(\s*,|)\s+|)(re|)open$": "open", + "^ping$": "ping", + "^(please(\s*,|)\s+|)test$": "test", } + def 
getCommentCommand(comment): - comment = comment.strip().lower() - for regex in VALID_COMMENTS: - if match(regex,comment,IGNORECASE): return VALID_COMMENTS[regex] - return None + comment = comment.strip().lower() + for regex in VALID_COMMENTS: + if match(regex, comment, IGNORECASE): + return VALID_COMMENTS[regex] + return None + def hasRights(user, branch, type, files=[]): - if not user in CMSDIST_PERMISSIONS: return False - if not match(CMSDIST_PERMISSIONS[user][0], type): return False - if branch: - reg = CMSDIST_PERMISSIONS[user][2] - if reg and match(reg,branch): return False - reg = CMSDIST_PERMISSIONS[user][1] - if not match(reg,branch): return False - if type=="merge": - for f in files: - if not match(CMSDIST_PERMISSIONS[user][3], f): return False - return True + if not user in CMSDIST_PERMISSIONS: + return False + if not match(CMSDIST_PERMISSIONS[user][0], type): + return False + if branch: + reg = CMSDIST_PERMISSIONS[user][2] + if reg and match(reg, branch): + return False + reg = CMSDIST_PERMISSIONS[user][1] + if not match(reg, branch): + return False + if type == "merge": + for f in files: + if not match(CMSDIST_PERMISSIONS[user][3], f): + return False + return True + def isValidWebHook(payload): - if (not payload['repository']['full_name'] in ['cms-sw/cmsdist']): return False - if (not payload['comment']['user']['login'] in CMSDIST_PERMISSIONS.keys()): return False - comment_lines = [ l.strip() for l in payload['comment']['body'].encode("ascii", "ignore").decode().split("\n") if l.strip() ][0:1] - if (not comment_lines) or (not getCommentCommand(comment_lines[0])): return False - return True + if not payload["repository"]["full_name"] in ["cms-sw/cmsdist"]: + return False + if not payload["comment"]["user"]["login"] in CMSDIST_PERMISSIONS.keys(): + return False + comment_lines = [ + l.strip() + for l in payload["comment"]["body"].encode("ascii", "ignore").decode().split("\n") + if l.strip() + ][0:1] + if (not comment_lines) or (not getCommentCommand(comment_lines[0])): + return False + return True + USERS_TO_TRIGGER_HOOKS = set(CMSDIST_PERMISSIONS.keys()) diff --git a/cmssw_known_errors.py b/cmssw_known_errors.py index 01809f53d4c1..72825838678a 100755 --- a/cmssw_known_errors.py +++ b/cmssw_known_errors.py @@ -6,148 +6,199 @@ MSG_ARCH_INCOMPETIBILITY = "Architecture incompetibility. 
GridPacks were built for x86_64" MSG_ASAN_INCOMPETIBILITY = "Grid-packs missing asan and non-asan shared libraries" MSG_TRITON_INCOMPETIBILITY = "Triton Server Instance for non-x86_64" -KNOWN_ERRORS = {"relvals":{}, "addons":{}, "unittests":{}} -KNOWN_ERRORS["relvals"]["CMSSW_9_[2-3]_.+"]={ - "slc._amd64_gcc630": { - "512.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "513.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "515.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "516.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "518.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "519.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "521.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "522.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "525.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "526.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "528.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "529.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "534.0": { "step": 1, "exitcode": 35584, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - } +KNOWN_ERRORS = {"relvals": {}, "addons": {}, "unittests": {}} +KNOWN_ERRORS["relvals"]["CMSSW_9_[2-3]_.+"] = { + "slc._amd64_gcc630": { + "512.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "513.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "515.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "516.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "518.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "519.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "521.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "522.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "525.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "526.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "528.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "529.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "534.0": {"step": 1, "exitcode": 35584, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + } } -KNOWN_ERRORS["relvals"]["CMSSW_9_[4-9]_.+"]=deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_9_[2-3]_.+"]) +KNOWN_ERRORS["relvals"]["CMSSW_9_[4-9]_.+"] = deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_9_[2-3]_.+"]) KNOWN_ERRORS["relvals"]["CMSSW_9_[4-9]_.+"]["slc._amd64_gcc630"].pop("534.0", None) -#10.0.X and 10.1.X -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]={} -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc630"]=deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_9_[4-9]_.+"]["slc._amd64_gcc630"]) -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"]={ - "514.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "517.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "520.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "523.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "524.0": { "step": 1, 
"exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "527.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "530.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "551.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "552.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "554.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "555.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "556.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "562.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1360.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1361.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1362.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1363.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1361.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1362.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1363.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25210.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25211.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25212.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25213.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25211.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25212.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25213.17": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, +# 10.0.X and 10.1.X +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"] = {} +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc630"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_9_[4-9]_.+"]["slc._amd64_gcc630"] +) +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"] = { + "514.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "517.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "520.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "523.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "524.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "527.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "530.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "551.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "552.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "554.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "555.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "556.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "562.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1360.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1361.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1362.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1363.0": 
{"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1361.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1362.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "1363.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25210.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25211.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25212.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25213.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25211.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25212.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25213.17": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, } -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]=deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"]) -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]["534.0"]={ "step": 1, "exitcode": 256, "reason" : MSG_GCC_ABI_INCOMPETIBILITY} -KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"]={ - "512.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "513.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "515.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "516.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "518.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "519.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "521.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "522.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "525.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "526.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "528.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "529.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"] +) +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]["534.0"] = { + "step": 1, + "exitcode": 256, + "reason": MSG_GCC_ABI_INCOMPETIBILITY, +} +KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"] = { + "512.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "513.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "515.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "516.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "518.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "519.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "521.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "522.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "525.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "526.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "528.0": {"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "529.0": 
{"step": 1, "exitcode": 16640, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, } for wf in KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"]: - KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf] = deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"][wf]) + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"][wf] + ) for wf in KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]: - KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf]["reason"]=MSG_ARCH_INCOMPETIBILITY - -for xwf in ["136","2521"]: - for wf in KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]: - if wf.startswith(xwf): KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf]["exitcode"]=64000 - -#10.2 -RelFilter="CMSSW_10_2_.+" -KNOWN_ERRORS["relvals"][RelFilter]={} -KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]) -KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc630"]= deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc630"]) -KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc[7-9][0-9]+"]= deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"]) -for wf in ["523.0", "551.0","555.0","562.0","1360.0","25210.0"]: - KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc[7-9][0-9]+"][wf]=deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"][wf]) - -KNOWN_ERRORS["relvals"][RelFilter]["slc6_amd64_gcc[7-9][0-9]+"]= { - "523.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "551.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "555.0": { "step": 1, "exitcode": 31744, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "562.0": { "step": 1, "exitcode": 16640, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "1360.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, - "25210.0": { "step": 1, "exitcode": 34304, "reason" : MSG_GCC_ABI_INCOMPETIBILITY}, + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf][ + "reason" + ] = MSG_ARCH_INCOMPETIBILITY + +for xwf in ["136", "2521"]: + for wf in KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]: + if wf.startswith(xwf): + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"][wf]["exitcode"] = 64000 + +# 10.2 +RelFilter = "CMSSW_10_2_.+" +KNOWN_ERRORS["relvals"][RelFilter] = {} +KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"] +) +KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc630"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc630"] +) +KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc[7-9][0-9]+"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"]["slc7_amd64_gcc700"] +) +for wf in ["523.0", "551.0", "555.0", "562.0", "1360.0", "25210.0"]: + KNOWN_ERRORS["relvals"][RelFilter]["slc7_amd64_gcc[7-9][0-9]+"][wf] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_amd64_gcc700"][wf] + ) + +KNOWN_ERRORS["relvals"][RelFilter]["slc6_amd64_gcc[7-9][0-9]+"] = { + "523.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "551.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "555.0": {"step": 1, "exitcode": 31744, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "562.0": {"step": 1, "exitcode": 16640, "reason": 
MSG_GCC_ABI_INCOMPETIBILITY}, + "1360.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, + "25210.0": {"step": 1, "exitcode": 34304, "reason": MSG_GCC_ABI_INCOMPETIBILITY}, } -#10.3 -RelFilter="CMSSW_10_3_.+" -KNOWN_ERRORS["relvals"][RelFilter]={} -KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]) +# 10.3 +RelFilter = "CMSSW_10_3_.+" +KNOWN_ERRORS["relvals"][RelFilter] = {} +KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"] +) -#10.4 and above -RelFilter="CMSSW_(10_([4-9]|[1-9][0-9]+)|1[1-9]|[2-9][0-9]|[1-9][0-9][0-9]+)_.+" -KNOWN_ERRORS["relvals"][RelFilter]={} -KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy(KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"]) -for wf in ["535.0", "536.0", "537.0", "538.0", "547.0", "548.0", "573.0", "1361.18", "1361.181", "1362.18", "1363.18", "25211.18", "25212.18", "25213.18"]: - KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"][wf] = deepcopy(KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"]["512.0"]) -KNOWN_ERRORS["relvals"][RelFilter][".+_ppc64le_.+"] = deepcopy(KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"]) +# 10.4 and above +RelFilter = "CMSSW_(10_([4-9]|[1-9][0-9]+)|1[1-9]|[2-9][0-9]|[1-9][0-9][0-9]+)_.+" +KNOWN_ERRORS["relvals"][RelFilter] = {} +KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] = deepcopy( + KNOWN_ERRORS["relvals"]["CMSSW_10_[0-1]_.+"][".+_aarch64_.+"] +) +for wf in [ + "535.0", + "536.0", + "537.0", + "538.0", + "547.0", + "548.0", + "573.0", + "1361.18", + "1361.181", + "1362.18", + "1363.18", + "25211.18", + "25212.18", + "25213.18", +]: + KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"][wf] = deepcopy( + KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"]["512.0"] + ) +KNOWN_ERRORS["relvals"][RelFilter][".+_ppc64le_.+"] = deepcopy( + KNOWN_ERRORS["relvals"][RelFilter][".+_aarch64_.+"] +) -RelFilter="CMSSW_[0-9]+_[0-9]+_ASAN_X_.+" +RelFilter = "CMSSW_[0-9]+_[0-9]+_ASAN_X_.+" KNOWN_ERRORS["relvals"][RelFilter] = {} KNOWN_ERRORS["relvals"][RelFilter][".+"] = { - "511.0": { "step": 1, "exitcode": 31744, "reason" : MSG_ASAN_INCOMPETIBILITY}, - "534.0": { "step": 1, "exitcode": 256, "reason" : MSG_ASAN_INCOMPETIBILITY}, - "536.0": { "step": 1, "exitcode": 256, "reason" : MSG_ASAN_INCOMPETIBILITY}, + "511.0": {"step": 1, "exitcode": 31744, "reason": MSG_ASAN_INCOMPETIBILITY}, + "534.0": {"step": 1, "exitcode": 256, "reason": MSG_ASAN_INCOMPETIBILITY}, + "536.0": {"step": 1, "exitcode": 256, "reason": MSG_ASAN_INCOMPETIBILITY}, } -RelFilter="CMSSW_(12|1[2-9]|[2-9][0-9]|[1-9][0-9][0-9]+)_.+" +RelFilter = "CMSSW_(12|1[2-9]|[2-9][0-9]|[1-9][0-9][0-9]+)_.+" KNOWN_ERRORS["relvals"][RelFilter] = {} KNOWN_ERRORS["relvals"][RelFilter][".+_(aarch64|ppc64le)_.+"] = { - "10804.31": { "step": 3, "exitcode": 16640, "reason" : MSG_TRITON_INCOMPETIBILITY}, - "10805.31": { "step": 3, "exitcode": 16640, "reason" : MSG_TRITON_INCOMPETIBILITY}, + "10804.31": {"step": 3, "exitcode": 16640, "reason": MSG_TRITON_INCOMPETIBILITY}, + "10805.31": {"step": 3, "exitcode": 16640, "reason": MSG_TRITON_INCOMPETIBILITY}, } + def get_known_errors(release, architecture, test_type): - if not test_type in KNOWN_ERRORS: return {} - from re import match - errs = {} - for rel in KNOWN_ERRORS[test_type]: - if not match(rel,release): continue - for arch in KNOWN_ERRORS[test_type][rel]: - if not match(arch,architecture): continue - for test in 
KNOWN_ERRORS[test_type][rel][arch]: - obj = KNOWN_ERRORS[test_type][rel][arch][test] - if not obj: - if test in errs: del errs[test] - else: - errs[test]=obj - return errs + if not test_type in KNOWN_ERRORS: + return {} + from re import match + + errs = {} + for rel in KNOWN_ERRORS[test_type]: + if not match(rel, release): + continue + for arch in KNOWN_ERRORS[test_type][rel]: + if not match(arch, architecture): + continue + for test in KNOWN_ERRORS[test_type][rel][arch]: + obj = KNOWN_ERRORS[test_type][rel][arch][test] + if not obj: + if test in errs: + del errs[test] + else: + errs[test] = obj + return errs + if __name__ == "__main__": - from json import dumps - print(dumps(KNOWN_ERRORS,sort_keys=True,indent=2)) + from json import dumps + print(dumps(KNOWN_ERRORS, sort_keys=True, indent=2)) diff --git a/cmssw_l2/commit.txt b/cmssw_l2/commit.txt index 91ceea555b3b..8ca11a5c2ec7 100644 --- a/cmssw_l2/commit.txt +++ b/cmssw_l2/commit.txt @@ -62,3 +62,5 @@ d72ca15c6ea00017a43fb81fc0d254dea1a0df61 f78417506ff4cba5415943816df6ecfb2972dfcf 26915ad78be57b64f2639f1aa7a3b8d0b5131e3d df758b8740fb65e3c60cf10a777e43bbaa022842 +2fd4e6d6fbfa415e108c64888b0caf2ff33972b8 +204bd9234753983de7203b45644dbcc49f62956e diff --git a/cmssw_l2/l2.json b/cmssw_l2/l2.json index 24690ab4337f..f2e101df7b2c 100644 --- a/cmssw_l2/l2.json +++ b/cmssw_l2/l2.json @@ -1559,6 +1559,7 @@ "category": [ "hlt" ], + "end_date": 1698883200, "start_date": 1632096000 } ], diff --git a/cmssw_l2/update.py b/cmssw_l2/update.py index 2a6698881638..13ad2986d3b2 100755 --- a/cmssw_l2/update.py +++ b/cmssw_l2/update.py @@ -1,59 +1,68 @@ #!/usr/bin/env python from sys import argv, exit -from json import load,dump -try: from categories import CMSSW_L2 +from json import load, dump + +try: + from categories import CMSSW_L2 except Exception as e: - print("Python import error:",e) - exit(0) -try: from categories import CMSSW_L1 -except: CMSSW_L1 = {} + print("Python import error:", e) + exit(0) +try: + from categories import CMSSW_L1 +except: + CMSSW_L1 = {} l2_file = argv[1] -ctime=int(int(argv[2])/86400)*86400 +ctime = int(int(argv[2]) / 86400) * 86400 data = {} with open(l2_file) as ref: - data=load(ref) + data = load(ref) for u in CMSSW_L1: - if u not in CMSSW_L2: CMSSW_L2[u]=['orp'] - else: CMSSW_L2[u].append('orp') + if u not in CMSSW_L2: + CMSSW_L2[u] = ["orp"] + else: + CMSSW_L2[u].append("orp") -data_chg=False +data_chg = False for u in CMSSW_L2: - if u not in data: - data[u] = [{'start_date': ctime, 'category': CMSSW_L2[u]}] - data_chg=True - elif (set(CMSSW_L2[u])!=set(data[u][-1]['category'])): - if 'end_date' not in data[u][-1]: - data_chg=True - if (data[u][-1]['start_date'] == ctime): - data[u].pop() - if not data[u]: del data[u] - else: - data[u][-1]['end_date'] = ctime - if CMSSW_L2[u]: - data_chg=True - if u not in data: - data[u] = [{'start_date': ctime, 'category': CMSSW_L2[u]}] - elif (data[u][-1]['end_date'] == ctime) and (set(CMSSW_L2[u])==set(data[u][-1]['category'])): - del data[u][-1]['end_date'] - else: - data[u].append({'start_date': ctime, 'category': CMSSW_L2[u]}) - elif ('end_date' in data[u][-1]): - data[u].append({'start_date': ctime, 'category': CMSSW_L2[u]}) - data_chg=True + if u not in data: + data[u] = [{"start_date": ctime, "category": CMSSW_L2[u]}] + data_chg = True + elif set(CMSSW_L2[u]) != set(data[u][-1]["category"]): + if "end_date" not in data[u][-1]: + data_chg = True + if data[u][-1]["start_date"] == ctime: + data[u].pop() + if not data[u]: + del data[u] + else: + data[u][-1]["end_date"] = ctime + 
if CMSSW_L2[u]: + data_chg = True + if u not in data: + data[u] = [{"start_date": ctime, "category": CMSSW_L2[u]}] + elif (data[u][-1]["end_date"] == ctime) and ( + set(CMSSW_L2[u]) == set(data[u][-1]["category"]) + ): + del data[u][-1]["end_date"] + else: + data[u].append({"start_date": ctime, "category": CMSSW_L2[u]}) + elif "end_date" in data[u][-1]: + data[u].append({"start_date": ctime, "category": CMSSW_L2[u]}) + data_chg = True for u in data: - if (u not in CMSSW_L2) and ('end_date' not in data[u][-1]): - data[u][-1]['end_date'] = ctime - data_chg=True + if (u not in CMSSW_L2) and ("end_date" not in data[u][-1]): + data[u][-1]["end_date"] = ctime + data_chg = True for u in CMSSW_L2: - if (u in data) and ('end_date' in data[u][-1]): - del data[u][-1]['end_date'] - data_chg = True + if (u in data) and ("end_date" in data[u][-1]): + del data[u][-1]["end_date"] + data_chg = True if data_chg: - print(" Updated L2") - with open(l2_file, "w") as ref: - dump(data, ref, sort_keys=True, indent=2) + print(" Updated L2") + with open(l2_file, "w") as ref: + dump(data, ref, sort_keys=True, indent=2) diff --git a/cmsutils.py b/cmsutils.py index 4ea3d840c080..308ea1b99564 100644 --- a/cmsutils.py +++ b/cmsutils.py @@ -7,113 +7,144 @@ from os.path import dirname, abspath try: - CMS_BOT_DIR = dirname(abspath(__file__)) + CMS_BOT_DIR = dirname(abspath(__file__)) except Exception as e: - from sys import argv - CMS_BOT_DIR = dirname( abspath(argv[0])) + from sys import argv + + CMS_BOT_DIR = dirname(abspath(argv[0])) + def getHostDomain(): - site = '' + site = "" import socket + site = socket.getfqdn() - fqdn = site.split('.') + fqdn = site.split(".") hname = fqdn[0] - dname = 'cern.ch' - if len(fqdn)>2: dname = fqdn[-2]+'.'+fqdn[-1] + dname = "cern.ch" + if len(fqdn) > 2: + dname = fqdn[-2] + "." + fqdn[-1] return hname, dname + def getDomain(): return getHostDomain()[1] + def getHostName(): return getHostDomain()[0] + def _getCPUCount(): cmd = "nproc" if platform == "darwin": - cmd = "sysctl -n hw.ncpu" + cmd = "sysctl -n hw.ncpu" error, count = run_cmd(cmd) if error: - print("Warning: unable to detect cpu count. Using 4 as default value") - out = "4" + print("Warning: unable to detect cpu count. Using 4 as default value") + out = "4" if not count.isdigit(): - return 4 + return 4 return int(count) + def _memorySizeGB(): cmd = "" if platform == "darwin": - cmd = "sysctl -n hw.memsize" + cmd = "sysctl -n hw.memsize" elif platform.startswith("linux"): - cmd = "free -t -m | grep '^Mem: *' | awk '{print $2}'" + cmd = "free -t -m | grep '^Mem: *' | awk '{print $2}'" error, out = run_cmd(cmd) if error: - print("Warning: unable to detect memory info. Using 8GB as default value") - return 8 + print("Warning: unable to detect memory info. 
Using 8GB as default value") + return 8 if not out.isdigit(): - return 8 + return 8 from math import ceil - count = int(ceil(float(out)/1024)) - if count == 0: count =1 + + count = int(ceil(float(out) / 1024)) + if count == 0: + count = 1 return count + MachineMemoryGB = _memorySizeGB() MachineCPUCount = _getCPUCount() + def _compilationProcesses(): count = MachineCPUCount * 2 - if MachineMemoryGB "+asctime()+ " in ", getcwd() ," executing ", cmd) - else: + if not inDir: + if debug: + print("--> " + asctime() + " in ", getcwd(), " executing ", cmd) + else: + if debug: + print("--> " + asctime() + " in " + inDir + " executing ", cmd) + cmd = "cd " + inDir + "; " + cmd + sys.stdout.flush() + sys.stderr.flush() + start = time() + ret = 0 + outX = "" + while cmd.endswith(";"): + cmd = cmd[:-1] + if dryRun: + print("DryRun for: " + cmd) + else: + ret, outX = run_cmd(cmd) + if debug: + print(outX) + stop = time() if debug: - print("--> "+asctime()+ " in " + inDir + " executing ", cmd) - cmd = "cd " + inDir + "; "+cmd - sys.stdout.flush() - sys.stderr.flush() - start = time() - ret = 0 - outX = "" - while cmd.endswith(";"): cmd=cmd[:-1] - if dryRun: - print("DryRun for: "+cmd) - else: - ret, outX = run_cmd(cmd) - if debug: - print(outX) - stop = time() - if debug: - print("--> "+asctime()+" cmd took", stop-start, "sec. ("+strftime("%H:%M:%S",gmtime(stop-start))+")") - sys.stdout.flush() - sys.stderr.flush() - return (ret,outX) + print( + "--> " + asctime() + " cmd took", + stop - start, + "sec. (" + strftime("%H:%M:%S", gmtime(stop - start)) + ")", + ) + sys.stdout.flush() + sys.stderr.flush() + return (ret, outX) + def getIBReleaseInfo(rel): - m = re.match("^CMSSW_(\d+_\d+(_[A-Z][A-Za-z0-9]+|))_X(_[A-Z]+|)_(\d\d\d\d-\d\d-\d\d-(\d\d)\d\d)",rel) - if not m: return ("","","") - rc = m.group(1).replace("_",".") - from datetime import datetime - day = datetime.strptime(m.group(4),"%Y-%m-%d-%H%M").strftime("%a").lower() - hour = m.group(5) - return (rc, day, hour) + m = re.match( + "^CMSSW_(\d+_\d+(_[A-Z][A-Za-z0-9]+|))_X(_[A-Z]+|)_(\d\d\d\d-\d\d-\d\d-(\d\d)\d\d)", rel + ) + if not m: + return ("", "", "") + rc = m.group(1).replace("_", ".") + from datetime import datetime + + day = datetime.strptime(m.group(4), "%Y-%m-%d-%H%M").strftime("%a").lower() + hour = m.group(5) + return (rc, day, hour) + def cmsswIB2Week(release): - from datetime import datetime - rel_sec = int(datetime.strptime(release.split("_")[-1], '%Y-%m-%d-%H%M').strftime('%s')) - return (str(int(((rel_sec/86400)+4)/7)), rel_sec) + from datetime import datetime + + rel_sec = int(datetime.strptime(release.split("_")[-1], "%Y-%m-%d-%H%M").strftime("%s")) + return (str(int(((rel_sec / 86400) + 4) / 7)), rel_sec) + # # Reads config.map and returns a list of the architectures for which a release needs to be built. @@ -121,39 +152,48 @@ def cmsswIB2Week(release): # that the IBs are disabled. 
# def get_config_map_properties(filters=None): - CONFIG_MAP_FILE = CMS_BOT_DIR + '/config.map' - specs = [] - f = open( CONFIG_MAP_FILE , 'r' ) - lines = [l.strip(" \n\t;") for l in f.read().split("\n") if l.strip(" \n\t;")] - for line in lines: - entry = dict(x.split("=",1) for x in line.split(";") if x) - skip = False - if filters: - for k in filters: - if (k in entry) and (entry[k]==filters[k]): - skip = True - break - if not skip: specs.append(entry) - return specs + CONFIG_MAP_FILE = CMS_BOT_DIR + "/config.map" + specs = [] + f = open(CONFIG_MAP_FILE, "r") + lines = [l.strip(" \n\t;") for l in f.read().split("\n") if l.strip(" \n\t;")] + for line in lines: + entry = dict(x.split("=", 1) for x in line.split(";") if x) + skip = False + if filters: + for k in filters: + if (k in entry) and (entry[k] == filters[k]): + skip = True + break + if not skip: + specs.append(entry) + return specs + def percentile(percentage, data, dlen): - R=(dlen+1)*percentage/100.0 - IR=int(R) - if IR>=dlen: return data[-1] - elif IR==0: return data[0] - FR=int((R-IR)*100) - res = data[IR-1] - if FR>0: res=(FR/100.0)*(data[IR]-res)+res - return res + R = (dlen + 1) * percentage / 100.0 + IR = int(R) + if IR >= dlen: + return data[-1] + elif IR == 0: + return data[0] + FR = int((R - IR) * 100) + res = data[IR - 1] + if FR > 0: + res = (FR / 100.0) * (data[IR] - res) + res + return res + def get_full_release_archs(release_name): - data = {} - ret, out = run_cmd("grep 'label="+ release_name +";' "+ CMS_BOT_DIR+"/releases.map") - for line in out.split("\n"): - arch="" - prod=0 - for item in [x.split("=") for x in line.split(";")]: - if item[0]=="architecture": arch = item[1] - elif item[0]=="prodarch": prod=item[1] - if arch: data[arch]=prod - return data + data = {} + ret, out = run_cmd("grep 'label=" + release_name + ";' " + CMS_BOT_DIR + "/releases.map") + for line in out.split("\n"): + arch = "" + prod = 0 + for item in [x.split("=") for x in line.split(";")]: + if item[0] == "architecture": + arch = item[1] + elif item[0] == "prodarch": + prod = item[1] + if arch: + data[arch] = prod + return data diff --git a/comment-gh-pr b/comment-gh-pr deleted file mode 100755 index 53a3d298ff5d..000000000000 --- a/comment-gh-pr +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -""" -Makes a comment on Pull request on Github -""" -from __future__ import print_function -from github import Github -from os.path import expanduser, dirname, abspath, join, exists -from optparse import OptionParser -from sys import exit -import re, sys -from socket import setdefaulttimeout -setdefaulttimeout(120) -SCRIPT_DIR = dirname(abspath(sys.argv[0])) - - - -if __name__ == "__main__": - parser = OptionParser(usage="%prog -p|--pullrequest -m|--message [-r|--repository ] [-n|--dry-run]") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-p", "--pullrequest", dest="pr", help="Github Pull Request Number e.g. 
10500", type="int", metavar="N") - parser.add_option("-m", "--message", dest="msg", help="Message to be added for Github Pull Request", type="str") - parser.add_option("-R", "--report-file", dest="report_file",help="Message from the file to be added for Github Pull Request", type="str") - opts, args = parser.parse_args() - - - if not opts.pr: parser.error("Missing pull request number : -p|--pullrequest ") - msg = "" - if opts.msg: msg = re.sub("@N@","\n",opts.msg) - elif opts.report_file: msg = open(opts.report_file).read() - else: parser.error("Missing pull request message: -m|--message OR -R|--report-file ") - if opts.dryRun: - print("Addeding Comments:",msg) - else: - repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - from github_utils import comment_gh_pr - try: - comment_gh_pr(gh, opts.repository, opts.pr, msg) - print("Added comment for %s#%s" % (opts.repository, opts.pr)) - print("Comment message:\n",msg) - except Exception as e: - print("Failed to add comment: ",e) - exit(1) diff --git a/comment-gh-pr b/comment-gh-pr new file mode 120000 index 000000000000..26e8bd02f8c1 --- /dev/null +++ b/comment-gh-pr @@ -0,0 +1 @@ +comment-gh-pr.py \ No newline at end of file diff --git a/comment-gh-pr.py b/comment-gh-pr.py new file mode 100755 index 000000000000..d6dc474f8dff --- /dev/null +++ b/comment-gh-pr.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +""" +Makes a comment on Pull request on Github +""" +from __future__ import print_function +from github import Github +from os.path import expanduser, dirname, abspath, join, exists +from optparse import OptionParser +from sys import exit +import re, sys +from socket import setdefaulttimeout + +setdefaulttimeout(120) +SCRIPT_DIR = dirname(abspath(sys.argv[0])) + + +if __name__ == "__main__": + parser = OptionParser( + usage="%prog -p|--pullrequest -m|--message [-r|--repository ] [-n|--dry-run]" + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-p", + "--pullrequest", + dest="pr", + help="Github Pull Request Number e.g. 
10500", + type="int", + metavar="N", + ) + parser.add_option( + "-m", + "--message", + dest="msg", + help="Message to be added for Github Pull Request", + type="str", + ) + parser.add_option( + "-R", + "--report-file", + dest="report_file", + help="Message from the file to be added for Github Pull Request", + type="str", + ) + opts, args = parser.parse_args() + + if not opts.pr: + parser.error("Missing pull request number : -p|--pullrequest ") + msg = "" + if opts.msg: + msg = re.sub("@N@", "\n", opts.msg) + elif opts.report_file: + msg = open(opts.report_file).read() + else: + parser.error( + "Missing pull request message: -m|--message OR -R|--report-file " + ) + if opts.dryRun: + print("Addeding Comments:", msg) + else: + repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import repo_config + + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + from github_utils import comment_gh_pr + + try: + comment_gh_pr(gh, opts.repository, opts.pr, msg) + print("Added comment for %s#%s" % (opts.repository, opts.pr)) + print("Comment message:\n", msg) + except Exception as e: + print("Failed to add comment: ", e) + exit(1) diff --git a/comp/create_wm_archs.py b/comp/create_wm_archs.py index 3a8a6c749262..3bb744c89da4 100755 --- a/comp/create_wm_archs.py +++ b/comp/create_wm_archs.py @@ -2,84 +2,95 @@ from subprocess import getstatusoutput from os.path import join, dirname, exists from sys import exit -CMS_CVMFS="/cvmfs/cms.cern.ch" -COMP_DIR="%s/COMP" % CMS_CVMFS + +CMS_CVMFS = "/cvmfs/cms.cern.ch" +COMP_DIR = "%s/COMP" % CMS_CVMFS ENV_FILE = join("etc", "profile.d", "init.sh") FUTURE_PKGS = ["py2-future", "py3-future"] -FUTURE_PKG="py3-future" -FUTURE_VERSION="0.18.2" +FUTURE_PKG = "py3-future" +FUTURE_VERSION = "0.18.2" WM_ARCHS = { - "rhel5_x86_64" : ["slc5_amd64_gcc434"], - "rhel7_x86_64" : ["slc7_amd64_gcc630"], - "rhel6_x86_64" : ["slc6_amd64_gcc700" , {"python3" : "3.6.4", "py2-future": "0.16.0"}], - "rhel7_ppc64le" : ["slc7_ppc64le_gcc820", {"python3" : "3.8.2", "py2-future": "0.18.2"}], - "rhel7_aarch64" : ["slc7_aarch64_gcc820", {"python3" : "3.8.2", "py2-future": "0.18.2"}], - "rhel8_x86_64" : ["cc8_amd64_gcc9" , {"python3" : "3.8.2", "py3-future": "0.18.2"}], - "rhel8_aarch64" : ["cc8_aarch64_gcc9" , {"python3" : "3.8.2", "py3-future": "0.18.2"}], - "rhel8_ppc64le" : ["cc8_ppc64le_gcc9" , {"python3" : "3.8.2", "py3-future": "0.18.2"}], - "rhel9_x86_64" : ["cs9_amd64_gcc11" , {"python3" : "3.9.6", "py3-future": "0.18.2"}], + "rhel5_x86_64": ["slc5_amd64_gcc434"], + "rhel7_x86_64": ["slc7_amd64_gcc630"], + "rhel6_x86_64": ["slc6_amd64_gcc700", {"python3": "3.6.4", "py2-future": "0.16.0"}], + "rhel7_ppc64le": ["slc7_ppc64le_gcc820", {"python3": "3.8.2", "py2-future": "0.18.2"}], + "rhel7_aarch64": ["slc7_aarch64_gcc820", {"python3": "3.8.2", "py2-future": "0.18.2"}], + "rhel8_x86_64": ["cc8_amd64_gcc9", {"python3": "3.8.2", "py3-future": "0.18.2"}], + "rhel8_aarch64": ["cc8_aarch64_gcc9", {"python3": "3.8.2", "py3-future": "0.18.2"}], + "rhel8_ppc64le": ["cc8_ppc64le_gcc9", {"python3": "3.8.2", "py3-future": "0.18.2"}], + "rhel9_x86_64": ["cs9_amd64_gcc11", {"python3": "3.9.6", "py3-future": "0.18.2"}], } + def runcmd(cmd, debug=True): - if debug: print("Running: ",cmd) - e, out = getstatusoutput(cmd) - if e: - print(out) - exit (1) - return out + if debug: + print("Running: ", cmd) + e, out = getstatusoutput(cmd) + if e: + print(out) + exit(1) + return out + 
def create_default_links(comp_ver, def_ver): - if not exists(def_ver): - cmd = "mkdir -p {0} && ln -s {1} {2}".format(dirname(def_ver), comp_ver, def_ver) + if not exists(def_ver): + cmd = "mkdir -p {0} && ln -s {1} {2}".format(dirname(def_ver), comp_ver, def_ver) + def find_cms_version(arch, pkg, ver): - out = runcmd("ls -d %s/%s/external/%s/%s*" % (CMS_CVMFS, arch,pkg,ver), debug=False) - mver="" - for v in out.split("\n"): - mver=v - if v.endswith(ver): break - return mver + out = runcmd("ls -d %s/%s/external/%s/%s*" % (CMS_CVMFS, arch, pkg, ver), debug=False) + mver = "" + for v in out.split("\n"): + mver = v + if v.endswith(ver): + break + return mver + def create_comp_package(arch, pkg, ver, cmspkg): - comp_pkg = join(arch, 'external', pkg) - comp_ver = join(comp_pkg, ver) - if pkg=="python3": - if not exists(comp_ver): - cmd = "ln -s {0} {1}".format(cmspkg, comp_ver) - if not exists(comp_pkg): - cmd = "mkdir -p {0} && {1}".format(comp_pkg, cmd) - runcmd(cmd) - elif pkg in FUTURE_PKGS: - comp_init = join(comp_ver, ENV_FILE) - if not exists(comp_init): - comp_dir = dirname(comp_init) - cms_init = join(cmspkg, ENV_FILE) - cmd = "cp -fp {0} {1} && sed -i -e '/dependencies-setup/d;/PYTHON27PATH=/d;/LD_LIBRARY_PATH=/d' {1}".format(cms_init, comp_init) - cmd+= " && sed -i -e 's/PYTHON3PATH/PYTHONPATH/g' {0}".format(comp_init) - if not exists(comp_dir): - cmd = "mkdir -p {0} && {1}".format(comp_dir, cmd) - runcmd(cmd) - def_future = join(arch, 'external', FUTURE_PKG, FUTURE_VERSION) - if not exists(def_future): - comp_ver = ver if FUTURE_PKG==pkg else "../%s/%s" % (pkg, ver) - runcmd("mkdir -p {0} && ln -s {1} {2}".format(dirname(def_future), comp_ver, def_future)) - else: - print("ERROR: Unknown package %s" % pkg) - return False - return True + comp_pkg = join(arch, "external", pkg) + comp_ver = join(comp_pkg, ver) + if pkg == "python3": + if not exists(comp_ver): + cmd = "ln -s {0} {1}".format(cmspkg, comp_ver) + if not exists(comp_pkg): + cmd = "mkdir -p {0} && {1}".format(comp_pkg, cmd) + runcmd(cmd) + elif pkg in FUTURE_PKGS: + comp_init = join(comp_ver, ENV_FILE) + if not exists(comp_init): + comp_dir = dirname(comp_init) + cms_init = join(cmspkg, ENV_FILE) + cmd = "cp -fp {0} {1} && sed -i -e '/dependencies-setup/d;/PYTHON27PATH=/d;/LD_LIBRARY_PATH=/d' {1}".format( + cms_init, comp_init + ) + cmd += " && sed -i -e 's/PYTHON3PATH/PYTHONPATH/g' {0}".format(comp_init) + if not exists(comp_dir): + cmd = "mkdir -p {0} && {1}".format(comp_dir, cmd) + runcmd(cmd) + def_future = join(arch, "external", FUTURE_PKG, FUTURE_VERSION) + if not exists(def_future): + comp_ver = ver if FUTURE_PKG == pkg else "../%s/%s" % (pkg, ver) + runcmd( + "mkdir -p {0} && ln -s {1} {2}".format(dirname(def_future), comp_ver, def_future) + ) + else: + print("ERROR: Unknown package %s" % pkg) + return False + return True + for arch in WM_ARCHS: - arch_data = WM_ARCHS[arch] - sarch = arch_data[0] - if len(arch_data)>1: - for pkg in arch_data[1]: - ver = arch_data[1][pkg] - cmspkg = find_cms_version(sarch, pkg, ver) - if not (cmspkg and create_comp_package(sarch, pkg, ver, cmspkg)): + arch_data = WM_ARCHS[arch] + sarch = arch_data[0] + if len(arch_data) > 1: + for pkg in arch_data[1]: + ver = arch_data[1][pkg] + cmspkg = find_cms_version(sarch, pkg, ver) + if not (cmspkg and create_comp_package(sarch, pkg, ver, cmspkg)): + exit(1) + elif not exists(sarch): + print("ERROR: Missing %s installation area" % sarch) exit(1) - elif not exists(sarch): - print("ERROR: Missing %s installation area" % sarch) - exit(1) - if not 
exists(arch): - runcmd("ln -s {0} {1}".format(sarch, arch)) - + if not exists(arch): + runcmd("ln -s {0} {1}".format(sarch, arch)) diff --git a/compareTriggerResults b/compareTriggerResults deleted file mode 100755 index 9b788046b989..000000000000 --- a/compareTriggerResults +++ /dev/null @@ -1,292 +0,0 @@ -#!/usr/bin/env python -""" -Script to compare the content of edm::TriggerResults collections in EDM files across multiple workflows - - CMSSW dependencies: edmDumpEventContent, hltDiff -""" -from __future__ import print_function -import argparse -import os -import fnmatch -import subprocess - -def KILL(message): - raise RuntimeError(message) - -def WARNING(message): - print('>> Warning -- '+message) - -def get_output(cmds, permissive=False): - prc = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = prc.communicate() - if (not permissive) and prc.returncode: - KILL('get_output -- shell command failed (execute command to reproduce the error):\n'+' '*14+'> '+cmd) - return (out, err) - -def command_output_lines(cmds, stdout=True, stderr=False, permissive=False): - _tmp_out_ls = [] - if not (stdout or stderr): - WARNING('command_output_lines -- options "stdout" and "stderr" both set to FALSE, returning empty list') - return _tmp_out_ls - - _tmp_out = get_output(cmds, permissive=permissive) - if stdout: _tmp_out_ls += _tmp_out[0].split('\n') - if stderr: _tmp_out_ls += _tmp_out[1].split('\n') - - return _tmp_out_ls - -def which(program, permissive=False, verbose=False): - _exe_ls = [] - fpath, fname = os.path.split(program) - if fpath: - if os.path.isfile(program) and os.access(program, os.X_OK): - _exe_ls += [program] - else: - for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') - exe_file = os.path.join(path, program) - if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK): - _exe_ls += [exe_file] - _exe_ls = list(set(_exe_ls)) - - if len(_exe_ls) == 0: - log_msg = 'which -- executable not found: '+program - if permissive: - if verbose: WARNING(log_msg) - return None - else: - KILL(log_msg) - - if verbose and len(_exe_ls) > 1: - WARNING('which -- executable "'+program+'" has multiple matches: \n'+str(_exe_ls)) - - return _exe_ls[0] - -def getListOfTriggerResultsProcessNames(inputEDMFile, verbosity=0): - ret = [] - try: - for outl in command_output_lines(['edmDumpEventContent', inputEDMFile]): - outl_split = [_tmp.replace('"','') for _tmp in outl.split()] - if len(outl_split) != 4: continue - if outl_split[0] == 'edm::TriggerResults' and outl_split[1] == 'TriggerResults' and outl_split[2] == '': - ret.append(outl_split[3]) - ret = list(set(ret)) - except: - if verbosity > 10: - WARNING('getListOfTriggerResultsProcessNames -- failed to execute "edmDumpEventContent '+inputEDMFile+'" (will return empty list)') - return ret - -def compareTriggerResults(**kwargs): - inputDir1 = kwargs.get('inputDir1') - inputDir2 = kwargs.get('inputDir2') - filePattern = kwargs.get('filePattern') - outputDir = kwargs.get('outputDir') - maxEvents = kwargs.get('maxEvents') - summaryFormat = kwargs.get('summaryFormat', None) - dryRun = kwargs.get('dryRun', False) - verbosity = kwargs.get('verbosity', 0) - - files1 = [os.path.join(dp, f) for dp, dn, filenames in os.walk(inputDir1) for f in filenames] - files1 = [f for f in files1 if fnmatch.fnmatch(f, filePattern)] - - files2 = [os.path.join(dp, f) for dp, dn, filenames in os.walk(inputDir2) for f in filenames] - files2 = [f for f in files2 if fnmatch.fnmatch(f, filePattern)] - - wfDict = {} - for f1 in 
sorted(files1): - fBasename, wfName = os.path.basename(f1), os.path.dirname(os.path.relpath(f1, inputDir1)) - f2 = os.path.join(inputDir2, wfName, fBasename) - if f2 not in files2: continue - - # get list of processNames of edm::TriggerResults collections - trProcessNames = getListOfTriggerResultsProcessNames(f1, verbosity) - if not trProcessNames: continue - - # remove duplicates across different EDM files of the same workflow - # (would become unnecessary calls to hltDiff) - trProcessNames2 = trProcessNames[:] - for _tmp1 in trProcessNames: - if wfName in wfDict: - if _tmp1 in wfDict[wfName]: - trProcessNames2.remove(_tmp1) - - # skip if empty list - if not trProcessNames2: continue - - # fill dictionary - if wfName not in wfDict: wfDict[wfName] = {} - for _tmp1 in trProcessNames2: - wfDict[wfName][_tmp1] = [f1, f2] - - if not wfDict: - if verbosity >= 0: - WARNING('compareTriggerResults -- found zero inputs to be compared (no outputs produced)') - return -1 - - # hltDiff calls - numWorkflowsChecked, numWorkflowsWithDiffs = 0, 0 - summaryLines = [] - - if summaryFormat == 'html': - summaryLines += [ - '', - '', - '
Summary of edm::TriggerResults Comparisons
', - '', - ] - elif summaryFormat == 'txt': - summaryLines += ['| {:25} | {:18} | {:12} | {:}'.format('Events with Diffs', 'Events Processed', 'Process Name', 'Workflow')] - summaryLines += ['-'*100] - - try: - sortedWfNames = sorted(wfDict, key=lambda k: float(k.split('_')[0])) - except: - sortedWfNames = sorted(wfDict.keys()) - - for wfName in sortedWfNames: - wfNameShort = wfName.split('_')[0] - wfOutputDir = os.path.join(outputDir, wfName) - if not dryRun: - try: - os.makedirs(wfOutputDir) - except: - warn_msg = 'target output directory already exists' if os.path.isdir(wfOutputDir) else 'failed to create output directory' - WARNING(warn_msg+' (will skip comparisons for this workflow): '+wfOutputDir) - continue - - wfHasDiff = False - for procName in wfDict[wfName]: - hltDiff_cmds = ['hltDiff'] - hltDiff_cmds += ['-m', str(maxEvents)]*(maxEvents >= 0) - hltDiff_cmds += ['-o', wfDict[wfName][procName][0], '-O', procName] - hltDiff_cmds += ['-n', wfDict[wfName][procName][1], '-N', procName] - hltDiff_cmds += ['-j', '-F', os.path.join(wfOutputDir, procName)] - - if dryRun: - if verbosity > 0: print('> '+' '.join(hltDiff_cmds)) - continue - - hltDiff_outputs = command_output_lines(hltDiff_cmds) - - diffStats = [] - with open(os.path.join(wfOutputDir, procName+'.log'), 'w') as outputLogFile: - for _tmp in hltDiff_outputs: - outputLogFile.write(_tmp+'\n') - # CAVEAT: relies on format of hltDiff outputs to stdout - # - see https://github.com/cms-sw/cmssw/blob/master/HLTrigger/Tools/bin/hltDiff.cc - if _tmp.startswith('Found '): - diffStatsTmp = [int(s) for s in _tmp.split() if s.isdigit()] - if len(diffStatsTmp) == 2: - if diffStats: - WARNING('logic error -- hltDiff statistics already known (check output of hltDiff)') - else: - diffStats = diffStatsTmp[:] - else: - WARNING('format error -- extracted N!=2 integers from output of hltDiff: '+str(diffStatsTmp)) - - if not diffStats: diffStats = [0, 0] - wfHasDiff |= diffStats[1] > 0 - - if summaryFormat == 'html': - summaryLines += [ - '', - ' ', - ' ', - ' ', - ' ', - '', - ] - elif summaryFormat == 'txt': - summaryLines += ['| {:25d} | {:18d} | {:12} | {:}'.format(diffStats[1], diffStats[0], procName, wfName)] - - numWorkflowsChecked += 1 - if wfHasDiff: numWorkflowsWithDiffs += 1 - - if summaryFormat == 'txt': - summaryLines += ['-'*100] - - if summaryFormat == 'html': - summaryLines += ['
WorkflowProcess NameEvents with DiffsEvents Processed
'+wfNameShort+''+procName+''+str(diffStats[1])+''+str(diffStats[0])+'
'] - - if dryRun: return 0 - - if summaryLines: - outputSummaryFilePath = os.path.join(outputDir, 'index.html' if summaryFormat == 'html' else 'summary.log') - with open(outputSummaryFilePath, 'w') as outputSummaryFile: - for _tmp in summaryLines: outputSummaryFile.write(_tmp+'\n') - - if verbosity >= 0: - if numWorkflowsChecked == 0: - print('SUMMARY TriggerResults: no workflows checked') - elif numWorkflowsWithDiffs == 0: - print('SUMMARY TriggerResults: no differences found') - else: - print('SUMMARY TriggerResults: found differences in {:d} / {:d} workflows'.format(numWorkflowsWithDiffs, len(wfDict.keys()))) - - return numWorkflowsWithDiffs - -#### main -if __name__ == '__main__': - ### args - parser = argparse.ArgumentParser(prog='./'+os.path.basename(__file__), formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__) - - parser.add_argument('-r', '--reference-dir', dest='inputDir_refe', action='store', default=None, required=True, - help='path to directory with baseline (or, "reference") workflow outputs') - - parser.add_argument('-t', '--target-dir', dest='inputDir_targ', action='store', default=None, required=True, - help='path to directory with new (or, "target") workflow outputs') - - parser.add_argument('-f', '--file-pattern', dest='file_pattern', action='store', default='*.root', - help='pattern of input EDM files to be compared (default: "*.root")') - - parser.add_argument('-o', '--output-dir', dest='outputDir', action='store', default=None, required=True, - help='path to output directory') - - parser.add_argument('-m', '--max-events', dest='max_events', action='store', type=int, default=-1, - help='maximum number of events considered per comparison (default: -1, i.e. all)') - - parser.add_argument('-s', '--summary', dest='summary', action='store', default=None, choices=["html", "txt"], - help='produce summary file in the specified format (must be "txt" or "html") (default: None, i.e. 
no summary)') - - parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true', default=False, - help='enable dry-run mode (default: False)') - - parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, default=0, - help='level of verbosity (default: 0)') - - opts, opts_unknown = parser.parse_known_args() - ### ------------------------- - - # check: unrecognized command-line arguments - if len(opts_unknown) > 0: - KILL('unrecognized command-line arguments: '+str(opts_unknown)) - - # check: input directories - if not os.path.isdir(opts.inputDir_refe): - KILL('invalid path to directory with baseline (or, "reference") workflow outputs [-r]: '+opts.inputDir_refe) - - if not os.path.isdir(opts.inputDir_targ): - KILL('invalid path to directory with new (or, "target") workflow outputs [-t]: '+opts.inputDir_targ) - - # check: output - outDir = opts.outputDir - if not opts.dry_run and opts.summary is not None and os.path.exists(outDir): - KILL('target output directory already exists [-o]: '+outDir) - - # check: external dependencies - if which('edmDumpEventContent', permissive=True) is None: - KILL('executable "edmDumpEventContent" is not available (set up an appropriate CMSSW area)') - - if which('hltDiff', permissive=True) is None: - KILL('executable "hltDiff" is not available (set up an appropriate CMSSW area)') - - # run TriggerResults comparisons - compareTriggerResults(**{ - 'inputDir1': opts.inputDir_refe, - 'inputDir2': opts.inputDir_targ, - 'filePattern': opts.file_pattern, - 'outputDir': outDir, - 'maxEvents': opts.max_events, - 'summaryFormat': opts.summary, - 'dryRun': opts.dry_run, - 'verbosity': opts.verbosity, - }) diff --git a/compareTriggerResults b/compareTriggerResults new file mode 120000 index 000000000000..cdf3a7d3d1e9 --- /dev/null +++ b/compareTriggerResults @@ -0,0 +1 @@ +compareTriggerResults.py \ No newline at end of file diff --git a/compareTriggerResults.py b/compareTriggerResults.py new file mode 100755 index 000000000000..e77d5b2249a5 --- /dev/null +++ b/compareTriggerResults.py @@ -0,0 +1,424 @@ +#!/usr/bin/env python +""" +Script to compare the content of edm::TriggerResults collections in EDM files across multiple workflows + - CMSSW dependencies: edmDumpEventContent, hltDiff +""" +from __future__ import print_function +import argparse +import os +import fnmatch +import subprocess + + +def KILL(message): + raise RuntimeError(message) + + +def WARNING(message): + print(">> Warning -- " + message) + + +def get_output(cmds, permissive=False): + prc = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = prc.communicate() + if (not permissive) and prc.returncode: + KILL( + "get_output -- shell command failed (execute command to reproduce the error):\n" + + " " * 14 + + "> " + + cmd + ) + return (out, err) + + +def command_output_lines(cmds, stdout=True, stderr=False, permissive=False): + _tmp_out_ls = [] + if not (stdout or stderr): + WARNING( + 'command_output_lines -- options "stdout" and "stderr" both set to FALSE, returning empty list' + ) + return _tmp_out_ls + + _tmp_out = get_output(cmds, permissive=permissive) + if stdout: + _tmp_out_ls += _tmp_out[0].split("\n") + if stderr: + _tmp_out_ls += _tmp_out[1].split("\n") + + return _tmp_out_ls + + +def which(program, permissive=False, verbose=False): + _exe_ls = [] + fpath, fname = os.path.split(program) + if fpath: + if os.path.isfile(program) and os.access(program, os.X_OK): + _exe_ls += [program] + else: + for path in os.environ["PATH"].split(os.pathsep): + 
path = path.strip('"') + exe_file = os.path.join(path, program) + if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK): + _exe_ls += [exe_file] + _exe_ls = list(set(_exe_ls)) + + if len(_exe_ls) == 0: + log_msg = "which -- executable not found: " + program + if permissive: + if verbose: + WARNING(log_msg) + return None + else: + KILL(log_msg) + + if verbose and len(_exe_ls) > 1: + WARNING('which -- executable "' + program + '" has multiple matches: \n' + str(_exe_ls)) + + return _exe_ls[0] + + +def getListOfTriggerResultsProcessNames(inputEDMFile, verbosity=0): + ret = [] + try: + for outl in command_output_lines(["edmDumpEventContent", inputEDMFile]): + outl_split = [_tmp.replace('"', "") for _tmp in outl.split()] + if len(outl_split) != 4: + continue + if ( + outl_split[0] == "edm::TriggerResults" + and outl_split[1] == "TriggerResults" + and outl_split[2] == "" + ): + ret.append(outl_split[3]) + ret = list(set(ret)) + except: + if verbosity > 10: + WARNING( + 'getListOfTriggerResultsProcessNames -- failed to execute "edmDumpEventContent ' + + inputEDMFile + + '" (will return empty list)' + ) + return ret + + +def compareTriggerResults(**kwargs): + inputDir1 = kwargs.get("inputDir1") + inputDir2 = kwargs.get("inputDir2") + filePattern = kwargs.get("filePattern") + outputDir = kwargs.get("outputDir") + maxEvents = kwargs.get("maxEvents") + summaryFormat = kwargs.get("summaryFormat", None) + dryRun = kwargs.get("dryRun", False) + verbosity = kwargs.get("verbosity", 0) + + files1 = [os.path.join(dp, f) for dp, dn, filenames in os.walk(inputDir1) for f in filenames] + files1 = [f for f in files1 if fnmatch.fnmatch(f, filePattern)] + + files2 = [os.path.join(dp, f) for dp, dn, filenames in os.walk(inputDir2) for f in filenames] + files2 = [f for f in files2 if fnmatch.fnmatch(f, filePattern)] + + wfDict = {} + for f1 in sorted(files1): + fBasename, wfName = os.path.basename(f1), os.path.dirname(os.path.relpath(f1, inputDir1)) + f2 = os.path.join(inputDir2, wfName, fBasename) + if f2 not in files2: + continue + + # get list of processNames of edm::TriggerResults collections + trProcessNames = getListOfTriggerResultsProcessNames(f1, verbosity) + if not trProcessNames: + continue + + # remove duplicates across different EDM files of the same workflow + # (would become unnecessary calls to hltDiff) + trProcessNames2 = trProcessNames[:] + for _tmp1 in trProcessNames: + if wfName in wfDict: + if _tmp1 in wfDict[wfName]: + trProcessNames2.remove(_tmp1) + + # skip if empty list + if not trProcessNames2: + continue + + # fill dictionary + if wfName not in wfDict: + wfDict[wfName] = {} + for _tmp1 in trProcessNames2: + wfDict[wfName][_tmp1] = [f1, f2] + + if not wfDict: + if verbosity >= 0: + WARNING( + "compareTriggerResults -- found zero inputs to be compared (no outputs produced)" + ) + return -1 + + # hltDiff calls + numWorkflowsChecked, numWorkflowsWithDiffs = 0, 0 + summaryLines = [] + + if summaryFormat == "html": + summaryLines += [ + "", + "", + "
Summary of edm::TriggerResults Comparisons
", + "", + ] + elif summaryFormat == "txt": + summaryLines += [ + "| {:25} | {:18} | {:12} | {:}".format( + "Events with Diffs", "Events Processed", "Process Name", "Workflow" + ) + ] + summaryLines += ["-" * 100] + + try: + sortedWfNames = sorted(wfDict, key=lambda k: float(k.split("_")[0])) + except: + sortedWfNames = sorted(wfDict.keys()) + + for wfName in sortedWfNames: + wfNameShort = wfName.split("_")[0] + wfOutputDir = os.path.join(outputDir, wfName) + if not dryRun: + try: + os.makedirs(wfOutputDir) + except: + warn_msg = ( + "target output directory already exists" + if os.path.isdir(wfOutputDir) + else "failed to create output directory" + ) + WARNING(warn_msg + " (will skip comparisons for this workflow): " + wfOutputDir) + continue + + wfHasDiff = False + for procName in wfDict[wfName]: + hltDiff_cmds = ["hltDiff"] + hltDiff_cmds += ["-m", str(maxEvents)] * (maxEvents >= 0) + hltDiff_cmds += ["-o", wfDict[wfName][procName][0], "-O", procName] + hltDiff_cmds += ["-n", wfDict[wfName][procName][1], "-N", procName] + hltDiff_cmds += ["-j", "-F", os.path.join(wfOutputDir, procName)] + + if dryRun: + if verbosity > 0: + print("> " + " ".join(hltDiff_cmds)) + continue + + hltDiff_outputs = command_output_lines(hltDiff_cmds) + + diffStats = [] + with open(os.path.join(wfOutputDir, procName + ".log"), "w") as outputLogFile: + for _tmp in hltDiff_outputs: + outputLogFile.write(_tmp + "\n") + # CAVEAT: relies on format of hltDiff outputs to stdout + # - see https://github.com/cms-sw/cmssw/blob/master/HLTrigger/Tools/bin/hltDiff.cc + if _tmp.startswith("Found "): + diffStatsTmp = [int(s) for s in _tmp.split() if s.isdigit()] + if len(diffStatsTmp) == 2: + if diffStats: + WARNING( + "logic error -- hltDiff statistics already known (check output of hltDiff)" + ) + else: + diffStats = diffStatsTmp[:] + else: + WARNING( + "format error -- extracted N!=2 integers from output of hltDiff: " + + str(diffStatsTmp) + ) + + if not diffStats: + diffStats = [0, 0] + wfHasDiff |= diffStats[1] > 0 + + if summaryFormat == "html": + summaryLines += [ + "", + ' ", + ' ", + ' ", + ' ", + "", + ] + elif summaryFormat == "txt": + summaryLines += [ + "| {:25d} | {:18d} | {:12} | {:}".format( + diffStats[1], diffStats[0], procName, wfName + ) + ] + + numWorkflowsChecked += 1 + if wfHasDiff: + numWorkflowsWithDiffs += 1 + + if summaryFormat == "txt": + summaryLines += ["-" * 100] + + if summaryFormat == "html": + summaryLines += ["
WorkflowProcess NameEvents with DiffsEvents Processed
' + wfNameShort + "' + + procName + + "' + str(diffStats[1]) + "' + str(diffStats[0]) + "
"] + + if dryRun: + return 0 + + if summaryLines: + outputSummaryFilePath = os.path.join( + outputDir, "index.html" if summaryFormat == "html" else "summary.log" + ) + with open(outputSummaryFilePath, "w") as outputSummaryFile: + for _tmp in summaryLines: + outputSummaryFile.write(_tmp + "\n") + + if verbosity >= 0: + if numWorkflowsChecked == 0: + print("SUMMARY TriggerResults: no workflows checked") + elif numWorkflowsWithDiffs == 0: + print("SUMMARY TriggerResults: no differences found") + else: + print( + "SUMMARY TriggerResults: found differences in {:d} / {:d} workflows".format( + numWorkflowsWithDiffs, len(wfDict.keys()) + ) + ) + + return numWorkflowsWithDiffs + + +#### main +if __name__ == "__main__": + ### args + parser = argparse.ArgumentParser( + prog="./" + os.path.basename(__file__), + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__doc__, + ) + + parser.add_argument( + "-r", + "--reference-dir", + dest="inputDir_refe", + action="store", + default=None, + required=True, + help='path to directory with baseline (or, "reference") workflow outputs', + ) + + parser.add_argument( + "-t", + "--target-dir", + dest="inputDir_targ", + action="store", + default=None, + required=True, + help='path to directory with new (or, "target") workflow outputs', + ) + + parser.add_argument( + "-f", + "--file-pattern", + dest="file_pattern", + action="store", + default="*.root", + help='pattern of input EDM files to be compared (default: "*.root")', + ) + + parser.add_argument( + "-o", + "--output-dir", + dest="outputDir", + action="store", + default=None, + required=True, + help="path to output directory", + ) + + parser.add_argument( + "-m", + "--max-events", + dest="max_events", + action="store", + type=int, + default=-1, + help="maximum number of events considered per comparison (default: -1, i.e. all)", + ) + + parser.add_argument( + "-s", + "--summary", + dest="summary", + action="store", + default=None, + choices=["html", "txt"], + help='produce summary file in the specified format (must be "txt" or "html") (default: None, i.e. 
no summary)', + ) + + parser.add_argument( + "-d", + "--dry-run", + dest="dry_run", + action="store_true", + default=False, + help="enable dry-run mode (default: False)", + ) + + parser.add_argument( + "-v", + "--verbosity", + dest="verbosity", + type=int, + default=0, + help="level of verbosity (default: 0)", + ) + + opts, opts_unknown = parser.parse_known_args() + ### ------------------------- + + # check: unrecognized command-line arguments + if len(opts_unknown) > 0: + KILL("unrecognized command-line arguments: " + str(opts_unknown)) + + # check: input directories + if not os.path.isdir(opts.inputDir_refe): + KILL( + 'invalid path to directory with baseline (or, "reference") workflow outputs [-r]: ' + + opts.inputDir_refe + ) + + if not os.path.isdir(opts.inputDir_targ): + KILL( + 'invalid path to directory with new (or, "target") workflow outputs [-t]: ' + + opts.inputDir_targ + ) + + # check: output + outDir = opts.outputDir + if not opts.dry_run and opts.summary is not None and os.path.exists(outDir): + KILL("target output directory already exists [-o]: " + outDir) + + # check: external dependencies + if which("edmDumpEventContent", permissive=True) is None: + KILL( + 'executable "edmDumpEventContent" is not available (set up an appropriate CMSSW area)' + ) + + if which("hltDiff", permissive=True) is None: + KILL('executable "hltDiff" is not available (set up an appropriate CMSSW area)') + + # run TriggerResults comparisons + compareTriggerResults( + **{ + "inputDir1": opts.inputDir_refe, + "inputDir2": opts.inputDir_targ, + "filePattern": opts.file_pattern, + "outputDir": outDir, + "maxEvents": opts.max_events, + "summaryFormat": opts.summary, + "dryRun": opts.dry_run, + "verbosity": opts.verbosity, + } + ) diff --git a/compareTriggerResultsSummary b/compareTriggerResultsSummary deleted file mode 100755 index a34d83b46f1a..000000000000 --- a/compareTriggerResultsSummary +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -""" -Script to summarise the outputs of compareTriggerResults -(i.e. 
the outputs of hltDiff in .json format) -""" -from __future__ import print_function -import argparse -import os -import json -import glob - -def KILL(message): - raise RuntimeError(message) - -def WARNING(message): - print('>> Warning -- '+message) - -def compareTriggerResultsSummary(**kwargs): - inputDir = kwargs.get('inputDir') - filePattern = kwargs.get('filePattern') - summaryFilePath = kwargs.get('outputFile') - summaryFormat = kwargs.get('outputFormat') - dryRun = kwargs.get('dryRun', False) - verbosity = kwargs.get('verbosity', 0) - - inputFiles = glob.glob(os.path.join(inputDir, filePattern)) - - wfDict = {} - for inpf in sorted(inputFiles): - fBasename, wfName = os.path.basename(inpf), os.path.dirname(os.path.relpath(inpf, inputDir)) - numEventsTotal, numEventsWithDiffs = None, None - try: - jsonDict = json.load(open(inpf, 'r')) - numEventsTotal = int(jsonDict["configuration"]["events"]) - numEventsWithDiffs = len(jsonDict["events"]) - except: - if verbosity > 10: - WARNING('compareTriggerResultsSummary -- failed to extract hltDiff statistics from input file: '+inpf) - - if numEventsTotal is None or numEventsWithDiffs is None: continue - - # fill dictionary - procName = os.path.splitext(fBasename)[0] - if wfName not in wfDict: wfDict[wfName] = {} - - if procName in wfDict[wfName]: - if verbosity > 10: - warn_msg = 'process key "'+procName+'" already exists for workflow "'+wfName+'" (will be ignored)' - WARNING('compareTriggerResultsSummary -- '+warn_msg+': '+inpf) - continue - - wfDict[wfName][procName] = {'numEventsTotal': numEventsTotal, 'numEventsWithDiffs': numEventsWithDiffs} - - if not wfDict: - if verbosity >= 0: - WARNING('compareTriggerResultsSummary -- found zero inputs to be compared (no outputs produced)') - return -1 - - # hltDiff calls - numWorkflowsChecked, numWorkflowsWithDiffs = 0, 0 - summaryLines = [] - - if summaryFormat == 'html': - summaryLines += [ - '', - '', - '
Summary of edm::TriggerResults Comparisons
', - '', - ] - elif summaryFormat == 'txt': - summaryLines += ['| {:25} | {:18} | {:12} | {:}'.format('Events with Diffs', 'Events Processed', 'Process Name', 'Workflow')] - summaryLines += ['-'*100] - - try: - sortedWfNames = sorted(wfDict, key=lambda k: float(k.split('_')[0])) - except: - sortedWfNames = sorted(wfDict.keys()) - - for wfName in sortedWfNames: - wfNameShort = wfName.split('_')[0] - - wfHasDiff = False - for procName in sorted(wfDict[wfName]): - numEventsTotal = wfDict[wfName][procName]['numEventsTotal'] - numEventsWithDiffs = wfDict[wfName][procName]['numEventsWithDiffs'] - - wfHasDiff |= (numEventsWithDiffs > 0) - - if summaryFormat == 'html': - summaryLines += [ - '', - ' ', - ' ', - ' ', - ' ', - '', - ] - elif summaryFormat == 'txt': - summaryLines += ['| {:25d} | {:18d} | {:12} | {:}'.format(numEventsWithDiffs, numEventsTotal, procName, wfName)] - - numWorkflowsChecked += 1 - if wfHasDiff: numWorkflowsWithDiffs += 1 - - if summaryFormat == 'txt': - summaryLines += ['-'*100] - - if summaryFormat == 'html': - summaryLines += ['
WorkflowProcess NameEvents with DiffsEvents Processed
'+wfNameShort+''+procName+''+str(numEventsWithDiffs)+''+str(numEventsTotal)+'
'] - - if dryRun: return 0 - - if summaryLines: - if os.path.exists(summaryFilePath): - if verbosity > 0: - WARNING('compareTriggerResultsSummary -- target output file already exists (summary will not be produced)') - else: - with open(summaryFilePath, 'w') as summaryFile: - for _tmp in summaryLines: summaryFile.write(_tmp+'\n') - - if verbosity >= 0: - if numWorkflowsChecked == 0: - print('SUMMARY TriggerResults: no workflows checked') - elif numWorkflowsWithDiffs == 0: - print('SUMMARY TriggerResults: no differences found') - else: - print('SUMMARY TriggerResults: found differences in {:d} / {:d} workflows'.format(numWorkflowsWithDiffs, len(wfDict.keys()))) - - return numWorkflowsWithDiffs - -#### main -if __name__ == '__main__': - ### args - parser = argparse.ArgumentParser(prog='./'+os.path.basename(__file__), formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__) - - parser.add_argument('-i', '--input-dir', dest='input_dir', action='store', default=None, required=True, - help='path to input directory') - - parser.add_argument('-f', '--file-pattern', dest='file_pattern', action='store', default='*.json', - help='pattern to select files in the input directory (default: "*.json")') - - parser.add_argument('-o', '--output-file', dest='output_file', action='store', default=None, required=True, - help='path to output file (summary of comparisons)') - - parser.add_argument('-F', '--output-format', dest='output_format', action='store', default='txt', choices=["html", "txt"], - help='format of output file (must be "txt" or "html") (default: "txt")') - - parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true', default=False, - help='enable dry-run mode (default: False)') - - parser.add_argument('-v', '--verbosity', dest='verbosity', type=int, default=0, - help='level of verbosity (default: 0)') - - opts, opts_unknown = parser.parse_known_args() - ### ------------------------- - - # check: unrecognized command-line arguments - if len(opts_unknown) > 0: - KILL('unrecognized command-line arguments: '+str(opts_unknown)) - - # check: input directories - if not os.path.isdir(opts.input_dir): - KILL('invalid path to input directory [-i]: '+opts.input_dir) - - # check: output - outFile = opts.output_file - if not opts.dry_run and os.path.exists(outFile): - KILL('target output file already exists [-o]: '+outFile) - - # analyse inputs and produce summary - compareTriggerResultsSummary(**{ - 'inputDir': opts.input_dir, - 'filePattern': opts.file_pattern, - 'outputFile': outFile, - 'outputFormat': opts.output_format, - 'dryRun': opts.dry_run, - 'verbosity': opts.verbosity, - }) diff --git a/compareTriggerResultsSummary b/compareTriggerResultsSummary new file mode 120000 index 000000000000..6b3a04bd899c --- /dev/null +++ b/compareTriggerResultsSummary @@ -0,0 +1 @@ +compareTriggerResultsSummary.py \ No newline at end of file diff --git a/compareTriggerResultsSummary.py b/compareTriggerResultsSummary.py new file mode 100755 index 000000000000..546352ea0e8d --- /dev/null +++ b/compareTriggerResultsSummary.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python +""" +Script to summarise the outputs of compareTriggerResults +(i.e. 
the outputs of hltDiff in .json format) +""" +from __future__ import print_function +import argparse +import os +import json +import glob + + +def KILL(message): + raise RuntimeError(message) + + +def WARNING(message): + print(">> Warning -- " + message) + + +def compareTriggerResultsSummary(**kwargs): + inputDir = kwargs.get("inputDir") + filePattern = kwargs.get("filePattern") + summaryFilePath = kwargs.get("outputFile") + summaryFormat = kwargs.get("outputFormat") + dryRun = kwargs.get("dryRun", False) + verbosity = kwargs.get("verbosity", 0) + + inputFiles = glob.glob(os.path.join(inputDir, filePattern)) + + wfDict = {} + for inpf in sorted(inputFiles): + fBasename, wfName = os.path.basename(inpf), os.path.dirname( + os.path.relpath(inpf, inputDir) + ) + numEventsTotal, numEventsWithDiffs = None, None + try: + jsonDict = json.load(open(inpf, "r")) + numEventsTotal = int(jsonDict["configuration"]["events"]) + numEventsWithDiffs = len(jsonDict["events"]) + except: + if verbosity > 10: + WARNING( + "compareTriggerResultsSummary -- failed to extract hltDiff statistics from input file: " + + inpf + ) + + if numEventsTotal is None or numEventsWithDiffs is None: + continue + + # fill dictionary + procName = os.path.splitext(fBasename)[0] + if wfName not in wfDict: + wfDict[wfName] = {} + + if procName in wfDict[wfName]: + if verbosity > 10: + warn_msg = ( + 'process key "' + + procName + + '" already exists for workflow "' + + wfName + + '" (will be ignored)' + ) + WARNING("compareTriggerResultsSummary -- " + warn_msg + ": " + inpf) + continue + + wfDict[wfName][procName] = { + "numEventsTotal": numEventsTotal, + "numEventsWithDiffs": numEventsWithDiffs, + } + + if not wfDict: + if verbosity >= 0: + WARNING( + "compareTriggerResultsSummary -- found zero inputs to be compared (no outputs produced)" + ) + return -1 + + # hltDiff calls + numWorkflowsChecked, numWorkflowsWithDiffs = 0, 0 + summaryLines = [] + + if summaryFormat == "html": + summaryLines += [ + "", + "", + "
Summary of edm::TriggerResults Comparisons
", + "", + ] + elif summaryFormat == "txt": + summaryLines += [ + "| {:25} | {:18} | {:12} | {:}".format( + "Events with Diffs", "Events Processed", "Process Name", "Workflow" + ) + ] + summaryLines += ["-" * 100] + + try: + sortedWfNames = sorted(wfDict, key=lambda k: float(k.split("_")[0])) + except: + sortedWfNames = sorted(wfDict.keys()) + + for wfName in sortedWfNames: + wfNameShort = wfName.split("_")[0] + + wfHasDiff = False + for procName in sorted(wfDict[wfName]): + numEventsTotal = wfDict[wfName][procName]["numEventsTotal"] + numEventsWithDiffs = wfDict[wfName][procName]["numEventsWithDiffs"] + + wfHasDiff |= numEventsWithDiffs > 0 + + if summaryFormat == "html": + summaryLines += [ + "", + ' ", + ' ", + ' ", + ' ", + "", + ] + elif summaryFormat == "txt": + summaryLines += [ + "| {:25d} | {:18d} | {:12} | {:}".format( + numEventsWithDiffs, numEventsTotal, procName, wfName + ) + ] + + numWorkflowsChecked += 1 + if wfHasDiff: + numWorkflowsWithDiffs += 1 + + if summaryFormat == "txt": + summaryLines += ["-" * 100] + + if summaryFormat == "html": + summaryLines += ["
WorkflowProcess NameEvents with DiffsEvents Processed
' + wfNameShort + "' + + procName + + "' + str(numEventsWithDiffs) + "' + str(numEventsTotal) + "
"] + + if dryRun: + return 0 + + if summaryLines: + if os.path.exists(summaryFilePath): + if verbosity > 0: + WARNING( + "compareTriggerResultsSummary -- target output file already exists (summary will not be produced)" + ) + else: + with open(summaryFilePath, "w") as summaryFile: + for _tmp in summaryLines: + summaryFile.write(_tmp + "\n") + + if verbosity >= 0: + if numWorkflowsChecked == 0: + print("SUMMARY TriggerResults: no workflows checked") + elif numWorkflowsWithDiffs == 0: + print("SUMMARY TriggerResults: no differences found") + else: + print( + "SUMMARY TriggerResults: found differences in {:d} / {:d} workflows".format( + numWorkflowsWithDiffs, len(wfDict.keys()) + ) + ) + + return numWorkflowsWithDiffs + + +#### main +if __name__ == "__main__": + ### args + parser = argparse.ArgumentParser( + prog="./" + os.path.basename(__file__), + formatter_class=argparse.RawDescriptionHelpFormatter, + description=__doc__, + ) + + parser.add_argument( + "-i", + "--input-dir", + dest="input_dir", + action="store", + default=None, + required=True, + help="path to input directory", + ) + + parser.add_argument( + "-f", + "--file-pattern", + dest="file_pattern", + action="store", + default="*.json", + help='pattern to select files in the input directory (default: "*.json")', + ) + + parser.add_argument( + "-o", + "--output-file", + dest="output_file", + action="store", + default=None, + required=True, + help="path to output file (summary of comparisons)", + ) + + parser.add_argument( + "-F", + "--output-format", + dest="output_format", + action="store", + default="txt", + choices=["html", "txt"], + help='format of output file (must be "txt" or "html") (default: "txt")', + ) + + parser.add_argument( + "-d", + "--dry-run", + dest="dry_run", + action="store_true", + default=False, + help="enable dry-run mode (default: False)", + ) + + parser.add_argument( + "-v", + "--verbosity", + dest="verbosity", + type=int, + default=0, + help="level of verbosity (default: 0)", + ) + + opts, opts_unknown = parser.parse_known_args() + ### ------------------------- + + # check: unrecognized command-line arguments + if len(opts_unknown) > 0: + KILL("unrecognized command-line arguments: " + str(opts_unknown)) + + # check: input directories + if not os.path.isdir(opts.input_dir): + KILL("invalid path to input directory [-i]: " + opts.input_dir) + + # check: output + outFile = opts.output_file + if not opts.dry_run and os.path.exists(outFile): + KILL("target output file already exists [-o]: " + outFile) + + # analyse inputs and produce summary + compareTriggerResultsSummary( + **{ + "inputDir": opts.input_dir, + "filePattern": opts.file_pattern, + "outputFile": outFile, + "outputFormat": opts.output_format, + "dryRun": opts.dry_run, + "verbosity": opts.verbosity, + } + ) diff --git a/comparisons/analyzeFWComparison.py b/comparisons/analyzeFWComparison.py index 73bf245928a2..bb4c6e319870 100755 --- a/comparisons/analyzeFWComparison.py +++ b/comparisons/analyzeFWComparison.py @@ -10,94 +10,102 @@ from os import walk import re -#----------------------------------------------------------------------------------- +# ----------------------------------------------------------------------------------- # This script analyses the results from the FWlite comparison ( JR Comparison ) -# Per each workflow, it checks how many diffs were found. This is the number of +# Per each workflow, it checks how many diffs were found. This is the number of # png files that are present. 
-# -# -If the workflow has less than 10 differences, the DQM comparison should be run +# +# -If the workflow has less than 10 differences, the DQM comparison should be run # with mod 0 # -If the workflow has from 10 to 100 differences, the DQM comparison should be run # with mod 3 # -Else run with mod 2 # -# The results will be saved in the file DQMParameters.txt which indicates to the -# next step in the comparison, how it should be run. +# The results will be saved in the file DQMParameters.txt which indicates to the +# next step in the comparison, how it should be run. # -#----------------------------------------------------------------------------------- -#---- Start of execution -#----------------------------------------------------------------------------------- +# ----------------------------------------------------------------------------------- +# ---- Start of execution +# ----------------------------------------------------------------------------------- if __name__ == "__main__": - - #----------------------------------------------------------------------------------- - #---- Parser Options - #----------------------------------------------------------------------------------- - parser = OptionParser(usage="usage: %prog DIR OUTFILE \n DIR: The directory where the results of the FWLite comparison are" - "\n OUTFILE: The file to which you want to save the parameters") - - parser.add_option( "-R" , "--relmon" , dest="relmon" , action="store_true", help="Generate the thresholds for the relmon comparisons", default=False ) - (options, args) = parser.parse_args() - - #----------------------------------------------------------------------------------- - #---- Review of arguments - #----------------------------------------------------------------------------------- - - if ( len(args) < 2 ): - - print('not enough arguments\n') - parser.print_help() - exit() - - #----------------------------------------------------------------------------------- - #---- Global Variables - #----------------------------------------------------------------------------------- - - RESULTS_DIR = args[0] - WF_REGEXP = '[0-9]{1,}p[0-9]{1,}' - BASE_WF_NUM_PARAM = 'FOR_WF' - PARAMS_FILE = args[1] - ALT_COMP_PARAMS = { 0:'MOD=0', 2:'MOD=2', 3:'MOD=3' } - RELMON_COMP_THRESHOLDS = { 0:'TH=0.999999999999', 2:'TH=0.1', 3:'TH=0.999' } - - params_dict = RELMON_COMP_THRESHOLDS if options.relmon else ALT_COMP_PARAMS - worflow_params = {} - - for iwalk,(current_dir,subdir,files) in enumerate(walk( RESULTS_DIR )): - #naming convention is that a comparison sub-directory starts with 'all_' - if not current_dir.split('/')[-1].startswith('all_'): continue - if not '_' in current_dir: continue - current_wf = current_dir.split('_',)[0] - print('Processing: %s' % current_dir) - if '/' in current_wf: current_wf=current_wf.split('/')[-1] - if not current_wf[0].isdigit(): continue - - print('Workflow number is: %s' % current_wf) - diff_files = [file for file in files if file.endswith('.png')] - num_diffs = len( diff_files ) - print('It had %s diffs' % num_diffs) - - if num_diffs < 10: - mod = 0 - elif num_diffs < 100: - mod = 3 - else: - mod = 2 - - print('This needs to be run with %s' % params_dict [ mod ]) - if current_wf in worflow_params: - #print('taking max of',mod,'and',worflow_params[current_wf]) - worflow_params[current_wf] = max(mod,worflow_params[current_wf]) - else: - worflow_params[current_wf] = mod - - f = open( PARAMS_FILE , 'w') - for wf,mod in worflow_params.items(): - f.write( 
'%s=%s;%s\n'%(BASE_WF_NUM_PARAM,wf,RELMON_COMP_THRESHOLDS[mod]) ) - f.close() - - - - - + # ----------------------------------------------------------------------------------- + # ---- Parser Options + # ----------------------------------------------------------------------------------- + parser = OptionParser( + usage="usage: %prog DIR OUTFILE \n DIR: The directory where the results of the FWLite comparison are" + "\n OUTFILE: The file to which you want to save the parameters" + ) + + parser.add_option( + "-R", + "--relmon", + dest="relmon", + action="store_true", + help="Generate the thresholds for the relmon comparisons", + default=False, + ) + (options, args) = parser.parse_args() + + # ----------------------------------------------------------------------------------- + # ---- Review of arguments + # ----------------------------------------------------------------------------------- + + if len(args) < 2: + print("not enough arguments\n") + parser.print_help() + exit() + + # ----------------------------------------------------------------------------------- + # ---- Global Variables + # ----------------------------------------------------------------------------------- + + RESULTS_DIR = args[0] + WF_REGEXP = "[0-9]{1,}p[0-9]{1,}" + BASE_WF_NUM_PARAM = "FOR_WF" + PARAMS_FILE = args[1] + ALT_COMP_PARAMS = {0: "MOD=0", 2: "MOD=2", 3: "MOD=3"} + RELMON_COMP_THRESHOLDS = {0: "TH=0.999999999999", 2: "TH=0.1", 3: "TH=0.999"} + + params_dict = RELMON_COMP_THRESHOLDS if options.relmon else ALT_COMP_PARAMS + worflow_params = {} + + for iwalk, (current_dir, subdir, files) in enumerate(walk(RESULTS_DIR)): + # naming convention is that a comparison sub-directory starts with 'all_' + if not current_dir.split("/")[-1].startswith("all_"): + continue + if not "_" in current_dir: + continue + current_wf = current_dir.split( + "_", + )[0] + print("Processing: %s" % current_dir) + if "/" in current_wf: + current_wf = current_wf.split("/")[-1] + if not current_wf[0].isdigit(): + continue + + print("Workflow number is: %s" % current_wf) + diff_files = [file for file in files if file.endswith(".png")] + num_diffs = len(diff_files) + print("It had %s diffs" % num_diffs) + + if num_diffs < 10: + mod = 0 + elif num_diffs < 100: + mod = 3 + else: + mod = 2 + + print("This needs to be run with %s" % params_dict[mod]) + if current_wf in worflow_params: + # print('taking max of',mod,'and',worflow_params[current_wf]) + worflow_params[current_wf] = max(mod, worflow_params[current_wf]) + else: + worflow_params[current_wf] = mod + + f = open(PARAMS_FILE, "w") + for wf, mod in worflow_params.items(): + f.write("%s=%s;%s\n" % (BASE_WF_NUM_PARAM, wf, RELMON_COMP_THRESHOLDS[mod])) + f.close() diff --git a/comparisons/validateJR.py b/comparisons/validateJR.py index c826a0eb9d39..eecf70089117 100755 --- a/comparisons/validateJR.py +++ b/comparisons/validateJR.py @@ -1,5 +1,6 @@ import glob, os, sys + def makedirs(dir): try: os.makedirs(dir, exist_ok=True) @@ -7,142 +8,192 @@ def makedirs(dir): if not os.path.exists(dir): os.makedirs(dir) + ## commented lines are mostly python3 fstring syntax that we cannot use until we totally loose python2 support in the PR validation + def autoLoadEnabler(): - if os.path.isfile( os.path.join(os.environ['CMSSW_RELEASE_BASE'],'src/FWCore/FWLite/interface/FWLiteEnabler.h')): - return 'FWLiteEnabler::enable();' + if os.path.isfile( + os.path.join( + os.environ["CMSSW_RELEASE_BASE"], "src/FWCore/FWLite/interface/FWLiteEnabler.h" + ) + ): + return "FWLiteEnabler::enable();" else: - return 
'AutoLibraryLoader::enable();' + return "AutoLibraryLoader::enable();" + def compile_lib(): - lib_dir = 'validate_lib' - #if not os.path.isfile(f'{lib_dir}/validate_C.so'): - if not os.path.isfile('%s/validate_C.so'%(lib_dir,)): + lib_dir = "validate_lib" + # if not os.path.isfile(f'{lib_dir}/validate_C.so'): + if not os.path.isfile("%s/validate_C.so" % (lib_dir,)): makedirs(lib_dir) - if not 'VALIDATE_C_SCRIPT' in os.environ or not os.environ['VALIDATE_C_SCRIPT']: - os.environ['VALIDATE_C_SCRIPT'] = os.path.join(os.environ['HOME'],'tools','validate.C') - if os.path.isfile(os.environ['VALIDATE_C_SCRIPT']): - #os.system(f"cp $VALIDATE_C_SCRIPT {lib_dir}/validate.C") - os.system("cp $VALIDATE_C_SCRIPT %s/validate.C"%(lib_dir,)) - #command = f'cd {lib_dir};'+'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");{autoLoadEnabler()}\n .L validate.C+ \n .qqqqqq\" | root -l -b' - command = 'cd %s;'%(lib_dir,)+'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");%s\n .L validate.C+ \n .qqqqqq\" | root -l -b'%(autoLoadEnabler(),) - #print(f"compiling library with {command}") - print("compiling library with %s"%(command,)) - os.system( command ) - os.environ['LD_LIBRARY_PATH'] = ':'.join([os.path.join(os.getcwd(),'validate_lib'),os.environ['LD_LIBRARY_PATH']]) - #return os.path.isfile(f'{lib_dir}/validate_C.so') - return os.path.isfile('%s/validate_C.so'%(lib_dir,)) + if not "VALIDATE_C_SCRIPT" in os.environ or not os.environ["VALIDATE_C_SCRIPT"]: + os.environ["VALIDATE_C_SCRIPT"] = os.path.join( + os.environ["HOME"], "tools", "validate.C" + ) + if os.path.isfile(os.environ["VALIDATE_C_SCRIPT"]): + # os.system(f"cp $VALIDATE_C_SCRIPT {lib_dir}/validate.C") + os.system("cp $VALIDATE_C_SCRIPT %s/validate.C" % (lib_dir,)) + # command = f'cd {lib_dir};'+'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");{autoLoadEnabler()}\n .L validate.C+ \n .qqqqqq\" | root -l -b' + command = "cd %s;" % ( + lib_dir, + ) + 'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");%s\n .L validate.C+ \n .qqqqqq" | root -l -b' % ( + autoLoadEnabler(), + ) + # print(f"compiling library with {command}") + print("compiling library with %s" % (command,)) + os.system(command) + os.environ["LD_LIBRARY_PATH"] = ":".join( + [os.path.join(os.getcwd(), "validate_lib"), os.environ["LD_LIBRARY_PATH"]] + ) + # return os.path.isfile(f'{lib_dir}/validate_C.so') + return os.path.isfile("%s/validate_C.so" % (lib_dir,)) + def run_comparison(fileName, base_dir, ref_dir, processName, spec, output_dir): - base_file=os.path.join(base_dir,fileName) - ref_file=os.path.join(ref_dir,fileName) + base_file = os.path.join(base_dir, fileName) + ref_file = os.path.join(ref_dir, fileName) if not os.path.isfile(base_file) or not os.path.isfile(ref_file): return False - logFile=fileName.replace('.root','.log') + logFile = fileName.replace(".root", ".log") makedirs(output_dir) - #command = f'cd {output_dir}; echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");{autoLoadEnabler()}gSystem->Load(\\"validate_C.so\\");validate(\\"{spec}\\",\\"{base_file}\\",\\"{ref_file}\\",\\"{processName}\\");\n.qqqqqq" | root -l -b >& {logFile}' - command = 'cd %s;'%output_dir + 'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");%sgSystem->Load(\\"validate_C.so\\");validate(\\"%s\\",\\"%s\\",\\"%s\\",\\"%s\\");\n.qqqqqq" | root -l -b >& %s'%(autoLoadEnabler(), spec, base_file, ref_file, processName, logFile) - #print(f"running comparison with {command}") - #print(f"log of comparing {fileName} process {processName} from {base_dir} and {ref_dir} into {output_dir} with spec {spec} shown in 
{logFile}") - print("log of comparing %s process %s from %s and %s into %s with spec %s shown in %s"%(fileName, processName, base_dir, ref_dir, output_dir, spec, logFile )) - c=os.system( command ) + # command = f'cd {output_dir}; echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");{autoLoadEnabler()}gSystem->Load(\\"validate_C.so\\");validate(\\"{spec}\\",\\"{base_file}\\",\\"{ref_file}\\",\\"{processName}\\");\n.qqqqqq" | root -l -b >& {logFile}' + command = ( + "cd %s;" % output_dir + + 'echo -e "gSystem->Load(\\"libFWCoreFWLite.so\\");%sgSystem->Load(\\"validate_C.so\\");validate(\\"%s\\",\\"%s\\",\\"%s\\",\\"%s\\");\n.qqqqqq" | root -l -b >& %s' + % (autoLoadEnabler(), spec, base_file, ref_file, processName, logFile) + ) + # print(f"running comparison with {command}") + # print(f"log of comparing {fileName} process {processName} from {base_dir} and {ref_dir} into {output_dir} with spec {spec} shown in {logFile}") + print( + "log of comparing %s process %s from %s and %s into %s with spec %s shown in %s" + % (fileName, processName, base_dir, ref_dir, output_dir, spec, logFile) + ) + c = os.system(command) return True + def file_processes(fileName): - max_proc=20 - prov_file = fileName+'.edmProvDump' + max_proc = 20 + prov_file = fileName + ".edmProvDump" if not os.path.isfile(prov_file): - #print(f"dumping provenance of {fileName} in {prov_file}") - print("dumping provenance of %s in %s"%(fileName, prov_file)) - #c=os.system("edmProvDump {fileName} > {prov_file}") - c=os.system("edmProvDump %s > %s"%(fileName, prov_file)) - if c!=0: return [] - #print(f"finding processes of {fileName} in {prov_file}") - #raw_proc= os.popen(f"grep -e 'Processing History:' -A {max_proc} {prov_file} | awk '{{print $1}}'").read().split('\n')[1:] - raw_proc= os.popen("grep -e 'Processing History:' -A %s %s | awk '{print $1}'"%(max_proc, prov_file)).read().split('\n')[1:] + # print(f"dumping provenance of {fileName} in {prov_file}") + print("dumping provenance of %s in %s" % (fileName, prov_file)) + # c=os.system("edmProvDump {fileName} > {prov_file}") + c = os.system("edmProvDump %s > %s" % (fileName, prov_file)) + if c != 0: + return [] + # print(f"finding processes of {fileName} in {prov_file}") + # raw_proc= os.popen(f"grep -e 'Processing History:' -A {max_proc} {prov_file} | awk '{{print $1}}'").read().split('\n')[1:] + raw_proc = ( + os.popen( + "grep -e 'Processing History:' -A %s %s | awk '{print $1}'" % (max_proc, prov_file) + ) + .read() + .split("\n")[1:] + ) processes = [] for proc_ in raw_proc: - if '--' in proc_: break - processes.append( proc_ ) + if "--" in proc_: + break + processes.append(proc_) return processes + def last_process(fileName): processes_ = file_processes(fileName) - #print(f"found processes {processes_} in {fileName}") - if processes_: return processes_[-1] + # print(f"found processes {processes_} in {fileName}") + if processes_: + return processes_[-1] return None + def file_index(fileName): - ndigits=3 - fn= fileName.replace('step','').replace('.root','') - if '_' in fn : fn,_=fn.split('_',1) + ndigits = 3 + fn = fileName.replace("step", "").replace(".root", "") + if "_" in fn: + fn, _ = fn.split("_", 1) while ndigits: index = fn[-ndigits:] if index.isdigit(): return int(index) - ndigits-=1 + ndigits -= 1 return None + def process_file(each_root_file): - #print(f'processing {each_root_file} in {os.getpid()}') - process_of_interest=['ZStoRECO','RECO','reRECO','PAT','NANO','DQM','HLT','HLT2'] - if any([pat in each_root_file for pat in ['inDQM','DQM_V']]): + # print(f'processing 
{each_root_file} in {os.getpid()}') + process_of_interest = ["ZStoRECO", "RECO", "reRECO", "PAT", "NANO", "DQM", "HLT", "HLT2"] + if any([pat in each_root_file for pat in ["inDQM", "DQM_V"]]): return processName = last_process(each_root_file) if not processName in process_of_interest: return - #print(f"found process of interest {processName} in file {each_root_file}") - ref_path,fileName = each_root_file.rsplit('/',1) - path = ref_path.replace( options.ref, options.base ) - _,fullName = path.rsplit('/',1) - wfn,therest=fullName.split('_',1) - wfn='wf'+wfn.replace('.','p') + # print(f"found process of interest {processName} in file {each_root_file}") + ref_path, fileName = each_root_file.rsplit("/", 1) + path = ref_path.replace(options.ref, options.base) + _, fullName = path.rsplit("/", 1) + wfn, therest = fullName.split("_", 1) + wfn = "wf" + wfn.replace(".", "p") # specify what are the specific branches to look at - spec = 'all' - if ('inMINIAOD' in fileName): spec += '_mini' + spec = "all" + if "inMINIAOD" in fileName: + spec += "_mini" # the compressed name should uniquely identify the workflow and the output file - compressedName = therest.replace('+','').replace('.','').replace('_','') - compressedName += fileName.replace('.root','') + compressedName = therest.replace("+", "").replace(".", "").replace("_", "") + compressedName += fileName.replace(".root", "") compressedName += wfn - #print(f"compressing {path} into {compressedName}") - output_dir = os.path.join(#'OldVSNew', - fullName, - #'_'.join([spec,processName,fileName.replace('.root','').split('_')[-1] if '_' in fileName else '']), - '_'.join([spec,processName,fileName.replace('.root','')]) - ) - + # print(f"compressing {path} into {compressedName}") + output_dir = os.path.join( #'OldVSNew', + fullName, + #'_'.join([spec,processName,fileName.replace('.root','').split('_')[-1] if '_' in fileName else '']), + "_".join([spec, processName, fileName.replace(".root", "")]), + ) run_comparison(fileName, path, ref_path, processName, spec, output_dir) - #print(f'\t{each_root_file} processed in {os.getpid()}') + # print(f'\t{each_root_file} processed in {os.getpid()}') + if __name__ == "__main__": from optparse import OptionParser + parser = OptionParser(usage="%prog [options]") - parser.add_option("--base", dest="base", default="base/", help="path to the file to compare with ref") + parser.add_option( + "--base", dest="base", default="base/", help="path to the file to compare with ref" + ) parser.add_option("--ref", dest="ref", default="ref/", help="path to the reference files") - parser.add_option("--wf", dest="workflow", default="*", help="pattern for listing the workflows to run the comparison for {base}/{wf}_*/...") - parser.add_option("--procs", dest="procs", default=None, type=int, help="number of processes to run") + parser.add_option( + "--wf", + dest="workflow", + default="*", + help="pattern for listing the workflows to run the comparison for {base}/{wf}_*/...", + ) + parser.add_option( + "--procs", dest="procs", default=None, type=int, help="number of processes to run" + ) (options, args) = parser.parse_args() - if not compile_lib():sys.exit() + if not compile_lib(): + sys.exit() - #all_output_root_files = glob.glob(f'{options.base}/{options.workflow}_*/step*.root') - all_output_root_files = glob.glob('%s/%s_*/step*.root'%(options.ref,options.workflow)) + # all_output_root_files = glob.glob(f'{options.base}/{options.workflow}_*/step*.root') + all_output_root_files = glob.glob("%s/%s_*/step*.root" % (options.ref, 
options.workflow)) - #print(f'{len(all_output_root_files)} files to process') - print('%d files to process'%(len(all_output_root_files))) + # print(f'{len(all_output_root_files)} files to process') + print("%d files to process" % (len(all_output_root_files))) - if options.procs==0: + if options.procs == 0: for afile in all_output_root_files: process_file(afile) else: from multiprocessing import Pool - #with Pool(options.procs) as threads: #only python3 + + # with Pool(options.procs) as threads: #only python3 # results = [threads.apply_async(process_file, (f, )) for f in all_output_root_files] # for r in results: r.wait() threads = Pool(options.procs) - results = [threads.apply_async(process_file, (f, )) for f in all_output_root_files] + results = [threads.apply_async(process_file, (f,)) for f in all_output_root_files] for r in results: r.wait() diff --git a/condor/tests/node-check.py b/condor/tests/node-check.py index 0654788e61cf..af63550f2a19 100755 --- a/condor/tests/node-check.py +++ b/condor/tests/node-check.py @@ -10,120 +10,142 @@ from os.path import dirname, abspath import sys -sys.path.append(dirname(dirname(dirname(abspath(__file__))))) # in order to import cms-bot level modules + +sys.path.append( + dirname(dirname(dirname(abspath(__file__)))) +) # in order to import cms-bot level modules from _py2with3compatibility import run_cmd + def do_load(obj): - mem_size = obj.memory*1024*1024 - cache = array('B', [0]) * mem_size - print("Run: ", obj.id) - while obj.state: - x=0 - for j in range(1024): - for k in range(1024): - x=j*k - sleep(0.001) - -class LoadMaster (object): - def __init__ (self, memory, max_child, pipe_in, max_time=0): - self.memory = memory - self.input = pipe_in - self.max_child = max_child - self.max_time = max_time - self.childs = [] - - def get_command(self): - if self.input: return self.input.readline().strip() - cFile = 'auto-load' - while not exists(cFile): sleep(0.2) - sleep(0.5) - o, cmd = run_cmd("head -1 %s; rm -f %s" % (cFile, cFile)) - return cmd.strip() - - def remove_child(self): - if len(self.childs)==0: return - write(self.childs[-1][1], 'stop\n') - waitpid(self.childs[-1][0], 0) - self.childs.pop() - print("Childs:",len(self.childs)) - - def remove_childs(self, count): - for c in range(count): self.remove_child() - - def remove_all(self): - self.remove_childs(self.max_child) - - def add_child(self): - if self.max_child==len(self.childs): return - pin, pout = pipe() - pid = fork() - if pid == 0: - close(pout) - c = LoadClient(len(self.childs), self.memory) - c.start(pin) - exit(0) - else: - close(pin) - self.childs.append([pid, pout]) - print("Childs:",len(self.childs)) - - def add_childs(self, count): - for c in range(count): self.add_child() - - def add_all(self): - self.add_childs(self.max_child) - - def start(self): - stime = time() - while True: - if self.max_time<=0: - cmd = self.get_command() - elif (time()-stime)>self.max_time: - cmd = "exit" - elif self.childs: - sleep(1) - continue - else: - cmd = "start" - print("master: %s" % cmd) - if cmd in ['stop', 'exit']: self.remove_all() - elif cmd=='start': self.add_all() - else: - m = match('^([+]|[-]|)([1-9][0-9]{0,1})$', cmd) - if m: - count = int(m.group(2)) - if m.group(1)=='+': self.add_childs(count) - elif m.group(1)=='-': self.remove_childs(count) - else: - while len(self.childs)>count: self.remove_child() - while len(self.childs) self.max_time: + cmd = "exit" + elif self.childs: + sleep(1) + continue + else: + cmd = "start" + print("master: %s" % cmd) + if cmd in ["stop", "exit"]: + 
self.remove_all() + elif cmd == "start": + self.add_all() + else: + m = match("^([+]|[-]|)([1-9][0-9]{0,1})$", cmd) + if m: + count = int(m.group(2)) + if m.group(1) == "+": + self.add_childs(count) + elif m.group(1) == "-": + self.remove_childs(count) + else: + while len(self.childs) > count: + self.remove_child() + while len(self.childs) < count: + self.add_child() + if cmd == "exit": + break + return + + +class LoadClient(object): + def __init__(self, cid, memory): + self.memory = memory + self.id = cid + self.state = True + + def start(self, pipe_in): + thr = Thread(target=do_load, args=(self,)) + thr.start() + pin = fdopen(pipe_in) + while self.state: + cmd = pin.readline().strip() + print("%s: %s" % (self.id, cmd)) + if cmd == "stop": + self.state = False + pin.close() + thr.join() + print("Done:", self.id) + exit(0) + + +childs = int(argv[1]) +memory = int(argv[2]) +try: + max_time = int(argv[3]) +except: + max_time = 0 master = LoadMaster(memory, childs, None, max_time) master.start() print("ALL OK") - diff --git a/config.map b/config.map index 53a8631a49cf..de4e6b519d15 100644 --- a/config.map +++ b/config.map @@ -63,4 +63,5 @@ SCRAM_ARCH=slc6_amd64_gcc630;PKGTOOLS_TAG=V00-33-XX;CMSDIST_TAG=IB/CMSSW_9_4_X/g SCRAM_ARCH=slc7_amd64_gcc630;PKGTOOLS_TAG=V00-33-XX;CMSDIST_TAG=IB/CMSSW_9_4_X/gcc630;RELEASE_BRANCH=CMSSW_9_4_X;RELEASE_QUEUE=CMSSW_9_4_X;DOCKER_IMG=cmssw/cc7; SCRAM_ARCH=slc6_amd64_gcc530;PKGTOOLS_TAG=V00-31-XX;CMSDIST_TAG=IB/CMSSW_8_0_X/gcc530;RELEASE_QUEUE=CMSSW_8_0_X;ADDITIONAL_TESTS=HLT;PR_TESTS=1;PROD_ARCH=1;ENABLE_DEBUG=1;PRS_TEST_CLANG=1;DOCKER_IMG=cmssw/slc6:latest;REQUIRED_TEST=true; SCRAM_ARCH=slc7_amd64_gcc530;PKGTOOLS_TAG=V00-31-XX;CMSDIST_TAG=IB/CMSSW_8_0_X/gcc530;RELEASE_QUEUE=CMSSW_8_0_X;DOCKER_IMG=cmssw/cc7; +SCRAM_ARCH=slc6_amd64_gcc481;PKGTOOLS_TAG=V00-31-XX;CMSDIST_TAG=IB/CMSSW_7_1_X/stable;RELEASE_QUEUE=CMSSW_7_1_X;ADDITIONAL_TESTS=HLT;PR_TESTS=1;PROD_ARCH=1;DOCKER_IMG=cmssw/slc6:latest;REQUIRED_TEST=true; SCRAM_ARCH=slc6_amd64_gcc472;PKGTOOLS_TAG=V00-31-XX;CMSDIST_TAG=IB/CMSSW_5_3_X/slc6_amd64_gcc472;RELEASE_QUEUE=CMSSW_5_3_X;ADDITIONAL_TESTS=HLT;PR_TESTS=1;DO_STATIC_CHECKS=false;PROD_ARCH=1;DOCKER_IMG=cmssw/slc6:latest;REQUIRED_TEST=true; diff --git a/crab/pset.py b/crab/pset.py index 6e006db2aef8..5996fb8c04e2 100644 --- a/crab/pset.py +++ b/crab/pset.py @@ -1,112 +1,111 @@ # Auto generated configuration file -# using: -# Revision: 1.19 -# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v -# with command line options: MinBias_8TeV_cfi --conditions auto:startup -s GEN,SIM --datatier GEN-SIM -n 10 +# using: +# Revision: 1.19 +# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v +# with command line options: MinBias_8TeV_cfi --conditions auto:startup -s GEN,SIM --datatier GEN-SIM -n 10 # --relval 9000,300 --eventcontent RAWSIM --io MinBias.io --python MinBias.py --no_exec --fileout minbias.root import FWCore.ParameterSet.Config as cms -process = cms.Process('SIM') +process = cms.Process("SIM") # Import of standard configurations -process.load('Configuration.StandardSequences.Services_cff') -process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi') -process.load('FWCore.MessageService.MessageLogger_cfi') -process.load('Configuration.EventContent.EventContent_cff') -process.load('SimGeneral.MixingModule.mixNoPU_cfi') -process.load('Configuration.StandardSequences.GeometryRecoDB_cff') -process.load('Configuration.Geometry.GeometrySimDB_cff') 
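Editorial aside on condor/tests/node-check.py shown above (not part of the patch): the LoadMaster process reads single-line commands, either from a pipe or from an "auto-load" file in its working directory, and forks LoadClient children that each hold the requested amount of memory (in MB) and keep the CPU busy. A minimal sketch of driving it through the file interface, assuming the script is launched without a max_time argument and run from its own directory; child count, memory size and sleep times are illustrative only:

# Minimal sketch, not part of the patch; all values here are made up.
import subprocess, time

proc = subprocess.Popen(["python3", "node-check.py", "4", "256"])  # up to 4 children, 256 MB each

for cmd in ["start", "-2", "+1", "exit"]:   # spawn all, drop two, add one back, then quit
    time.sleep(5)
    with open("auto-load", "w") as cfile:   # LoadMaster polls for this file and consumes it
        cfile.write(cmd + "\n")

proc.wait()
print("load test finished")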
-process.load('Configuration.StandardSequences.MagneticField_38T_cff') -process.load('Configuration.StandardSequences.Generator_cff') -process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi') -process.load('GeneratorInterface.Core.genFilterSummary_cff') -process.load('Configuration.StandardSequences.SimIdeal_cff') -process.load('Configuration.StandardSequences.EndOfProcess_cff') -process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff') - -process.maxEvents = cms.untracked.PSet( - input = cms.untracked.int32(10) -) +process.load("Configuration.StandardSequences.Services_cff") +process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi") +process.load("FWCore.MessageService.MessageLogger_cfi") +process.load("Configuration.EventContent.EventContent_cff") +process.load("SimGeneral.MixingModule.mixNoPU_cfi") +process.load("Configuration.StandardSequences.GeometryRecoDB_cff") +process.load("Configuration.Geometry.GeometrySimDB_cff") +process.load("Configuration.StandardSequences.MagneticField_38T_cff") +process.load("Configuration.StandardSequences.Generator_cff") +process.load("IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi") +process.load("GeneratorInterface.Core.genFilterSummary_cff") +process.load("Configuration.StandardSequences.SimIdeal_cff") +process.load("Configuration.StandardSequences.EndOfProcess_cff") +process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") + +process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(10)) # Input source process.source = cms.Source("EmptySource") -process.options = cms.untracked.PSet( - -) +process.options = cms.untracked.PSet() # Production Info process.configurationMetadata = cms.untracked.PSet( - version = cms.untracked.string('$Revision: 1.19 $'), - annotation = cms.untracked.string('MinBias_8TeV_cfi nevts:10'), - name = cms.untracked.string('Applications') + version=cms.untracked.string("$Revision: 1.19 $"), + annotation=cms.untracked.string("MinBias_8TeV_cfi nevts:10"), + name=cms.untracked.string("Applications"), ) # Output definition -process.RAWSIMoutput = cms.OutputModule("PoolOutputModule", - splitLevel = cms.untracked.int32(0), - eventAutoFlushCompressedSize = cms.untracked.int32(5242880), - outputCommands = process.RAWSIMEventContent.outputCommands, - fileName = cms.untracked.string('minbias.root'), - dataset = cms.untracked.PSet( - filterName = cms.untracked.string(''), - dataTier = cms.untracked.string('GEN-SIM') +process.RAWSIMoutput = cms.OutputModule( + "PoolOutputModule", + splitLevel=cms.untracked.int32(0), + eventAutoFlushCompressedSize=cms.untracked.int32(5242880), + outputCommands=process.RAWSIMEventContent.outputCommands, + fileName=cms.untracked.string("minbias.root"), + dataset=cms.untracked.PSet( + filterName=cms.untracked.string(""), dataTier=cms.untracked.string("GEN-SIM") ), - SelectEvents = cms.untracked.PSet( - SelectEvents = cms.vstring('generation_step') - ) + SelectEvents=cms.untracked.PSet(SelectEvents=cms.vstring("generation_step")), ) # Additional output definition # Other statements -process.genstepfilter.triggerConditions=cms.vstring("generation_step") +process.genstepfilter.triggerConditions = cms.vstring("generation_step") from Configuration.AlCa.GlobalTag import GlobalTag -process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:startup', '') -process.generator = cms.EDFilter("Pythia6GeneratorFilter", - pythiaPylistVerbosity = cms.untracked.int32(0), - filterEfficiency = cms.untracked.double(1.0), - pythiaHepMCVerbosity = 
cms.untracked.bool(False), - comEnergy = cms.double(8000.0), - maxEventsToPrint = cms.untracked.int32(0), - PythiaParameters = cms.PSet( - pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution', - 'MSTJ(22)=2 ! Decay those unstable particles', - 'PARJ(71)=10 . ! for which ctau 10 mm', - 'MSTP(33)=0 ! no K factors in hard cross sections', - 'MSTP(2)=1 ! which order running alphaS', - 'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)', - 'MSTP(52)=2 ! work with LHAPDF', - 'PARP(82)=1.921 ! pt cutoff for multiparton interactions', - 'PARP(89)=1800. ! sqrts for which PARP82 is set', - 'PARP(90)=0.227 ! Multiple interactions: rescaling power', - 'MSTP(95)=6 ! CR (color reconnection parameters)', - 'PARP(77)=1.016 ! CR', - 'PARP(78)=0.538 ! CR', - 'PARP(80)=0.1 ! Prob. colored parton from BBR', - 'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter', - 'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter', - 'PARP(62)=1.025 ! ISR cutoff', - 'MSTP(91)=1 ! Gaussian primordial kT', - 'PARP(93)=10.0 ! primordial kT-max', - 'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default', - 'MSTP(82)=4 ! Defines the multi-parton model'), - processParameters = cms.vstring('MSEL=0 ! User defined processes', - 'MSUB(11)=1 ! Min bias process', - 'MSUB(12)=1 ! Min bias process', - 'MSUB(13)=1 ! Min bias process', - 'MSUB(28)=1 ! Min bias process', - 'MSUB(53)=1 ! Min bias process', - 'MSUB(68)=1 ! Min bias process', - 'MSUB(92)=1 ! Min bias process, single diffractive', - 'MSUB(93)=1 ! Min bias process, single diffractive', - 'MSUB(94)=1 ! Min bias process, double diffractive', - 'MSUB(95)=1 ! Min bias process'), - parameterSets = cms.vstring('pythiaUESettings', - 'processParameters') - ) +process.GlobalTag = GlobalTag(process.GlobalTag, "auto:startup", "") + +process.generator = cms.EDFilter( + "Pythia6GeneratorFilter", + pythiaPylistVerbosity=cms.untracked.int32(0), + filterEfficiency=cms.untracked.double(1.0), + pythiaHepMCVerbosity=cms.untracked.bool(False), + comEnergy=cms.double(8000.0), + maxEventsToPrint=cms.untracked.int32(0), + PythiaParameters=cms.PSet( + pythiaUESettings=cms.vstring( + "MSTU(21)=1 ! Check on possible errors during program execution", + "MSTJ(22)=2 ! Decay those unstable particles", + "PARJ(71)=10 . ! for which ctau 10 mm", + "MSTP(33)=0 ! no K factors in hard cross sections", + "MSTP(2)=1 ! which order running alphaS", + "MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)", + "MSTP(52)=2 ! work with LHAPDF", + "PARP(82)=1.921 ! pt cutoff for multiparton interactions", + "PARP(89)=1800. ! sqrts for which PARP82 is set", + "PARP(90)=0.227 ! Multiple interactions: rescaling power", + "MSTP(95)=6 ! CR (color reconnection parameters)", + "PARP(77)=1.016 ! CR", + "PARP(78)=0.538 ! CR", + "PARP(80)=0.1 ! Prob. colored parton from BBR", + "PARP(83)=0.356 ! Multiple interactions: matter distribution parameter", + "PARP(84)=0.651 ! Multiple interactions: matter distribution parameter", + "PARP(62)=1.025 ! ISR cutoff", + "MSTP(91)=1 ! Gaussian primordial kT", + "PARP(93)=10.0 ! primordial kT-max", + "MSTP(81)=21 ! multiple parton interactions 1 is Pythia default", + "MSTP(82)=4 ! Defines the multi-parton model", + ), + processParameters=cms.vstring( + "MSEL=0 ! User defined processes", + "MSUB(11)=1 ! Min bias process", + "MSUB(12)=1 ! Min bias process", + "MSUB(13)=1 ! Min bias process", + "MSUB(28)=1 ! Min bias process", + "MSUB(53)=1 ! Min bias process", + "MSUB(68)=1 ! 
Min bias process", + "MSUB(92)=1 ! Min bias process, single diffractive", + "MSUB(93)=1 ! Min bias process, single diffractive", + "MSUB(94)=1 ! Min bias process, double diffractive", + "MSUB(95)=1 ! Min bias process", + ), + parameterSets=cms.vstring("pythiaUESettings", "processParameters"), + ), ) # Path and EndPath definitions @@ -122,10 +121,9 @@ process.genfiltersummary_step, process.simulation_step, process.endjob_step, - process.RAWSIMoutput_step + process.RAWSIMoutput_step, ) # Filter all path with the production filter sequence for path in process.paths: - getattr(process,path)._seq = process.generator * getattr(process,path)._seq - + getattr(process, path)._seq = process.generator * getattr(process, path)._seq diff --git a/crab/task.py b/crab/task.py index 3ade88261404..bdd079c08cdf 100644 --- a/crab/task.py +++ b/crab/task.py @@ -1,37 +1,39 @@ from CRABClient.UserUtilities import config import os, re, time -archs = os.environ['SCRAM_ARCH'].split("_") -osMajorVer = int(re.sub('[a-z]', '', archs[0])) +archs = os.environ["SCRAM_ARCH"].split("_") +osMajorVer = int(re.sub("[a-z]", "", archs[0])) config = config() -config.General.instance = os.getenv('CRABCONFIGINSTANCE','prod') -config.General.requestName = os.getenv('CRAB_REQUEST', str(int(time.time()))) -config.General.transferOutputs = True -config.General.transferLogs = False +config.General.instance = os.getenv("CRABCONFIGINSTANCE", "prod") +config.General.requestName = os.getenv("CRAB_REQUEST", str(int(time.time()))) +config.General.transferOutputs = True +config.General.transferLogs = False -config.Data.unitsPerJob = 10 -config.Data.totalUnits = 10 -config.Data.splitting = 'EventBased' -config.Data.publication = False +config.Data.unitsPerJob = 10 +config.Data.totalUnits = 10 +config.Data.splitting = "EventBased" +config.Data.publication = False -config.JobType.psetName = os.path.join(os.path.dirname(__file__), 'pset.py') -config.JobType.pluginName = 'PrivateMC' -config.JobType.maxJobRuntimeMin = 60 -config.JobType.maxMemoryMB = 2000 +config.JobType.psetName = os.path.join(os.path.dirname(__file__), "pset.py") +config.JobType.pluginName = "PrivateMC" +config.JobType.maxJobRuntimeMin = 60 +config.JobType.maxMemoryMB = 2000 config.JobType.allowUndistributedCMSSW = True -config.Site.storageSite = 'T2_CH_CERN' +config.Site.storageSite = "T2_CH_CERN" -if 'CRAB_SCHEDD_NAME' in os.environ and os.environ['CRAB_SCHEDD_NAME']!='': - config.Debug.scheddName = os.environ['CRAB_SCHEDD_NAME'] -if 'CRAB_COLLECTOR' in os.environ and os.environ['CRAB_COLLECTOR']!='': - config.Debug.collector = os.environ['CRAB_COLLECTOR'] +if "CRAB_SCHEDD_NAME" in os.environ and os.environ["CRAB_SCHEDD_NAME"] != "": + config.Debug.scheddName = os.environ["CRAB_SCHEDD_NAME"] +if "CRAB_COLLECTOR" in os.environ and os.environ["CRAB_COLLECTOR"] != "": + config.Debug.collector = os.environ["CRAB_COLLECTOR"] -config.Debug.extraJDL = ['+REQUIRED_OS="rhel%s"' % osMajorVer] -if 'amd64' == archs[1]: - config.Debug.extraJDL.append('+DESIRED_Archs="%s"' % 'X86_64' if ('amd64' == archs[1]) else archs[1]) -if 'SINGULARITY_IMAGE' in os.environ and os.environ['SINGULARITY_IMAGE']!='': - config.Debug.extraJDL.append('+SingularityImage="%s"' % os.environ['SINGULARITY_IMAGE']) -if 'CRAB_SITE' in os.environ and os.environ['CRAB_SITE']!='': - config.Debug.extraJDL.append('+DESIRED_Sites="%s"' % os.environ['CRAB_SITE']) +config.Debug.extraJDL = ['+REQUIRED_OS="rhel%s"' % osMajorVer] +if "amd64" == archs[1]: + config.Debug.extraJDL.append( + '+DESIRED_Archs="%s"' % "X86_64" if 
("amd64" == archs[1]) else archs[1] + ) +if "SINGULARITY_IMAGE" in os.environ and os.environ["SINGULARITY_IMAGE"] != "": + config.Debug.extraJDL.append('+SingularityImage="%s"' % os.environ["SINGULARITY_IMAGE"]) +if "CRAB_SITE" in os.environ and os.environ["CRAB_SITE"] != "": + config.Debug.extraJDL.append('+DESIRED_Sites="%s"' % os.environ["CRAB_SITE"]) diff --git a/create-gh-issue.py b/create-gh-issue.py index 1c1f5fb803bb..aafb66336df3 100755 --- a/create-gh-issue.py +++ b/create-gh-issue.py @@ -5,41 +5,69 @@ import sys, re from argparse import ArgumentParser from _py2with3compatibility import run_cmd, quote + SCRIPT_DIR = dirname(abspath(sys.argv[0])) parser = ArgumentParser() -parser.add_argument("-r", "--repository", dest="repo", help="Github Repositoy name e.g cms-sw/cms-bot",type=str) -parser.add_argument("-t", "--title", dest="title", help="Issue title",type=str) -parser.add_argument("-m", "--message", dest="msg", help="Message to be posted s body of the GH issue",type=str, default='') -parser.add_argument("-R", "--report_file", dest="report_file", help="File name contaning the issue message",type=str, default='') +parser.add_argument( + "-r", "--repository", dest="repo", help="Github Repositoy name e.g cms-sw/cms-bot", type=str +) +parser.add_argument("-t", "--title", dest="title", help="Issue title", type=str) +parser.add_argument( + "-m", + "--message", + dest="msg", + help="Message to be posted s body of the GH issue", + type=str, + default="", +) +parser.add_argument( + "-R", + "--report_file", + dest="report_file", + help="File name contaning the issue message", + type=str, + default="", +) args = parser.parse_args() -mgs="" -if not args.repo: parser.error("Missing Repo") -if not args.title: parser.error("Missing PR title") -if args.msg: msg = re.sub("@N@","\n",args.msg) -elif args.report_file: msg = open(args.report_file).read() -else: parser.error("Missing issue message: -m|--message OR -R|--report-file ") +mgs = "" +if not args.repo: + parser.error("Missing Repo") +if not args.title: + parser.error("Missing PR title") +if args.msg: + msg = re.sub("@N@", "\n", args.msg) +elif args.report_file: + msg = open(args.report_file).read() +else: + parser.error("Missing issue message: -m|--message OR -R|--report-file ") print("Authenticating to Github and connecting to repo") -repo_dir = join(SCRIPT_DIR,'repos',args.repo.replace("-","_")) -if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) +repo_dir = join(SCRIPT_DIR, "repos", args.repo.replace("-", "_")) +if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) import repo_config + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) print("Authentication succeeeded") gh_repo = gh.get_repo(args.repo) -cmd = "curl -s 'https://api.github.com/search/issues?q=%s+repo:%s+in:title+type:issue' | grep '\"number\"' | head -1 | sed -e 's|.*: ||;s|,.*||'" % (quote(args.title),args.repo) -print("Checking existing Issue",cmd) +cmd = ( + "curl -s 'https://api.github.com/search/issues?q=%s+repo:%s+in:title+type:issue' | grep '\"number\"' | head -1 | sed -e 's|.*: ||;s|,.*||'" + % (quote(args.title), args.repo) +) +print("Checking existing Issue", cmd) e, o = run_cmd(cmd) -print("Existing Issues:",e,o) +print("Existing Issues:", e, o) issue = None if not e: - try: issue = gh_repo.get_issue(int(o)) - except: pass + try: + issue = gh_repo.get_issue(int(o)) + except: + pass if issue: - print("Updating comment") - issue.create_comment(msg) + print("Updating comment") + 
issue.create_comment(msg) else: - print("Creating issue request") - gh_repo.create_issue(args.title, msg) - + print("Creating issue request") + gh_repo.create_issue(args.title, msg) diff --git a/create-gh-pr.py b/create-gh-pr.py index b59c0af07121..5244bd11218e 100755 --- a/create-gh-pr.py +++ b/create-gh-pr.py @@ -4,23 +4,55 @@ from argparse import ArgumentParser parser = ArgumentParser() -parser.add_argument("-r", "--repository", dest="repo", help="Github Repositoy name e.g cms-sw/cms-bot",type=str) -parser.add_argument("-b", "--base_branch", dest="base_branch",help="Repository branch againt which new Pull request should be created",type=str) -parser.add_argument("-f", "--feature_branch", dest="feature_branch",help="New feature branch to be merged",type=str) -parser.add_argument("-t", "--title", dest="title", help="Pull request title",type=str) -parser.add_argument("-d", "--body", dest="body", help="Pull request body text, optional",type=str, default='') -parser.add_argument("-c", "--comment", dest="comment", help="Extra comment after creating Pull requests e.g. please tests",type=str, default='') +parser.add_argument( + "-r", "--repository", dest="repo", help="Github Repositoy name e.g cms-sw/cms-bot", type=str +) +parser.add_argument( + "-b", + "--base_branch", + dest="base_branch", + help="Repository branch againt which new Pull request should be created", + type=str, +) +parser.add_argument( + "-f", + "--feature_branch", + dest="feature_branch", + help="New feature branch to be merged", + type=str, +) +parser.add_argument("-t", "--title", dest="title", help="Pull request title", type=str) +parser.add_argument( + "-d", "--body", dest="body", help="Pull request body text, optional", type=str, default="" +) +parser.add_argument( + "-c", + "--comment", + dest="comment", + help="Extra comment after creating Pull requests e.g. 
please tests", + type=str, + default="", +) args = parser.parse_args() -if not args.repo: parser.error("Missing Repo") -if not args.base_branch: parser.error("Missing base branch name.") -if not args.feature_branch: parser.error("Missing feature branch name.") -if not args.title: parser.error("Missing PR title") +if not args.repo: + parser.error("Missing Repo") +if not args.base_branch: + parser.error("Missing base branch name.") +if not args.feature_branch: + parser.error("Missing feature branch name.") +if not args.title: + parser.error("Missing PR title") print("Authenticating to Github and connecting to repo") -gh = Github(login_or_token = open(expanduser("~/.github-token")).read().strip()) +gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) print("Authentication succeeeded") gh_repo = gh.get_repo(args.repo) print("Creating pull request") -pr = gh_repo.create_pull(title = args.title, body = args.body.replace('@N@','\n'), base = args.base_branch, head = args.feature_branch) +pr = gh_repo.create_pull( + title=args.title, + body=args.body.replace("@N@", "\n"), + base=args.base_branch, + head=args.feature_branch, +) if args.comment: - pr.create_issue_comment(body=args.comment) + pr.create_issue_comment(body=args.comment) diff --git a/create-gh-release.py b/create-gh-release.py index f3458719b6ec..994e95825073 100755 --- a/create-gh-release.py +++ b/create-gh-release.py @@ -4,25 +4,29 @@ from _py2with3compatibility import Request, urlopen from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO -GH_TOKEN = open( expanduser("~/.github-token")).read().strip() +GH_TOKEN = open(expanduser("~/.github-token")).read().strip() release_name = sys.argv[1] -branch = sys.argv[2] -print('Creating release:\n %s based on %s' % (release_name, branch)) +branch = sys.argv[2] +print("Creating release:\n %s based on %s" % (release_name, branch)) # creating releases will be available in the next version of pyGithub -params = { "tag_name" : release_name, - "target_commitish" : branch, - "name" : release_name, - "body" : 'cms-bot is going to build this release', - "draft" : False, - "prerelease" : False } +params = { + "tag_name": release_name, + "target_commitish": branch, + "name": release_name, + "body": "cms-bot is going to build this release", + "draft": False, + "prerelease": False, +} -request = Request("https://api.github.com/repos/" + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO +"/releases", - headers={"Authorization" : "token " + GH_TOKEN }) -request.get_method = lambda: 'POST' -print('--') +request = Request( + "https://api.github.com/repos/" + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO + "/releases", + headers={"Authorization": "token " + GH_TOKEN}, +) +request.get_method = lambda: "POST" +print("--") try: - print(urlopen( request, json.dumps( params ).encode()).read()) - print("OK release",release_name,"created") + print(urlopen(request, json.dumps(params).encode()).read()) + print("OK release", release_name, "created") except Exception as e: - print('There was an error while creating the release:\n', e) + print("There was an error while creating the release:\n", e) diff --git a/create-github-hooks b/create-github-hooks deleted file mode 100755 index 450cf34c7465..000000000000 --- a/create-github-hooks +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 -from github import Github -from os.path import expanduser, exists, join, dirname, abspath -from os import environ -from optparse import OptionParser -from github_hooks_config import get_repository_hooks, 
get_event_hooks -from github_utils import api_rate_limits -import hashlib -from categories import EXTERNAL_REPOS, CMSSW_REPOS, CMSDIST_REPOS -from sys import argv -from socket import setdefaulttimeout -setdefaulttimeout(120) -SCRIPT_DIR = dirname(abspath(argv[0])) - -#Get secret from file -def get_secret(hook_name): - if "GH_HOOK_SECRET_FILE" in environ: secret_file=environ['GH_HOOK_SECRET_FILE'] - else: - secret_file = '/data/secrets/' + hook_name - if not exists(secret_file): - secret_file = '/data/secrets/github_hook_secret_cmsbot' - return open(secret_file,'r').read().split('\n')[0].strip() -#match hook config -def match_config(new,old): - if new["active"] != old.active: - return False - elif set(new["events"]) != set(old.events): - return False - for key in new["config"]: - if (not key in old.config) or (key!='secret' and new["config"][key] != old.config[key]): - return False - return True - -#main section -if __name__ == "__main__": - parser = OptionParser(usage="%prog [-k|--hook ] [-r|--repository ] [-f|--force] [-n|--dry-run]") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-f", "--force", dest="force", action="store_true", help="Force update github hook", default=False) - parser.add_option("-r", "--repository",dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default=None) - parser.add_option("-e", "--externals", dest="externals", action="store_true", help="Only process CMS externals repositories", default=False) - parser.add_option("-u", "--users", dest="users", action="store_true", help="Only process USER externals repositories", default=False) - parser.add_option("-c", "--cmssw", dest="cmssw", action="store_true", help="Only process "+",".join(CMSSW_REPOS)+" repository", default=False) - parser.add_option("-d", "--cmsdist", dest="cmsdist", action="store_true", help="Only process "+",".join(CMSDIST_REPOS)+" repository", default=False) - parser.add_option("-a", "--all", dest="all", action="store_true", help="Process all CMS repository i.e. 
externals, cmsdist and cmssw", default=False) - parser.add_option("-k", "--hook", dest="hook", help="Github Hook name", type=str, default="") - opts, args = parser.parse_args() - - repos_names = [] - if opts.repository: - repos_names.append(opts.repository) - elif opts.all: - opts.externals = True - opts.cmssw = True - opts.cmsdist = True - elif (not opts.externals) and (not opts.cmssw) and (not opts.cmsdist) and (not opts.users): - parser.error("Too few arguments, please use either -e, -c , -u or -d") - - if not repos_names: - if opts.externals: repos_names = repos_names + EXTERNAL_REPOS - if opts.cmssw: repos_names = repos_names + CMSSW_REPOS - if opts.cmsdist: repos_names = repos_names + CMSDIST_REPOS - if opts.users: - from glob import glob - for rconf in glob(join(SCRIPT_DIR,"repos","*","*","repo_config.py")): - repos_names.append("/".join(rconf.split("/")[-3:-1])) - print("Added User repo: ",repos_names[-1]) - - ghx = Github(login_or_token = open(expanduser("~/.github-token")).read().strip()) - api_rate_limits(ghx) - #get repos to be processed - repos = {} - for r in set(repos_names): - if not "/" in r: - for repo in ghx.get_user(r).get_repos(): - repos[repo.full_name]=repo - api_rate_limits(ghx) - else: - repos[r]=None - - #process repos - for repo_name in repos: - gh = ghx - isUserRepo = False - if exists (join(SCRIPT_DIR,"repos",repo_name,"repo_config.py")): - exec('from repos.'+repo_name.replace("/",".")+' import repo_config') - if not repo_config.ADD_WEB_HOOK: - print("Skipped Web hook:",repo_name) - continue - isUserRepo = True - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - repo_name = repo_config.GH_REPO_FULLNAME - xfile = repo_name.replace("/","-")+".done" - if exists(xfile): continue - print("Checking for repo ",repo_name) - if isUserRepo: - hk_conf = get_event_hooks(repo_config.VALID_WEB_HOOKS) - else: - hk_conf = get_repository_hooks (repo_name, opts.hook) - hooks = list(hk_conf.keys()) - if not hooks: - print("==>Warning: No hook found for repository",repo_name) - continue - - print("Found hooks:",hooks) - repo = repos[repo_name] - if not repo: repo = gh.get_repo(repo_name) - repo_hooks_all = {} - for hook in repo.get_hooks(): - if "name" in hook.config: - repo_hooks_all[ hook.config['name'] ] = hook - api_rate_limits(gh) - print("Dryrun:",opts.dryRun) - for hook in hooks: - print("checking for web hook", hook) - hook_conf = hk_conf[hook] - hook_conf["name"] = "web" - hook_conf["config"]["insecure_ssl"] = "1" - hook_conf["config"]["secret"] = get_secret(hook) - hook_conf["config"]["name"] = hook - hook_conf["config"]["data"] = hashlib.sha256(hook_conf["config"]["secret"].encode()).hexdigest() - if hook in repo_hooks_all: - old_hook = repo_hooks_all[hook] - if opts.force or not match_config(hook_conf,old_hook): - if not opts.dryRun: - old_hook.edit(**hook_conf) - api_rate_limits(gh) - print("hook updated",hook) - else: - print("Hook configuration is same",hook) - else: - if not opts.dryRun: - repo.create_hook(**hook_conf) - api_rate_limits(gh) - print("Hook created in github.....success",hook) - ref = open(xfile,"w") - ref.close() diff --git a/create-github-hooks b/create-github-hooks new file mode 120000 index 000000000000..a975131a5e3a --- /dev/null +++ b/create-github-hooks @@ -0,0 +1 @@ +create-github-hooks.py \ No newline at end of file diff --git a/create-github-hooks.py b/create-github-hooks.py new file mode 100755 index 000000000000..441dc6dd3365 --- /dev/null +++ b/create-github-hooks.py @@ -0,0 +1,208 @@ +#!/usr/bin/env 
python3 +from github import Github +from os.path import expanduser, exists, join, dirname, abspath +from os import environ +from optparse import OptionParser +from github_hooks_config import get_repository_hooks, get_event_hooks +from github_utils import api_rate_limits +import hashlib +from categories import EXTERNAL_REPOS, CMSSW_REPOS, CMSDIST_REPOS +from sys import argv +from socket import setdefaulttimeout + +setdefaulttimeout(120) +SCRIPT_DIR = dirname(abspath(argv[0])) + + +# Get secret from file +def get_secret(hook_name): + if "GH_HOOK_SECRET_FILE" in environ: + secret_file = environ["GH_HOOK_SECRET_FILE"] + else: + secret_file = "/data/secrets/" + hook_name + if not exists(secret_file): + secret_file = "/data/secrets/github_hook_secret_cmsbot" + return open(secret_file, "r").read().split("\n")[0].strip() + + +# match hook config +def match_config(new, old): + if new["active"] != old.active: + return False + elif set(new["events"]) != set(old.events): + return False + for key in new["config"]: + if (not key in old.config) or (key != "secret" and new["config"][key] != old.config[key]): + return False + return True + + +# main section +if __name__ == "__main__": + parser = OptionParser( + usage="%prog [-k|--hook ] [-r|--repository ] [-f|--force] [-n|--dry-run]" + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + help="Force update github hook", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. cms-sw/cmssw.", + type=str, + default=None, + ) + parser.add_option( + "-e", + "--externals", + dest="externals", + action="store_true", + help="Only process CMS externals repositories", + default=False, + ) + parser.add_option( + "-u", + "--users", + dest="users", + action="store_true", + help="Only process USER externals repositories", + default=False, + ) + parser.add_option( + "-c", + "--cmssw", + dest="cmssw", + action="store_true", + help="Only process " + ",".join(CMSSW_REPOS) + " repository", + default=False, + ) + parser.add_option( + "-d", + "--cmsdist", + dest="cmsdist", + action="store_true", + help="Only process " + ",".join(CMSDIST_REPOS) + " repository", + default=False, + ) + parser.add_option( + "-a", + "--all", + dest="all", + action="store_true", + help="Process all CMS repository i.e. 
externals, cmsdist and cmssw", + default=False, + ) + parser.add_option("-k", "--hook", dest="hook", help="Github Hook name", type=str, default="") + opts, args = parser.parse_args() + + repos_names = [] + if opts.repository: + repos_names.append(opts.repository) + elif opts.all: + opts.externals = True + opts.cmssw = True + opts.cmsdist = True + elif (not opts.externals) and (not opts.cmssw) and (not opts.cmsdist) and (not opts.users): + parser.error("Too few arguments, please use either -e, -c , -u or -d") + + if not repos_names: + if opts.externals: + repos_names = repos_names + EXTERNAL_REPOS + if opts.cmssw: + repos_names = repos_names + CMSSW_REPOS + if opts.cmsdist: + repos_names = repos_names + CMSDIST_REPOS + if opts.users: + from glob import glob + + for rconf in glob(join(SCRIPT_DIR, "repos", "*", "*", "repo_config.py")): + repos_names.append("/".join(rconf.split("/")[-3:-1])) + print("Added User repo: ", repos_names[-1]) + + ghx = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + api_rate_limits(ghx) + # get repos to be processed + repos = {} + for r in set(repos_names): + if not "/" in r: + for repo in ghx.get_user(r).get_repos(): + repos[repo.full_name] = repo + api_rate_limits(ghx) + else: + repos[r] = None + + # process repos + for repo_name in repos: + gh = ghx + isUserRepo = False + if exists(join(SCRIPT_DIR, "repos", repo_name, "repo_config.py")): + exec("from repos." + repo_name.replace("/", ".") + " import repo_config") + if not repo_config.ADD_WEB_HOOK: + print("Skipped Web hook:", repo_name) + continue + isUserRepo = True + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + repo_name = repo_config.GH_REPO_FULLNAME + xfile = repo_name.replace("/", "-") + ".done" + if exists(xfile): + continue + print("Checking for repo ", repo_name) + if isUserRepo: + hk_conf = get_event_hooks(repo_config.VALID_WEB_HOOKS) + else: + hk_conf = get_repository_hooks(repo_name, opts.hook) + hooks = list(hk_conf.keys()) + if not hooks: + print("==>Warning: No hook found for repository", repo_name) + continue + + print("Found hooks:", hooks) + repo = repos[repo_name] + if not repo: + repo = gh.get_repo(repo_name) + repo_hooks_all = {} + for hook in repo.get_hooks(): + if "name" in hook.config: + repo_hooks_all[hook.config["name"]] = hook + api_rate_limits(gh) + print("Dryrun:", opts.dryRun) + for hook in hooks: + print("checking for web hook", hook) + hook_conf = hk_conf[hook] + hook_conf["name"] = "web" + hook_conf["config"]["insecure_ssl"] = "1" + hook_conf["config"]["secret"] = get_secret(hook) + hook_conf["config"]["name"] = hook + hook_conf["config"]["data"] = hashlib.sha256( + hook_conf["config"]["secret"].encode() + ).hexdigest() + if hook in repo_hooks_all: + old_hook = repo_hooks_all[hook] + if opts.force or not match_config(hook_conf, old_hook): + if not opts.dryRun: + old_hook.edit(**hook_conf) + api_rate_limits(gh) + print("hook updated", hook) + else: + print("Hook configuration is same", hook) + else: + if not opts.dryRun: + repo.create_hook(**hook_conf) + api_rate_limits(gh) + print("Hook created in github.....success", hook) + ref = open(xfile, "w") + ref.close() diff --git a/create-new-data-pr.py b/create-new-data-pr.py index 75dcefba2ba2..39c86e517207 100755 --- a/create-new-data-pr.py +++ b/create-new-data-pr.py @@ -9,156 +9,197 @@ from github_utils import get_git_tree from _py2with3compatibility import run_cmd, HTTPError, urlopen + def update_tag_version(current_version): - updated_version = int(current_version)+1 
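Editorial aside on create-github-hooks.py above (not part of the patch): match_config() decides whether an existing repository hook already matches the desired configuration; events are compared as sets, and the hook secret must be present in the stored config but its value is never re-compared. A small self-contained sketch of that behaviour, using a stand-in object instead of a real PyGithub hook:

# Minimal sketch, not part of the patch; the hook object and all values are stand-ins.
class FakeHook:
    def __init__(self, active, events, config):
        self.active, self.events, self.config = active, events, config

def match_config(new, old):  # same logic as in create-github-hooks.py above
    if new["active"] != old.active:
        return False
    elif set(new["events"]) != set(old.events):
        return False
    for key in new["config"]:
        if (not key in old.config) or (key != "secret" and new["config"][key] != old.config[key]):
            return False
    return True

desired = {
    "active": True,
    "events": ["push", "pull_request"],
    "config": {"url": "https://example.invalid/hook", "content_type": "json", "secret": "new"},
}
existing = FakeHook(
    True,
    ["pull_request", "push"],
    {"url": "https://example.invalid/hook", "content_type": "json", "secret": "old"},
)

print(match_config(desired, existing))  # True: events compare as sets, secret value is ignored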
- if updated_version<10: - updated_version = '0%s' % updated_version + updated_version = int(current_version) + 1 + if updated_version < 10: + updated_version = "0%s" % updated_version return str(updated_version) + def get_tag_from_string(tag_string=None): tag = None - for i in tag_string.split('\n'): + for i in tag_string.split("\n"): m = re.search("(V[0-9]{2}(-[0-9]{2})+)", i) if m: - tag = m.group() - break + tag = m.group() + break return tag -if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - - parser.add_option("-r", "--data-repo", dest="data_repo", help="Github data repositoy name e.g. cms-data/RecoTauTag-TrainingFiles.", - type=str, default=None) - parser.add_option("-d", "--dist-repo", dest="dist_repo", help="Github dist repositoy name e.g. cms-sw/cmsdist.", - type=str, default='') - parser.add_option("-p", "--pull-request", dest="pull_request", help="Pull request number", - type=str, default=None) - opts, args = parser.parse_args() - - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - - data_repo = gh.get_repo(opts.data_repo) - data_prid = int(opts.pull_request) - dist_repo = gh.get_repo(opts.dist_repo) - data_repo_pr = data_repo.get_pull(data_prid) - if not data_repo_pr.merged: - print('The pull request isn\'t merged !') - exit(0) - data_pr_base_branch = data_repo_pr.base.ref - data_repo_default_branch = data_repo.default_branch - # create master just to exist on the cms-data repo if it doesn't - if data_repo_default_branch != "master": - if "master" not in [branch.name for branch in data_repo.get_branches()]: - data_repo.create_git_ref(ref='refs/heads/master', sha=data_repo.get_branch(data_repo_default_branch).commit.sha) - - err, out = run_cmd("rm -rf repo && git clone --bare https://github.com/%s -b %s repo && GIT_DIR=repo git log --pretty='%%d'" % (opts.data_repo, data_pr_base_branch)) - last_release_tag = get_tag_from_string(out) - - if last_release_tag: - comparison = data_repo.compare(data_pr_base_branch, last_release_tag) - print('commits behind ', comparison.behind_by) - create_new_tag = True if comparison.behind_by > 0 else False # last tag and master commit difference - print('create new tag ? 
', create_new_tag) - else: - create_new_tag = True - last_release_tag = "V00-00-00" - - # if created files and modified files are the same count, all files are new - - response = urlopen("https://api.github.com/repos/%s/pulls/%s" % (opts.data_repo, opts.pull_request)) - res_json = loads(response.read().decode()) - print(res_json['additions'], res_json['changed_files'], res_json['deletions']) - files_modified = res_json['deletions'] + res_json['changed_files'] - only_new_files=(files_modified==0) - - # if the latest tag/release compared with master(base) or the pr(head) branch is behind then make new tag - new_tag = last_release_tag # in case the tag doesnt change - if create_new_tag: - while True: - print("searching next tag for ",last_release_tag) - tag_data = last_release_tag.strip('V').split('-') - if len(tag_data)<3: tag_data.append('00') - print(tag_data) - # update minor for now - if only_new_files: - tag_data[-1] = update_tag_version(tag_data[-1]) - else: - tag_data[-2] = update_tag_version(tag_data[-2]) - tag_data[-1] = '00' - print('New tag data', tag_data) - new_tag = 'V%s' % '-'.join(tag_data) - try: - has_tag = get_git_tree(new_tag, opts.data_repo) - if "sha" not in has_tag: break - last_release_tag = last_release_tag+"-00-00" - except HTTPError as e: +if __name__ == "__main__": + parser = OptionParser(usage="%prog ") + + parser.add_option( + "-r", + "--data-repo", + dest="data_repo", + help="Github data repositoy name e.g. cms-data/RecoTauTag-TrainingFiles.", + type=str, + default=None, + ) + parser.add_option( + "-d", + "--dist-repo", + dest="dist_repo", + help="Github dist repositoy name e.g. cms-sw/cmsdist.", + type=str, + default="", + ) + parser.add_option( + "-p", + "--pull-request", + dest="pull_request", + help="Pull request number", + type=str, + default=None, + ) + opts, args = parser.parse_args() + + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + + data_repo = gh.get_repo(opts.data_repo) + data_prid = int(opts.pull_request) + dist_repo = gh.get_repo(opts.dist_repo) + data_repo_pr = data_repo.get_pull(data_prid) + if not data_repo_pr.merged: + print("The pull request isn't merged !") + exit(0) + data_pr_base_branch = data_repo_pr.base.ref + data_repo_default_branch = data_repo.default_branch + # create master just to exist on the cms-data repo if it doesn't + if data_repo_default_branch != "master": + if "master" not in [branch.name for branch in data_repo.get_branches()]: + data_repo.create_git_ref( + ref="refs/heads/master", + sha=data_repo.get_branch(data_repo_default_branch).commit.sha, + ) + + err, out = run_cmd( + "rm -rf repo && git clone --bare https://github.com/%s -b %s repo && GIT_DIR=repo git log --pretty='%%d'" + % (opts.data_repo, data_pr_base_branch) + ) + last_release_tag = get_tag_from_string(out) + + if last_release_tag: + comparison = data_repo.compare(data_pr_base_branch, last_release_tag) + print("commits behind ", comparison.behind_by) + create_new_tag = ( + True if comparison.behind_by > 0 else False + ) # last tag and master commit difference + print("create new tag ? 
", create_new_tag) + else: + create_new_tag = True + last_release_tag = "V00-00-00" + + # if created files and modified files are the same count, all files are new + + response = urlopen( + "https://api.github.com/repos/%s/pulls/%s" % (opts.data_repo, opts.pull_request) + ) + res_json = loads(response.read().decode()) + print(res_json["additions"], res_json["changed_files"], res_json["deletions"]) + files_modified = res_json["deletions"] + res_json["changed_files"] + only_new_files = files_modified == 0 + + # if the latest tag/release compared with master(base) or the pr(head) branch is behind then make new tag + new_tag = last_release_tag # in case the tag doesnt change + if create_new_tag: + while True: + print("searching next tag for ", last_release_tag) + tag_data = last_release_tag.strip("V").split("-") + if len(tag_data) < 3: + tag_data.append("00") + print(tag_data) + # update minor for now + if only_new_files: + tag_data[-1] = update_tag_version(tag_data[-1]) + else: + tag_data[-2] = update_tag_version(tag_data[-2]) + tag_data[-1] = "00" + print("New tag data", tag_data) + new_tag = "V%s" % "-".join(tag_data) + try: + has_tag = get_git_tree(new_tag, opts.data_repo) + if "sha" not in has_tag: + break + last_release_tag = last_release_tag + "-00-00" + except HTTPError as e: + break + print(new_tag) + tag_ref = data_repo.create_git_ref( + ref="refs/tags/" + new_tag, sha=data_repo.get_branch(data_pr_base_branch).commit.sha + ) + default_cms_dist_branch = dist_repo.default_branch + repo_name_only = opts.data_repo.split("/")[1] + repo_tag_pr_branch = "update-" + repo_name_only + "-to-" + new_tag + + sb = dist_repo.get_branch(default_cms_dist_branch) + dest_branch = None # + + try: + dist_repo.create_git_ref(ref="refs/heads/" + repo_tag_pr_branch, sha=sb.commit.sha) + dest_branch = dist_repo.get_branch(repo_tag_pr_branch) + except Exception as e: + print(str(e)) + dest_branch = dist_repo.get_branch(repo_tag_pr_branch) + print("Branch exists") + + # file with tags on the default branch + cmsswdatafile = "data/cmsswdata.txt" + content_file = dist_repo.get_contents(cmsswdatafile, repo_tag_pr_branch) + cmsswdatafile_raw = content_file.decoded_content + new_content = "" + # remove the existing line no matter where it is and put the new line right under default + + count = 0 # omit first line linebreaker + for line in cmsswdatafile_raw.splitlines(): + line = line.decode() + updated_line = None + if "[default]" in line: + updated_line = "\n" + line + "\n" + repo_name_only + "=" + new_tag + "" + elif repo_name_only in line: + updated_line = "" + else: + if count > 0: + updated_line = "\n" + line + else: + updated_line = line + count = count + 1 + new_content = new_content + updated_line + + mssg = "Update tag for " + repo_name_only + " to " + new_tag + update_file_object = dist_repo.update_file( + cmsswdatafile, mssg, new_content, content_file.sha, repo_tag_pr_branch + ) + + # file with tags on the default branch + cmsswdataspec = "cmsswdata.spec" + content_file = dist_repo.get_contents(cmsswdataspec, repo_tag_pr_branch) + cmsswdatafile_raw = content_file.decoded_content + new_content = [] + data_pkg = " data-" + repo_name_only + added_pkg = False + for line in cmsswdatafile_raw.splitlines(): + line = line.decode() + new_content.append(line) + if not line.startswith("Requires: "): + continue + if data_pkg in line: + added_pkg = False break - print(new_tag) - tag_ref = data_repo.create_git_ref(ref='refs/tags/'+new_tag, sha=data_repo.get_branch(data_pr_base_branch).commit.sha) - 
default_cms_dist_branch = dist_repo.default_branch - repo_name_only = opts.data_repo.split('/')[1] - repo_tag_pr_branch = 'update-'+repo_name_only+'-to-'+new_tag - - sb = dist_repo.get_branch(default_cms_dist_branch) - dest_branch = None # - - try: - dist_repo.create_git_ref(ref='refs/heads/' + repo_tag_pr_branch, sha=sb.commit.sha) - dest_branch = dist_repo.get_branch(repo_tag_pr_branch) - except Exception as e: - print(str(e)) - dest_branch = dist_repo.get_branch(repo_tag_pr_branch) - print('Branch exists') - - # file with tags on the default branch - cmsswdatafile = "data/cmsswdata.txt" - content_file = dist_repo.get_contents(cmsswdatafile, repo_tag_pr_branch) - cmsswdatafile_raw = content_file.decoded_content - new_content = '' - # remove the existing line no matter where it is and put the new line right under default - - count = 0 # omit first line linebreaker - for line in cmsswdatafile_raw.splitlines(): - line = line.decode() - updated_line = None - if '[default]' in line: - updated_line = '\n'+line+'\n'+repo_name_only+'='+new_tag+'' - elif repo_name_only in line: - updated_line = '' - else: - if count > 0: - updated_line = '\n'+line - else: - updated_line = line - count=count+1 - new_content = new_content+updated_line - - mssg = 'Update tag for '+repo_name_only+' to '+new_tag - update_file_object = dist_repo.update_file(cmsswdatafile, mssg, new_content, content_file.sha, repo_tag_pr_branch) - - # file with tags on the default branch - cmsswdataspec = "cmsswdata.spec" - content_file = dist_repo.get_contents(cmsswdataspec, repo_tag_pr_branch) - cmsswdatafile_raw = content_file.decoded_content - new_content = [] - data_pkg = ' data-'+repo_name_only - added_pkg = False - for line in cmsswdatafile_raw.splitlines(): - line = line.decode() - new_content.append(line) - if not line.startswith('Requires: '): continue - if data_pkg in line: - added_pkg = False - break - if not added_pkg: - added_pkg = True - new_content.append('Requires:'+data_pkg) - - if added_pkg: - mssg = 'Update cmssdata spec for'+data_pkg - update_file_object = dist_repo.update_file(cmsswdataspec, mssg, '\n'.join(new_content), content_file.sha, repo_tag_pr_branch) - - title = 'Update tag for '+repo_name_only+' to '+new_tag - body = 'Move '+repo_name_only+" data to new tag, see \n" + data_repo_pr.html_url + '\n' - change_tag_pull_request = dist_repo.create_pull(title=title, body=body, base=default_cms_dist_branch, head=repo_tag_pr_branch) + if not added_pkg: + added_pkg = True + new_content.append("Requires:" + data_pkg) + + if added_pkg: + mssg = "Update cmssdata spec for" + data_pkg + update_file_object = dist_repo.update_file( + cmsswdataspec, mssg, "\n".join(new_content), content_file.sha, repo_tag_pr_branch + ) + + title = "Update tag for " + repo_name_only + " to " + new_tag + body = "Move " + repo_name_only + " data to new tag, see \n" + data_repo_pr.html_url + "\n" + change_tag_pull_request = dist_repo.create_pull( + title=title, body=body, base=default_cms_dist_branch, head=repo_tag_pr_branch + ) diff --git a/create_json.py b/create_json.py index 016670010bcc..952dd5fd63d1 100755 --- a/create_json.py +++ b/create_json.py @@ -6,27 +6,31 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option('--logfile') -parser.add_option('--jsonfile') +parser.add_option("--logfile") +parser.add_option("--jsonfile") (options, args) = parser.parse_args() + def extract_data(inputfile): - list_of_dicts = [] - with open(inputfile, 'r') as file: - first_char = file.read(1) - if not first_char: print("Error: 
Input file is empty"), sys.exit(1) - pattern = re.compile('^([a-z]+)\+([\w-]+)\+([\w.-]+)\s\(([\w]+)\)') - matched_lines = [pattern.match(l) for l in file.readlines()] - for line in matched_lines: - if line: - list_of_dicts.append(dict( - package_type = line.group(1), - name = line.group(2), - ver_suffix = line.group(3), - hashtag = line.group(4) - )) - return json.dumps(list_of_dicts, sort_keys=True, indent=2) + list_of_dicts = [] + with open(inputfile, "r") as file: + first_char = file.read(1) + if not first_char: + print("Error: Input file is empty"), sys.exit(1) + pattern = re.compile("^([a-z]+)\+([\w-]+)\+([\w.-]+)\s\(([\w]+)\)") + matched_lines = [pattern.match(l) for l in file.readlines()] + for line in matched_lines: + if line: + list_of_dicts.append( + dict( + package_type=line.group(1), + name=line.group(2), + ver_suffix=line.group(3), + hashtag=line.group(4), + ) + ) + return json.dumps(list_of_dicts, sort_keys=True, indent=2) -with open(options.jsonfile, 'w' ) as file: - file.write(extract_data(options.logfile)) +with open(options.jsonfile, "w") as file: + file.write(extract_data(options.logfile)) diff --git a/cuda/install-cuda.py b/cuda/install-cuda.py index f20a37ac52f8..e5d61d557cf7 100755 --- a/cuda/install-cuda.py +++ b/cuda/install-cuda.py @@ -26,18 +26,22 @@ verbose = INFO + def error(text): if verbose >= ERROR: print("Error:", text) + def warning(text): if verbose >= WARNING: print("Warning:", text) + def info(text): if verbose >= INFO: print("Info:", text) + def debug(text): if verbose >= DEBUG: print("Debug:", text) @@ -45,7 +49,7 @@ def debug(text): # Describe an NVIDIA software component for a specific architecture class Component: - def __init__(self, catalog = None, key = None, os_arch = None): + def __init__(self, catalog=None, key=None, os_arch=None): # General information about an NVIDIA software component # key used in the JSON catalog, e.g. 
'cuda_cccl' self.key = str() @@ -71,11 +75,11 @@ def __init__(self, catalog = None, key = None, os_arch = None): def fill(self, catalog, key, os_arch): # Check for None arguments if catalog is None: - raise TypeError('catalog cannot be None') + raise TypeError("catalog cannot be None") if key is None: - raise TypeError('key cannot be None') + raise TypeError("key cannot be None") if os_arch is None: - raise TypeError('os_arch cannot be None') + raise TypeError("os_arch cannot be None") # Store the key self.key = key @@ -84,19 +88,20 @@ def fill(self, catalog, key, os_arch): if key not in catalog: raise RuntimeError(f"the component '{key}' is not available in the JSON catalog") component = catalog[key] - self.name = component['name'] - self.version = component['version'] - self.license = component['license'] + self.name = component["name"] + self.version = component["version"] + self.license = component["license"] # Extract the architecture-specific information about the package if os_arch not in component: - raise RuntimeError(f"the '{name}' component is not available for the '{os_arch}' architecture") + raise RuntimeError( + f"the '{name}' component is not available for the '{os_arch}' architecture" + ) package = component[os_arch] - self.path = package['relative_path'] - self.size = int(package['size']) - self.md5sum = package['md5'] - self.sha256 = package['sha256'] - + self.path = package["relative_path"] + self.size = int(package["size"]) + self.md5sum = package["md5"] + self.sha256 = package["sha256"] # Remove all the suffixes in the list @@ -106,36 +111,43 @@ def removesuffix(arg, *suffixes): modified = False for suffix in suffixes: if arg.endswith(suffix): - arg = arg[:-len(suffix)] + arg = arg[: -len(suffix)] modified = True break return arg -#Create symlinks for soname or libname.so + +# Create symlinks for soname or libname.so def fix_symlinks(install_dir, dry_run=False): - target_dir = os.path.join(install_dir, "targets", "*") - libdirs = [os.path.join(install_dir, "drivers", "lib")] - libdirs.append(os.path.join(target_dir, "lib")) - libdirs.append(os.path.join(target_dir, "lib", "stubs")) - for xdir in libdirs: - for dirname in glob.glob(xdir): - for lib in [os.path.basename(l) for l in glob.glob(os.path.join(dirname, "lib*.so*"))]: - xlib = lib.split(".so")[0][3:] - err, out = subprocess.getstatusoutput("objdump -p %s | grep 'SONAME'" % os.path.join(dirname, lib)) - if not 'SONAME' in out: continue - lib_names = ["lib%s.so" % xlib, out.split('SONAME')[-1].strip()] - for l in lib_names: - full_lib = os.path.join(dirname, l) - if os.path.exists(full_lib): continue - if not dry_run: os.symlink(lib, full_lib) - print("Creating link", full_lib,"->", lib) + target_dir = os.path.join(install_dir, "targets", "*") + libdirs = [os.path.join(install_dir, "drivers", "lib")] + libdirs.append(os.path.join(target_dir, "lib")) + libdirs.append(os.path.join(target_dir, "lib", "stubs")) + for xdir in libdirs: + for dirname in glob.glob(xdir): + for lib in [os.path.basename(l) for l in glob.glob(os.path.join(dirname, "lib*.so*"))]: + xlib = lib.split(".so")[0][3:] + err, out = subprocess.getstatusoutput( + "objdump -p %s | grep 'SONAME'" % os.path.join(dirname, lib) + ) + if not "SONAME" in out: + continue + lib_names = ["lib%s.so" % xlib, out.split("SONAME")[-1].strip()] + for l in lib_names: + full_lib = os.path.join(dirname, l) + if os.path.exists(full_lib): + continue + if not dry_run: + os.symlink(lib, full_lib) + print("Creating link", full_lib, "->", lib) + # Move the file or directory 
tree "src" to "dst", merging any directories that already exist. # Similar to shutil.copytree(src, dst, symlinks=True, ignore=None, copy_function=shutil.move, ignore_dangling_symlinks=True, dirs_exist_ok=True) def movetree(src, dst, overwrite=False): # Make sure the parent of the dst tree exists - dstparent = os.path.normpath(os.path.join(dst, '..')) + dstparent = os.path.normpath(os.path.join(dst, "..")) os.makedirs(dstparent, exist_ok=True) # If the dst tree does not exist, simply move the src tree there @@ -148,9 +160,11 @@ def movetree(src, dst, overwrite=False): dstmode = os.lstat(dst).st_mode # If both src and dst are files or links, the behaviour depends on the `overwrite` parameter. - if (stat.S_ISLNK(srcmode) or stat.S_ISREG(srcmode)) and (stat.S_ISLNK(dstmode) or stat.S_ISREG(dstmode)): + if (stat.S_ISLNK(srcmode) or stat.S_ISREG(srcmode)) and ( + stat.S_ISLNK(dstmode) or stat.S_ISREG(dstmode) + ): # If overwrite is True, overwrite dst. - if (overwrite): + if overwrite: os.remove(dst) shutil.move(src, dst) # If overwrite is False, ignore dst and leave src at its original location. @@ -185,9 +199,11 @@ def download_catalog(url, download_dir): # Check the content type try: - content_type = request.headers['Content-type'] - if (content_type != 'application/json'): - warning(f"the JSON catalog at {url} has the content type '{content_type}' instead of 'application/json'") + content_type = request.headers["Content-type"] + if content_type != "application/json": + warning( + f"the JSON catalog at {url} has the content type '{content_type}' instead of 'application/json'" + ) except: warning(f"the JSON catalog at {url} does not have a valid content type") @@ -202,14 +218,18 @@ def download_catalog(url, download_dir): def parse_catalog(json_file, os_arch): # Load and deserialise the JSON catalog try: - catalog = json.load(open(json_file, 'r')) + catalog = json.load(open(json_file, "r")) except json.decoder.JSONDecodeError as e: error(f"the catalog at is not a valid JSON file") raise e # Skip the 'release_date' and other non-package entries, and the components # that are not available for the given architecture. - components = [ Component(catalog, key, os_arch) for key in catalog if type(catalog[key]) is dict and os_arch in catalog[key] ] + components = [ + Component(catalog, key, os_arch) + for key in catalog + if type(catalog[key]) is dict and os_arch in catalog[key] + ] return components @@ -219,14 +239,16 @@ def check_package(component, local_file): # Check the file size reported by the filesystem stats = os.stat(local_file) - if (stats.st_size != component.size): - raise RuntimeError(f"package '{name}' should have a size of {component.size} bytes, but file {local_file} has a size of {stats.st_size} bytes.") + if stats.st_size != component.size: + raise RuntimeError( + f"package '{name}' should have a size of {component.size} bytes, but file {local_file} has a size of {stats.st_size} bytes." 
+ ) # Read the file in buffered mode, compute its size, the md5 checksum and sha256 hash one chunk at a time size = 0 algo_md5sum = hashlib.md5() algo_sha256 = hashlib.sha256() - with open(local_file, 'rb') as f: + with open(local_file, "rb") as f: chunk = f.read(stats.st_blksize) while chunk: size += len(chunk) @@ -235,14 +257,20 @@ def check_package(component, local_file): chunk = f.read(stats.st_blksize) # Check the file size, checksum and hash with the expected values - if (size != component.size): - raise RuntimeError(f"package '{name}' should have a size of {component.size} bytes, but only {size} bytes could be read from file {local_file}.") + if size != component.size: + raise RuntimeError( + f"package '{name}' should have a size of {component.size} bytes, but only {size} bytes could be read from file {local_file}." + ) md5sum = algo_md5sum.hexdigest() - if (md5sum != component.md5sum): - raise RuntimeError(f"package '{name}' should have an md5 checksum of {component.md5sum}, but file {local_file} has an md5 checksum of {md5sum}.") + if md5sum != component.md5sum: + raise RuntimeError( + f"package '{name}' should have an md5 checksum of {component.md5sum}, but file {local_file} has an md5 checksum of {md5sum}." + ) sha256 = algo_sha256.hexdigest() - if (sha256 != component.sha256): - raise RuntimeError(f"package '{name}' should have a sha256 hash of {component.sha256}, but file {local_file} has a sha256 hash of {sha256}.") + if sha256 != component.sha256: + raise RuntimeError( + f"package '{name}' should have a sha256 hash of {component.sha256}, but file {local_file} has a sha256 hash of {sha256}." + ) # Download a software component package relative to `base_url` and save it to a local file under `download_dir`. @@ -258,7 +286,9 @@ def download_package(base_url, download_dir, component): return target except RuntimeError as e: # If the checks fail, delete the local file and try to download it again. - warning(f"file {target} exists, but does not match the expected size, checksum or hash:") + warning( + f"file {target} exists, but does not match the expected size, checksum or hash:" + ) print(e) os.remove(target) @@ -273,17 +303,19 @@ def download_package(base_url, download_dir, component): def unpack_package(package, local_dir): # Open the package as a tar archive try: - archive = tarfile.open(package, 'r:*') + archive = tarfile.open(package, "r:*") except: raise RuntimeError(f"the package {package} is not a valid archive.") # Check that all components of the archive expand inside the expected directory package_name = os.path.basename(package) - package_name = removesuffix(package_name, '.tar', '.tgz', '.gz', '.bz2', '.xz') + package_name = removesuffix(package_name, ".tar", ".tgz", ".gz", ".bz2", ".xz") archive_dir = os.path.join(local_dir, package_name) for info in archive: if not os.path.normpath(os.path.join(local_dir, info.name)).startswith(archive_dir): - raise RuntimeError(f"the package {package} contents are not in the expected directory.") + raise RuntimeError( + f"the package {package} contents are not in the expected directory." 
+ ) # Delete any pre-existing directory (or file) with the same name if os.path.exists(archive_dir): @@ -299,7 +331,7 @@ def unpack_package(package, local_dir): class RemapRules: - def __init__(self, move = [], keep = [], link = [], skip = [], replace = []): + def __init__(self, move=[], keep=[], link=[], skip=[], replace=[]): self.move = list(move) self.keep = list(keep) self.link = list(link) @@ -308,8 +340,8 @@ def __init__(self, move = [], keep = [], link = [], skip = [], replace = []): def apply(self, archive_dir, install_dir): # move files or directory from 'src' to 'dst' - for (src, dst) in self.move: - if src == '.': + for src, dst in self.move: + if src == ".": src = archive_dir else: src = os.path.join(archive_dir, src) @@ -318,7 +350,7 @@ def apply(self, archive_dir, install_dir): movetree(src, dst) # keep files or directory at 'arg' for arg in self.keep: - if arg == '.': + if arg == ".": src = archive_dir dst = install_dir else: @@ -327,7 +359,7 @@ def apply(self, archive_dir, install_dir): if os.path.exists(src): movetree(src, dst) # symlink files or directory from 'src' (relative) to 'dst' - for (src, dst) in self.link: + for src, dst in self.link: dst = os.path.join(install_dir, dst) tmp = os.path.join(os.path.dirname(dst), src) debug(f"attempt to symlink {src} to {dst}") @@ -349,202 +381,236 @@ def apply(self, archive_dir, install_dir): else: os.remove(src) # apply pair of pattern, text replacements in 'reps' to 'src' - for src,reps in self.replace: + for src, reps in self.replace: src = os.path.join(archive_dir, src) debug(f"applying replacements to {src}") if not os.path.exists(src): warning(f"{src} does not exist") continue mode = stat.S_IMODE(os.stat(src).st_mode) - with open(src, 'r') as f: + with open(src, "r") as f: content = f.read() for pattern, replace in reps: content = content.replace(pattern, replace) os.chmod(src, mode | stat.S_IWUSR) - with open(src, 'w') as f: + with open(src, "w") as f: f.write(content) os.chmod(src, mode) def build_remap_rules(target): - remap = { # these rules are applied to every package, if the sources exist, after the package-specific ones - '*': RemapRules( - move = [ + "*": RemapRules( + move=[ # the source is relative to the unpacked package directory # the destination is relative to the installation directory - ('lib', f'{target}/lib'), - ('include', f'{target}/include'), - ('pkg-config', f'{target}/lib/pkgconfig'), - ('res', f'{target}/res'), + ("lib", f"{target}/lib"), + ("include", f"{target}/include"), + ("pkg-config", f"{target}/lib/pkgconfig"), + ("res", f"{target}/res"), ], - keep = [ + keep=[ # relative to the unpacked package directory, move to the # same location relative to the installation directory ], - link = [ + link=[ # both source and destination are relative to the installation directory # and will use relative symlinks - (f'{target}/lib', 'lib64'), - (f'{target}/include', 'include'), - (f'{target}/res', 'res'), + (f"{target}/lib", "lib64"), + (f"{target}/include", "include"), + (f"{target}/res", "res"), ], - skip = [ + skip=[ # relative to the unpacked package directory, allows wildcards ], - replace = [ + replace=[ # list of files, patterns and replacement text - ] - ), - 'cuda_cupti': RemapRules( - move = [ - ('samples', 'extras/CUPTI/samples'), - ('doc', 'extras/CUPTI/doc'), - ] - ), - 'cuda_demo_suite' : RemapRules( - move = [ - ('demo_suite', 'extras/demo_suite'), - ] - ), - 'cuda_documentation': RemapRules( - keep = [ - '.' 
- ] - ), - 'cuda_gdb': RemapRules( - skip = [ - 'extras/cuda-gdb-*.src.tar.gz', - ] - ), - 'cuda_nvvp' : RemapRules( - link = [ - ('nvvp', 'bin/computeprof') - ] - ), - 'libcufile' : RemapRules( - move = [ - ('README', 'gds/README'), - ('etc/cufile.json', 'gds/cufile.json'), - ('samples', 'gds/samples'), - ('tools', 'gds/tools'), ], - skip = [ - 'etc', - ] ), - 'nvidia_driver': RemapRules( - move = [ - ('.', 'drivers') + "cuda_cupti": RemapRules( + move=[ + ("samples", "extras/CUPTI/samples"), + ("doc", "extras/CUPTI/doc"), ] ), - 'libnvidia_nscq': RemapRules( - move = [ - ('.', 'drivers') + "cuda_demo_suite": RemapRules( + move=[ + ("demo_suite", "extras/demo_suite"), ] ), - 'nvidia_fs' : RemapRules( - move = [ - ('.', 'drivers/nvidia_fs') + "cuda_documentation": RemapRules(keep=["."]), + "cuda_gdb": RemapRules( + skip=[ + "extras/cuda-gdb-*.src.tar.gz", ] ), - 'fabricmanager' : RemapRules( - move = [ - ('.', 'fabricmanager') - ] + "cuda_nvvp": RemapRules(link=[("nvvp", "bin/computeprof")]), + "libcufile": RemapRules( + move=[ + ("README", "gds/README"), + ("etc/cufile.json", "gds/cufile.json"), + ("samples", "gds/samples"), + ("tools", "gds/tools"), + ], + skip=[ + "etc", + ], ), + "nvidia_driver": RemapRules(move=[(".", "drivers")]), + "libnvidia_nscq": RemapRules(move=[(".", "drivers")]), + "nvidia_fs": RemapRules(move=[(".", "drivers/nvidia_fs")]), + "fabricmanager": RemapRules(move=[(".", "fabricmanager")]), } return remap - # Move the contents of package to the installation directory def install_package(component, archive_dir, install_dir, rules): - # Apply the package-specific remap rules - if component.key in rules: - rules[component.key].apply(archive_dir, install_dir) + # Apply the package-specific remap rules + if component.key in rules: + rules[component.key].apply(archive_dir, install_dir) - # If the top-level archive directory was moved by a remap rule, there is nothing left to do - if not os.path.isdir(archive_dir): - return + # If the top-level archive directory was moved by a remap rule, there is nothing left to do + if not os.path.isdir(archive_dir): + return - # Apply the global remap rules - if '*' in rules: - rules['*'].apply(archive_dir, install_dir) + # Apply the global remap rules + if "*" in rules: + rules["*"].apply(archive_dir, install_dir) - # Move any files in the top-level archive directory to a .../share/doc/package subdirectory of the installation directory - top_level_files = [f'{archive_dir}/{f.name}' for f in os.scandir(archive_dir) if not f.is_dir(follow_symlinks=False)] - if (top_level_files): - share_doc = os.path.join(install_dir, 'share/doc', component.key) - os.makedirs(share_doc) - for f in top_level_files: - shutil.move(f, share_doc) + # Move any files in the top-level archive directory to a .../share/doc/package subdirectory of the installation directory + top_level_files = [ + f"{archive_dir}/{f.name}" + for f in os.scandir(archive_dir) + if not f.is_dir(follow_symlinks=False) + ] + if top_level_files: + share_doc = os.path.join(install_dir, "share/doc", component.key) + os.makedirs(share_doc) + for f in top_level_files: + shutil.move(f, share_doc) - # Move everything else to the installation directory - movetree(archive_dir, install_dir) + # Move everything else to the installation directory + movetree(archive_dir, install_dir) def main(): global verbose # Base URL for the NVIDIA JSON catalogs and the redistributable software components - base_url = 'https://developer.download.nvidia.com/compute/cuda/redist/' + base_url = 
"https://developer.download.nvidia.com/compute/cuda/redist/" # Packages that should _not_ be unpacked and installed - blacklist = [ 'fabricmanager', 'libnvidia_nscq' ] + blacklist = ["fabricmanager", "libnvidia_nscq"] # If not empty, restrinct the installation to the packages in this list, minus those in the blacklist. - whitelist = [ ] + whitelist = [] # Command line arguments and options - parser = argparse.ArgumentParser( - description = 'Download, unpack and install the CUDA runtime.') + parser = argparse.ArgumentParser(description="Download, unpack and install the CUDA runtime.") - parser.add_argument('version', metavar='VERSION', nargs='*', help='Version to download, unpack and install, e.g. 11.7.1 or 12.0.0') + parser.add_argument( + "version", + metavar="VERSION", + nargs="*", + help="Version to download, unpack and install, e.g. 11.7.1 or 12.0.0", + ) # Possible architectures for the NVIDIA redistributables: 'x86_64', 'ppc64le', 'sbsa' (aarch64 server), 'aarch64' (aarch64 embedded) # We supporty only aarch server, so we use 'aarch64' to select 'sbsa' - parser.add_argument('-a', '--arch', metavar='ARCH', choices=['x86_64', 'aarch64', 'ppc64le'], default='x86_64', - help='the architecture to download the components for; aarch64 selects the ARM sbsa architecture (Server Base System Architecture)') + parser.add_argument( + "-a", + "--arch", + metavar="ARCH", + choices=["x86_64", "aarch64", "ppc64le"], + default="x86_64", + help="the architecture to download the components for; aarch64 selects the ARM sbsa architecture (Server Base System Architecture)", + ) # We support only Linux, so we actually override any user's choice with 'linux' - parser.add_argument('-o', '--os', metavar='OS', choices=['rhel7', 'rhel8', 'rhel9'], default='rhel9', - help='the operating system to download the components for; currently this is ignored, because a single set of components supports all recent Linux versions') - parser.add_argument('-d', '--download-dir', metavar='PATH', default=None, - help='directory where the components should be downloaded; the default is /cvmfs/patatrack.cern.ch/externals/ARCH/OS/nvidia/download/cuda-VERSION') - parser.add_argument('-i', '--install-dir', metavar='PATH', default=None, - help='directory where the components should be installed; the default is /cvmfs/patatrack.cern.ch/externals/ARCH/OS/nvidia/cuda-VERSION') - parser.add_argument('-u', '--base-url', metavar='URL', default=base_url, - help='base URL for the NVIDIA JSON catalogs and the redistributable software components') - parser.add_argument('-t', '--temp-dir', metavar='PATH', default=None, - help='temporary directory for unpacking the components; if not specified a system default will be used') - parser.add_argument('-x', '--exclude', metavar='COMPONENT', nargs='*', default=[], - help='components to exclude from the installation; the default is%s' % (': ' + ' '.join(blacklist) if blacklist else ' to install all components')) - parser.add_argument('-s', '--select', metavar='COMPONENT', nargs='*', default=[], - help='components to include in the installation; the default is%s' % (': ' + ' '.join(whitelist) if whitelist else ' to install all components')) - parser.add_argument('-c', '--cvmfs', action='store_true', default=False, - help='special handling for CVMFS targets: cvmfs_server transaction/publish patatrack.cern.ch, create .cvmfscatalog in the download and installation directories') - parser.add_argument('-v', '--verbose', action='store_true', default=False, - help='be more verbose') + parser.add_argument( + 
"-o", + "--os", + metavar="OS", + choices=["rhel7", "rhel8", "rhel9"], + default="rhel9", + help="the operating system to download the components for; currently this is ignored, because a single set of components supports all recent Linux versions", + ) + parser.add_argument( + "-d", + "--download-dir", + metavar="PATH", + default=None, + help="directory where the components should be downloaded; the default is /cvmfs/patatrack.cern.ch/externals/ARCH/OS/nvidia/download/cuda-VERSION", + ) + parser.add_argument( + "-i", + "--install-dir", + metavar="PATH", + default=None, + help="directory where the components should be installed; the default is /cvmfs/patatrack.cern.ch/externals/ARCH/OS/nvidia/cuda-VERSION", + ) + parser.add_argument( + "-u", + "--base-url", + metavar="URL", + default=base_url, + help="base URL for the NVIDIA JSON catalogs and the redistributable software components", + ) + parser.add_argument( + "-t", + "--temp-dir", + metavar="PATH", + default=None, + help="temporary directory for unpacking the components; if not specified a system default will be used", + ) + parser.add_argument( + "-x", + "--exclude", + metavar="COMPONENT", + nargs="*", + default=[], + help="components to exclude from the installation; the default is%s" + % (": " + " ".join(blacklist) if blacklist else " to install all components"), + ) + parser.add_argument( + "-s", + "--select", + metavar="COMPONENT", + nargs="*", + default=[], + help="components to include in the installation; the default is%s" + % (": " + " ".join(whitelist) if whitelist else " to install all components"), + ) + parser.add_argument( + "-c", + "--cvmfs", + action="store_true", + default=False, + help="special handling for CVMFS targets: cvmfs_server transaction/publish patatrack.cern.ch, create .cvmfscatalog in the download and installation directories", + ) + parser.add_argument( + "-v", "--verbose", action="store_true", default=False, help="be more verbose" + ) args = parser.parse_args() if args.verbose: verbose = DEBUG # We supporty only aarch server, so we use 'aarch64' to select 'sbsa' - if args.arch == 'aarch64': - args.arch = 'sbsa' + if args.arch == "aarch64": + args.arch = "sbsa" # Valid combinations: 'linux-x86_64', 'linux-ppc64le', 'linux-sbsa', 'windows-x86_64', 'linux-aarch64' - os_arch = f'linux-{args.arch}' + os_arch = f"linux-{args.arch}" # Customise the remap rules for the ARCH-OS target - target = f'targets/{args.arch}-linux' + target = f"targets/{args.arch}-linux" rules = build_remap_rules(target) # Pattern used to check the version numbers (e.g. 
11.7.1 or 12.0.0) - version_check = re.compile(r'^[1-9][0-9]*\.[0-9]+\.[0-9]+$') + version_check = re.compile(r"^[1-9][0-9]*\.[0-9]+\.[0-9]+$") # Blacklist and whitelist if args.exclude: @@ -553,30 +619,29 @@ def main(): whitelist = args.select for arg in args.version: - # Start a CVMFS transaction if args.cvmfs: - subprocess.run(['/bin/cvmfs_server', 'transaction', 'patatrack.cern.ch']) + subprocess.run(["/bin/cvmfs_server", "transaction", "patatrack.cern.ch"]) # CUDA version and catalog URL - if 'https://' in arg: - version = os.path.basename(arg).replace('redistrib_', '').replace('.json', '') + if "https://" in arg: + version = os.path.basename(arg).replace("redistrib_", "").replace(".json", "") url = arg base = os.path.dirname(arg) else: version = arg - url = f'https://developer.download.nvidia.com/compute/cuda/redist/redistrib_{version}.json' + url = f"https://developer.download.nvidia.com/compute/cuda/redist/redistrib_{version}.json" base = args.base_url - if not base.endswith('/'): - base += '/' + if not base.endswith("/"): + base += "/" # Check the version number if not version_check.match(version): - raise RuntimeError(f'Error: invalid CUDA version {version}') + raise RuntimeError(f"Error: invalid CUDA version {version}") # Download directory if args.download_dir is None: - download_dir = f'/cvmfs/patatrack.cern.ch/externals/{args.arch}/{args.os}/nvidia/download/cuda-{version}' + download_dir = f"/cvmfs/patatrack.cern.ch/externals/{args.arch}/{args.os}/nvidia/download/cuda-{version}" else: download_dir = args.download_dir os.makedirs(download_dir, exist_ok=True) @@ -590,52 +655,68 @@ def main(): # Installation directory if args.install_dir is None: - install_dir = f'/cvmfs/patatrack.cern.ch/externals/{args.arch}/{args.os}/nvidia/cuda-{version}' + install_dir = ( + f"/cvmfs/patatrack.cern.ch/externals/{args.arch}/{args.os}/nvidia/cuda-{version}" + ) else: install_dir = args.install_dir # Create a CVMFS catalog in the download directory if args.cvmfs: - open(f'{download_dir}/.cvmfscatalog', 'w').close() + open(f"{download_dir}/.cvmfscatalog", "w").close() info(f"downloading CUDA {version} catalog from {url}") catalog = download_catalog(url, download_dir) components = parse_catalog(catalog, os_arch) # Version-dependent rules for Nsight Compute and Nsight Systems - cuda_major, cuda_minor, cuda_point = version.split('.') + cuda_major, cuda_minor, cuda_point = version.split(".") for component in components: # Nsight Compute - if component.key == 'nsight_compute': - tool_version = '.'.join(component.version.split('.')[0:3]) - rules['nsight_compute'] = RemapRules( - move = [ + if component.key == "nsight_compute": + tool_version = ".".join(component.version.split(".")[0:3]) + rules["nsight_compute"] = RemapRules( + move=[ # move source to destination in the installation directory - (f'nsight-compute/{tool_version}', f'nsight-compute-{tool_version}'), + (f"nsight-compute/{tool_version}", f"nsight-compute-{tool_version}"), ], - skip = [ + skip=[ # skip sources - 'nsight-compute', + "nsight-compute", ], ) # Nsight Systems - if component.key == 'nsight_systems': - tool_version = '.'.join(component.version.split('.')[0:3]) - rules['nsight_systems'] = RemapRules( - move = [ + if component.key == "nsight_systems": + tool_version = ".".join(component.version.split(".")[0:3]) + rules["nsight_systems"] = RemapRules( + move=[ # move source to destination in the installation directory - ('bin/nsight-exporter', 'bin/nsys-exporter'), - (f'nsight-systems/{tool_version}', 
f'nsight-systems-{tool_version}'), + ("bin/nsight-exporter", "bin/nsys-exporter"), + (f"nsight-systems/{tool_version}", f"nsight-systems-{tool_version}"), ], - skip = [ + skip=[ # skip sources - 'nsight-systems', + "nsight-systems", ], - replace = [ + replace=[ # list of files, each associated to a list of patterns and replacement text - ('bin/nsys', [('#VERSION_RSPLIT#', tool_version), ('#CUDA_MAJOR#', cuda_major), ('#CUDA_MINOR#', cuda_minor)]), - ('bin/nsys-ui', [('#VERSION_RSPLIT#', tool_version), ('#CUDA_MAJOR#', cuda_major), ('#CUDA_MINOR#', cuda_minor)]), - ] + ( + "bin/nsys", + [ + ("#VERSION_RSPLIT#", tool_version), + ("#CUDA_MAJOR#", cuda_major), + ("#CUDA_MINOR#", cuda_minor), + ], + ), + ( + "bin/nsys-ui", + [ + ("#VERSION_RSPLIT#", tool_version), + ("#CUDA_MAJOR#", cuda_major), + ("#CUDA_MINOR#", cuda_minor), + ], + ), + ], ) # Populate a list of all packages to be installed @@ -671,12 +752,12 @@ def main(): # Create a CVMFS catalog in the installation directory if args.cvmfs: - open(f'{install_dir}/.cvmfscatalog', 'w').close() + open(f"{install_dir}/.cvmfscatalog", "w").close() # Commit and publish the CVMFS transaction if args.cvmfs: - subprocess.run(['/bin/cvmfs_server', 'publish', 'patatrack.cern.ch']) + subprocess.run(["/bin/cvmfs_server", "publish", "patatrack.cern.ch"]) + if __name__ == "__main__": main() - diff --git a/cvmfs_deployment/has_lease.py b/cvmfs_deployment/has_lease.py index 80282408cf0e..3a65664f607a 100755 --- a/cvmfs_deployment/has_lease.py +++ b/cvmfs_deployment/has_lease.py @@ -1,25 +1,25 @@ #!/usr/bin/env python3 import json, sys, requests -gw = sys.argv[1] +gw = sys.argv[1] path = sys.argv[2].strip("/") -rep = requests.get(gw + '/leases') -data = rep.json()['data'] +rep = requests.get(gw + "/leases") +data = rep.json()["data"] ecode = 1 for xentry in data.keys(): - entry = xentry.strip("/") - rest = "" - if entry.startswith(path): - rest = entry[len(path):] - elif path.startswith(entry): - rest = path[len(entry):] - else: - continue - print(rest) - if rest and rest[0]!="/": - continue - ecode = 0 - print("Yes, there is lease for %s" % entry) - print(data[xentry]) - break + entry = xentry.strip("/") + rest = "" + if entry.startswith(path): + rest = entry[len(path) :] + elif path.startswith(entry): + rest = path[len(entry) :] + else: + continue + print(rest) + if rest and rest[0] != "/": + continue + ecode = 0 + print("Yes, there is lease for %s" % entry) + print(data[xentry]) + break sys.exit(ecode) diff --git a/das-utils/CMSWeb.py b/das-utils/CMSWeb.py index 693c4ef0ade2..94243c91315b 100755 --- a/das-utils/CMSWeb.py +++ b/das-utils/CMSWeb.py @@ -4,119 +4,165 @@ import json import sys from os.path import dirname, abspath + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules from _py2with3compatibility import urlencode, HTTPSConnection -# FIXME - is this script is used ? 
-def format(s, **kwds): return s % kwds -class CMSWeb (object): - def __init__ (self): - self.URL_CMSWEB_BASE='cmsweb.cern.ch' - self.URL_PHEDEX_BLOCKREPLICAS='/phedex/datasvc/json/prod/blockreplicas' - self.URL_DBS_DATASETS='/dbs/prod/global/DBSReader/datasets' - self.URL_DBS_FILES='/dbs/prod/global/DBSReader/files' - self.URL_DBS_RUNS='/dbs/prod/global/DBSReader/runs' - self.URL_DBS_BLOCKS='/dbs/prod/global/DBSReader/blocks' - self.conn = HTTPSConnection(self.URL_CMSWEB_BASE, cert_file='/tmp/x509up_u{0}'.format(getuid()), timeout=30) - self.reply_cache = {} - self.last_result = "" - self.errors = 0 - - def __del__(self): self.conn.close () - - def get_cmsweb_data(self, url): - self.last_result = url - if url in self.reply_cache: return True, self.reply_cache[url] - msg ="" - try: - self.conn.request('GET', url) - msg = self.conn.getresponse() - if msg.status!=200: - self.errors = self.errors + 1 - print('Result: {0} {1}: {2}'.format(msg.status, msg.reason, url)) - return False, {} - self.reply_cache[url]=json.loads(msg.read()) - return True, self.reply_cache[url] - except Exception as e: - print("Error:", e, url) - self.errors = self.errors + 1 - return False, {} - - def search(self, lfn): - lfn_data = {"ds_status":"UNKNOWN", "ds_block":"UNKNOWN", "ds_owner":"UNKNOWN","at_cern":"UNKNOWN","dataset":"UNKNOWN","ds_files":"0"} - - # Find the block - jmsg = self.search_lfn(lfn) - if not jmsg: return lfn_data - block = jmsg[0]['block_name'] - dataset = jmsg[0]['dataset'] - lfn_data['ds_block'] = block - lfn_data['dataset'] = dataset - - # Check if dataset is still VALID - status, res = self.search_dataset_status(dataset) - if status: lfn_data['ds_status'] = res - - # Check if dataset/block exists at T2_CH_CERN and belongs to IB RelVals group - status, res = self.search_block(block) - if status: - for x in res: - lfn_data[x] = res[x] - return lfn_data - - def search_block(self, block): - status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(self.URL_PHEDEX_BLOCKREPLICAS, urlencode({'block': block}))) - if not status: return False, {} - if len(jmsg['phedex']['block']) == 0: return False, {} - block_data = {"at_cern" : "no", "replicas" : [], "ds_files": "0", "ds_owner":"UNKNOWN"} - for replica in jmsg['phedex']['block'][0]['replica']: - if (not "group" in replica) or (not replica['group']): continue - block_data["replica"].append(replica["node"]) - block_data["ds_files"] = str(replica["files"]) - block_data["ds_owner"] = replica["group"].strip().replace(" ","_") - if replica["node"] == "T2_CH_CERN": block_data["at_cern"] = "yes" - return True, block_data - - def search_dataset_status(self, dataset): - status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(self.URL_DBS_DATASETS, urlencode({'detail': 1, 'dataset_access_type': '*', 'dataset': dataset}))) - if not status: return False, "" - return True, jmsg[0]['dataset_access_type'].strip().replace(" ","_") - - def search_lfn(self, lfn): - status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(self.URL_DBS_BLOCKS, urlencode({'detail': 1,'logical_file_name': lfn}))) - if not status: return {} - return jmsg - - def search_files(self, dataset): - status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(self.URL_DBS_FILES, urlencode({'detail': 1,'dataset': dataset}))) - if not status: return {} - return jmsg - - def search_runs(self, dataset): - status, jmsg = self.get_cmsweb_data('{0}?{1}'.format(self.URL_DBS_RUNS, urlencode({'dataset': dataset}))) - if not status: return {} - return jmsg - - def search_blocks(self, dataset): - status, jmsg = 
self.get_cmsweb_data('{0}?{1}'.format(self.URL_DBS_BLOCKS, urlencode({'dataset': dataset, "detail":1}))) - if not status: return {} - return jmsg -if __name__ == "__main__": - from optparse import OptionParser - parser = OptionParser(usage="%prog ") - opts, args = parser.parse_args() - - cmsweb = None - for data in args: - if not cmsweb: cmsweb=CMSWeb() - if data.endswith(".root"): - cmsweb.search(data,{}) - else: - cmsweb.search_dataset(data.split("#")[0]) - if "#" in data: cmsweb.search_block(data) - info = {data : cmsweb.reply_cache} - print(json.dumps(info, indent=2, sort_keys=True, separators=(',',': '))) - cmsweb.reply_cache = {} +# FIXME - is this script is used ? +def format(s, **kwds): + return s % kwds + + +class CMSWeb(object): + def __init__(self): + self.URL_CMSWEB_BASE = "cmsweb.cern.ch" + self.URL_PHEDEX_BLOCKREPLICAS = "/phedex/datasvc/json/prod/blockreplicas" + self.URL_DBS_DATASETS = "/dbs/prod/global/DBSReader/datasets" + self.URL_DBS_FILES = "/dbs/prod/global/DBSReader/files" + self.URL_DBS_RUNS = "/dbs/prod/global/DBSReader/runs" + self.URL_DBS_BLOCKS = "/dbs/prod/global/DBSReader/blocks" + self.conn = HTTPSConnection( + self.URL_CMSWEB_BASE, cert_file="/tmp/x509up_u{0}".format(getuid()), timeout=30 + ) + self.reply_cache = {} + self.last_result = "" + self.errors = 0 + + def __del__(self): + self.conn.close() + + def get_cmsweb_data(self, url): + self.last_result = url + if url in self.reply_cache: + return True, self.reply_cache[url] + msg = "" + try: + self.conn.request("GET", url) + msg = self.conn.getresponse() + if msg.status != 200: + self.errors = self.errors + 1 + print("Result: {0} {1}: {2}".format(msg.status, msg.reason, url)) + return False, {} + self.reply_cache[url] = json.loads(msg.read()) + return True, self.reply_cache[url] + except Exception as e: + print("Error:", e, url) + self.errors = self.errors + 1 + return False, {} + + def search(self, lfn): + lfn_data = { + "ds_status": "UNKNOWN", + "ds_block": "UNKNOWN", + "ds_owner": "UNKNOWN", + "at_cern": "UNKNOWN", + "dataset": "UNKNOWN", + "ds_files": "0", + } + + # Find the block + jmsg = self.search_lfn(lfn) + if not jmsg: + return lfn_data + block = jmsg[0]["block_name"] + dataset = jmsg[0]["dataset"] + lfn_data["ds_block"] = block + lfn_data["dataset"] = dataset + + # Check if dataset is still VALID + status, res = self.search_dataset_status(dataset) + if status: + lfn_data["ds_status"] = res + + # Check if dataset/block exists at T2_CH_CERN and belongs to IB RelVals group + status, res = self.search_block(block) + if status: + for x in res: + lfn_data[x] = res[x] + return lfn_data + + def search_block(self, block): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format(self.URL_PHEDEX_BLOCKREPLICAS, urlencode({"block": block})) + ) + if not status: + return False, {} + if len(jmsg["phedex"]["block"]) == 0: + return False, {} + block_data = {"at_cern": "no", "replicas": [], "ds_files": "0", "ds_owner": "UNKNOWN"} + for replica in jmsg["phedex"]["block"][0]["replica"]: + if (not "group" in replica) or (not replica["group"]): + continue + block_data["replica"].append(replica["node"]) + block_data["ds_files"] = str(replica["files"]) + block_data["ds_owner"] = replica["group"].strip().replace(" ", "_") + if replica["node"] == "T2_CH_CERN": + block_data["at_cern"] = "yes" + return True, block_data + + def search_dataset_status(self, dataset): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format( + self.URL_DBS_DATASETS, + urlencode({"detail": 1, "dataset_access_type": "*", "dataset": dataset}), + 
) + ) + if not status: + return False, "" + return True, jmsg[0]["dataset_access_type"].strip().replace(" ", "_") + + def search_lfn(self, lfn): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format( + self.URL_DBS_BLOCKS, urlencode({"detail": 1, "logical_file_name": lfn}) + ) + ) + if not status: + return {} + return jmsg + + def search_files(self, dataset): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format(self.URL_DBS_FILES, urlencode({"detail": 1, "dataset": dataset})) + ) + if not status: + return {} + return jmsg + + def search_runs(self, dataset): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format(self.URL_DBS_RUNS, urlencode({"dataset": dataset})) + ) + if not status: + return {} + return jmsg + + def search_blocks(self, dataset): + status, jmsg = self.get_cmsweb_data( + "{0}?{1}".format(self.URL_DBS_BLOCKS, urlencode({"dataset": dataset, "detail": 1})) + ) + if not status: + return {} + return jmsg +if __name__ == "__main__": + from optparse import OptionParser + + parser = OptionParser(usage="%prog ") + opts, args = parser.parse_args() + + cmsweb = None + for data in args: + if not cmsweb: + cmsweb = CMSWeb() + if data.endswith(".root"): + cmsweb.search(data, {}) + else: + cmsweb.search_dataset(data.split("#")[0]) + if "#" in data: + cmsweb.search_block(data) + info = {data: cmsweb.reply_cache} + print(json.dumps(info, indent=2, sort_keys=True, separators=(",", ": "))) + cmsweb.reply_cache = {} diff --git a/das-utils/cleanup-unused-ibeos.py b/das-utils/cleanup-unused-ibeos.py index 12906fdb041f..8a64a84aa303 100755 --- a/das-utils/cleanup-unused-ibeos.py +++ b/das-utils/cleanup-unused-ibeos.py @@ -5,6 +5,7 @@ from sys import argv, exit from os.path import dirname, abspath import sys + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import top level modules from _py2with3compatibility import run_cmd @@ -13,86 +14,94 @@ eos_base = "/eos/cms/store/user/cmsbuild" unused_days_threshold = 360 try: - days=int(argv[1]) + days = int(argv[1]) except: - days=30 -if days<30: - days=30 -if (unused_days_threshold-days)<180: unused_days_threshold=days+180 -active_days_threshold = int(unused_days_threshold/2) + days = 30 +if days < 30: + days = 30 +if (unused_days_threshold - days) < 180: + unused_days_threshold = days + 180 +active_days_threshold = int(unused_days_threshold / 2) + def get_unused_days(eosfile): - e, o = run_cmd("%s fileinfo %s | grep 'Modify:' | sed 's|.* Timestamp: ||'" % (eos_cmd, eosfile)) - if e or (o == ""): - print("Error: Getting timestamp for %s\n%s" % (eosfile, o)) - return -1 - return int((time()-float(o))/86400) + e, o = run_cmd( + "%s fileinfo %s | grep 'Modify:' | sed 's|.* Timestamp: ||'" % (eos_cmd, eosfile) + ) + if e or (o == ""): + print("Error: Getting timestamp for %s\n%s" % (eosfile, o)) + return -1 + return int((time() - float(o)) / 86400) -e , o = run_cmd("PYTHONPATH=%s/.. %s/ib-datasets.py --days %s" % (script_path, script_path, days)) + +e, o = run_cmd("PYTHONPATH=%s/.. 
%s/ib-datasets.py --days %s" % (script_path, script_path, days)) if e: - print(o) - exit(1) + print(o) + exit(1) jdata = json.loads(o) used = {} -for o in jdata['hits']['hits']: - used[o['_source']['lfn'].strip()]=1 +for o in jdata["hits"]["hits"]: + used[o["_source"]["lfn"].strip()] = 1 e, o = run_cmd("%s find -f %s" % (eos_cmd, eos_base)) if e: - print(o) - exit(1) + print(o) + exit(1) total = 0 active = 0 unused = [] all_files = [] for pfn in o.split("\n"): - l = pfn.replace(eos_base,"") - if not l.startswith("/store/"): - if l.endswith(".root.unused"): - pfn = pfn.replace(".root.unused", ".root") - run_cmd("%s file rename %s.unused %s" % (eos_cmd, pfn, pfn)) - continue - all_files.append(l) - if not l.endswith(".root"): continue - total += 1 - if l in used: - run_cmd("%s file touch %s" % (eos_cmd, pfn)) - active += 1 - continue - unused_days = get_unused_days(pfn) - print("%s unused for last %s days." % (pfn,unused_days)) - if ((unused_days+days)1) and (fields[0]==xf): - try: - res = res + " [" +",".join([str(i) for i in item[xf][0][field_map[xf]]])+ "]" - except Exception as e: - with open(efile, "w") as ofile: - ofile.write("Wrong DAS result format for lumi\n") - ofile.write(json.dumps(item)) - ofile.write("\n%s\n" % e) - print(" Failed to load das output:",sha,e) + +def run_das_client( + outfile, query, override, dasclient="das_client", options="", threshold=900, retry=5, limit=0 +): + sha = basename(outfile) + field = query.split(" ", 1)[0] + if "=" in field: + field = field.split("=", 1)[0] + fields = field.split(",") + field_filter = "" + field = fields[-1] + if field in ["file", "site", "dataset"]: + field_filter = " | grep %s.name | sort %s.name | unique" % (field, field) + retry_str = "" + if "das_client" in dasclient: + retry_str = "--retry=%s" % retry + das_cmd = "%s --format=json --limit=%s --query '%s%s' %s --threshold=%s %s" % ( + dasclient, + limit, + query, + field_filter, + retry_str, + threshold, + options, + ) + print(" Running: ", sha, das_cmd) + print(" Fields:", sha, fields) + stime = time() + err, out = run_cmd(das_cmd) + print(" QueryTime: [%s] %s" % (int(time() - stime), query)) + if opts.debug: + print("DEBUG OUT:\n%s\n%s" % (err, out)) + efile = "%s.error" % outfile + with open(efile, "w") as ofile: + ofile.write(out) + if err: + print(" DAS ERROR:", sha, out) return False - if fields[0]=="file" and res in ignore_lfn: - print(" Ignoring %s" % res) - continue - if not res in results: results.append(res) - print(" Results:",sha,len(results)) - if (len(results)==0) and ('site=T2_CH_CERN' in query): - query = query.replace("site=T2_CH_CERN","").strip() - lmt = 0 - if "file" in fields: lmt = 100 - print("Removed T2_CH_CERN restrictions and limit set to %s: %s" % (lmt, query)) - return run_das_client(outfile, query, override, dasclient, options, threshold, retry, limit=lmt) - if results or override: - xfile = outfile+".json" - write_json (xfile+".tmp", jdata) - if exists (xfile): - e, o = run_cmd("diff -u %s %s.tmp | grep '^+ ' | sed 's| ||g;s|\"||g;s|^+[a-zA-Z0-9][a-zA-Z0-9_]*:||;s|,$||' | grep -v '[0-9][0-9]*\(\.[0-9]*\|\)$'" % (xfile,xfile)) - if o: - run_cmd("mv %s.tmp %s" % (xfile,xfile)) - else: - run_cmd("rm %s.tmp" % xfile) - else: - run_cmd("mv %s.tmp %s" % (xfile,xfile)) - print(" Success %s '%s', found %s results." 
% (sha, query, len(results))) - if results: - with open(outfile, "w") as ofile: - for res in sorted(results): - ofile.write(res+'\n') - run_cmd("echo '%s' > %s.timestamp" % (int(time()), outfile)) - else: - run_cmd("rm -f %s" % (outfile)) - return True + try: + jdata = json.loads(out) + except Exception as e: + print(" Failed to load das output:", sha, e) + return False + if ( + (not "status" in jdata) + or (jdata["status"] != "ok") + or (not "data" in jdata) + or (("ecode" in jdata) and (jdata["ecode"] != "")) + ): + print("Failed: %s %s\n %s" % (sha, query, out)) + return False + all_ok = True + for fx in fields: + fn = field_map[fx] + for item in jdata["data"]: + try: + if ( + (not fx in item) + or (not item[fx]) + or (not fn in item[fx][0]) + or (item[fx][0][fn] is None) + ): + all_ok = False + except Exception as e: + with open(efile, "w") as ofile: + ofile.write("Wrong DAS result format for %s,%s\n" % (fn, fx)) + ofile.write(json.dumps(item)) + ofile.write("\n%s\n" % e) + return False + if not all_ok: + # if 'site=T2_CH_CERN' in query: + # run_cmd("rm -f %s" % efile) + # query = query.replace("site=T2_CH_CERN","").strip() + # lmt = 0 + # if "file" in fields: lmt = 100 + # print("Removed T2_CH_CERN restrictions and limit set to %s: %s" % (lmt, query)) + # return run_das_client(outfile, query, override, dasclient, options, threshold, retry, limit=lmt) + print(" DAS WRONG Results:", fields, sha, out) + return False + run_cmd("rm -f %s" % efile) + results = [] + for item in jdata["data"]: + res = str(item[field][0][field_map[field]]) + xf = "lumi" + if (len(fields) > 1) and (fields[0] == xf): + try: + res = res + " [" + ",".join([str(i) for i in item[xf][0][field_map[xf]]]) + "]" + except Exception as e: + with open(efile, "w") as ofile: + ofile.write("Wrong DAS result format for lumi\n") + ofile.write(json.dumps(item)) + ofile.write("\n%s\n" % e) + print(" Failed to load das output:", sha, e) + return False + if fields[0] == "file" and res in ignore_lfn: + print(" Ignoring %s" % res) + continue + if not res in results: + results.append(res) + print(" Results:", sha, len(results)) + if (len(results) == 0) and ("site=T2_CH_CERN" in query): + query = query.replace("site=T2_CH_CERN", "").strip() + lmt = 0 + if "file" in fields: + lmt = 100 + print("Removed T2_CH_CERN restrictions and limit set to %s: %s" % (lmt, query)) + return run_das_client( + outfile, query, override, dasclient, options, threshold, retry, limit=lmt + ) + if results or override: + xfile = outfile + ".json" + write_json(xfile + ".tmp", jdata) + if exists(xfile): + e, o = run_cmd( + "diff -u %s %s.tmp | grep '^+ ' | sed 's| ||g;s|\"||g;s|^+[a-zA-Z0-9][a-zA-Z0-9_]*:||;s|,$||' | grep -v '[0-9][0-9]*\(\.[0-9]*\|\)$'" + % (xfile, xfile) + ) + if o: + run_cmd("mv %s.tmp %s" % (xfile, xfile)) + else: + run_cmd("rm %s.tmp" % xfile) + else: + run_cmd("mv %s.tmp %s" % (xfile, xfile)) + print(" Success %s '%s', found %s results." 
% (sha, query, len(results))) + if results: + with open(outfile, "w") as ofile: + for res in sorted(results): + ofile.write(res + "\n") + run_cmd("echo '%s' > %s.timestamp" % (int(time()), outfile)) + else: + run_cmd("rm -f %s" % (outfile)) + return True + def cleanup_timestamps(store): - run_cmd("find %s -name '*.timestamp' | xargs rm -f" % store) - run_cmd("find %s -name '*.tmp' | xargs rm -f" % store) - run_cmd("find %s -name '*.error' | xargs rm -f" % store) + run_cmd("find %s -name '*.timestamp' | xargs rm -f" % store) + run_cmd("find %s -name '*.tmp' | xargs rm -f" % store) + run_cmd("find %s -name '*.error' | xargs rm -f" % store) + def read_timestramps(timestramps_file): - timestramps = {} - if exists (timestramps_file): timestramps = read_json (timestramps_file) - return timestramps + timestramps = {} + if exists(timestramps_file): + timestramps = read_json(timestramps_file) + return timestramps + def update_timestamp(timestramps, timestramps_file, store): - e, o = run_cmd("find %s -name '*.timestamp'" % store) - for ts_file in o.split("\n"): - if not ts_file.endswith('.timestamp'): continue - sha = basename(ts_file).replace(".timestamp","") - with open(ts_file) as f: - timestramps[sha] = int(float(f.readlines()[0].strip())) - write_json(timestramps_file, timestramps) - cleanup_timestamps(store) + e, o = run_cmd("find %s -name '*.timestamp'" % store) + for ts_file in o.split("\n"): + if not ts_file.endswith(".timestamp"): + continue + sha = basename(ts_file).replace(".timestamp", "") + with open(ts_file) as f: + timestramps[sha] = int(float(f.readlines()[0].strip())) + write_json(timestramps_file, timestramps) + cleanup_timestamps(store) IGNORE_QUERIES = {} -if 'IGNORE_DAS_QUERY_SHA' in environ: - for s in environ['IGNORE_DAS_QUERY_SHA'].split(','): - IGNORE_QUERIES[s]=1 +if "IGNORE_DAS_QUERY_SHA" in environ: + for s in environ["IGNORE_DAS_QUERY_SHA"].split(","): + IGNORE_QUERIES[s] = 1 if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-t", "--threshold", dest="threshold", help="Threshold time in sec to refresh query results. Default is 86400s", type=int, default=86400) - parser.add_option("-o", "--override", dest="override", help="Override previous cache requests if cache empty results are returned from das", action="store_true", default=False) - parser.add_option("-j", "--jobs", dest="jobs", help="Parallel das_client queries to run. Default is equal to cpu count but max value is 32", type=int, default=-1) - parser.add_option("-s", "--store", dest="store", help="Name of object store directory to store the das queries results", default=None) - parser.add_option("-c", "--client", dest="client", help="Das client to use either das_client or dasgoclient", default="das_client") - parser.add_option("-q", "--query", dest="query", help="Only process this query", default=None) - parser.add_option("-d", "--debug", dest="debug", help="Run debug mode", action="store_true", default=False) + parser = OptionParser(usage="%prog ") + parser.add_option( + "-t", + "--threshold", + dest="threshold", + help="Threshold time in sec to refresh query results. Default is 86400s", + type=int, + default=86400, + ) + parser.add_option( + "-o", + "--override", + dest="override", + help="Override previous cache requests if cache empty results are returned from das", + action="store_true", + default=False, + ) + parser.add_option( + "-j", + "--jobs", + dest="jobs", + help="Parallel das_client queries to run. 
Default is equal to cpu count but max value is 32", + type=int, + default=-1, + ) + parser.add_option( + "-s", + "--store", + dest="store", + help="Name of object store directory to store the das queries results", + default=None, + ) + parser.add_option( + "-c", + "--client", + dest="client", + help="Das client to use either das_client or dasgoclient", + default="das_client", + ) + parser.add_option("-q", "--query", dest="query", help="Only process this query", default=None) + parser.add_option( + "-d", "--debug", dest="debug", help="Run debug mode", action="store_true", default=False + ) - xopts = environ['DAS_CLIENT_OPTIONS'] if 'DAS_CLIENT_OPTIONS' in environ else "" - opts, args = parser.parse_args() - if (not opts.store): parser.error("Missing store directory path to store das queries objects.") + xopts = environ["DAS_CLIENT_OPTIONS"] if "DAS_CLIENT_OPTIONS" in environ else "" + opts, args = parser.parse_args() + if not opts.store: + parser.error("Missing store directory path to store das queries objects.") - query_sha = {} - if opts.query: - import hashlib - query = re.sub("= ","=",re.sub(" =","=",re.sub(" +"," ",opts.query.strip()))) - query_sha[query] = hashlib.sha256(query.encode()).hexdigest() - else: - err, qout = run_cmd("find %s -name '*.query' -type f" % opts.store) - for qfile in qout.split("\n"): - sha = basename(qfile).replace(".query","") - if not sha: continue - qs = {} - rewrite = False - for query in [line.rstrip('\n').strip() for line in open(qfile)]: - if not "=" in query: continue - if "--query " in query: - query = query.split("--query ")[1].split("'")[1] - rewrite = True - query = re.sub("= ","=",re.sub(" =","=",re.sub(" +"," ",query))) - query_sha[query]=sha - qs[query]=1 - if rewrite: - ofile = open(qfile, 'w') - if ofile: - for q in qs: ofile.write("%s\n" % q) - ofile.close() + query_sha = {} + if opts.query: + import hashlib - xqueries = {} - for query in query_sha: - if 'site=T2_CH_CERN' in query: - query = re.sub(" +"," ",query.replace('site=T2_CH_CERN','').strip()) - if not query in query_sha: - from hashlib import sha256 - sha = sha256(query.encode()).hexdigest() - xqueries[query] = sha - qdir = join(opts.store, sha[:2]) - run_cmd("mkdir -p %s" % qdir) - with open(join(qdir, sha+'.query'), "w") as ofile: - ofile.write("%s\n" % query) + query = re.sub("= ", "=", re.sub(" =", "=", re.sub(" +", " ", opts.query.strip()))) + query_sha[query] = hashlib.sha256(query.encode()).hexdigest() + else: + err, qout = run_cmd("find %s -name '*.query' -type f" % opts.store) + for qfile in qout.split("\n"): + sha = basename(qfile).replace(".query", "") + if not sha: + continue + qs = {} + rewrite = False + for query in [line.rstrip("\n").strip() for line in open(qfile)]: + if not "=" in query: + continue + if "--query " in query: + query = query.split("--query ")[1].split("'")[1] + rewrite = True + query = re.sub("= ", "=", re.sub(" =", "=", re.sub(" +", " ", query))) + query_sha[query] = sha + qs[query] = 1 + if rewrite: + ofile = open(qfile, "w") + if ofile: + for q in qs: + ofile.write("%s\n" % q) + ofile.close() + + xqueries = {} + for query in query_sha: + if "site=T2_CH_CERN" in query: + query = re.sub(" +", " ", query.replace("site=T2_CH_CERN", "").strip()) + if not query in query_sha: + from hashlib import sha256 - for query in xqueries: - query_sha[query] = xqueries[query] - print("Added new query: %s => %s" % (query_sha[query], query)) - tqueries = len(query_sha) - print("Found %s unique queries" % (tqueries)) - jobs = opts.jobs - if jobs <= 0: - e, o = 
run_cmd("nproc") - jobs = int(o) - if jobs>32: jobs=32 - print("Parallel jobs:", jobs) + sha = sha256(query.encode()).hexdigest() + xqueries[query] = sha + qdir = join(opts.store, sha[:2]) + run_cmd("mkdir -p %s" % qdir) + with open(join(qdir, sha + ".query"), "w") as ofile: + ofile.write("%s\n" % query) - run_cmd("mkdir -p %s" % opts.store) - threads = [] - nquery = 0 - inCache = 0 - DasSearch = 0 - error = 0 - cleanup_timestamps (opts.store) - timestramps_file = join (opts.store, "timestamps.json") - timestramps = read_timestramps (timestramps_file) - vold_caches = {} - run_queries = {} - vold_threshold = 90 - for query in query_sha: - nquery += 1 - sha = query_sha[query] - if sha in IGNORE_QUERIES: - print("IGNORED : %s" % sha) - continue - outfile = "%s/%s/%s" % (opts.store, sha[0:2], sha) - print("[%s/%s] Quering %s '%s'" % (nquery, tqueries, sha, query)) - vold = False - if exists(outfile): - xtime = 0 - fcount = 0 - if sha in timestramps: - xtime = timestramps[sha] - with open(outfile) as ofile: - fcount = len(ofile.readlines()) - dtime = int(time())-xtime - vdays = int(dtime/86400) - vold = (vdays>=vold_threshold) - print(" Days since last update:",vdays) - if (dtime<=opts.threshold) and (fcount>0): - jfile = "%s.json" % outfile - okcache=exists(jfile) - print(" JSON results found",sha,okcache) - if okcache: - try: - xdata = read_json (jfile) - if (not "status" in xdata) or (xdata['status'] != 'ok') or (not "data" in xdata): - okcache=False + for query in xqueries: + query_sha[query] = xqueries[query] + print("Added new query: %s => %s" % (query_sha[query], query)) + tqueries = len(query_sha) + print("Found %s unique queries" % (tqueries)) + jobs = opts.jobs + if jobs <= 0: + e, o = run_cmd("nproc") + jobs = int(o) + if jobs > 32: + jobs = 32 + print("Parallel jobs:", jobs) + + run_cmd("mkdir -p %s" % opts.store) + threads = [] + nquery = 0 + inCache = 0 + DasSearch = 0 + error = 0 + cleanup_timestamps(opts.store) + timestramps_file = join(opts.store, "timestamps.json") + timestramps = read_timestramps(timestramps_file) + vold_caches = {} + run_queries = {} + vold_threshold = 90 + for query in query_sha: + nquery += 1 + sha = query_sha[query] + if sha in IGNORE_QUERIES: + print("IGNORED : %s" % sha) + continue + outfile = "%s/%s/%s" % (opts.store, sha[0:2], sha) + print("[%s/%s] Quering %s '%s'" % (nquery, tqueries, sha, query)) + vold = False + if exists(outfile): + xtime = 0 + fcount = 0 + if sha in timestramps: + xtime = timestramps[sha] + with open(outfile) as ofile: + fcount = len(ofile.readlines()) + dtime = int(time()) - xtime + vdays = int(dtime / 86400) + vold = vdays >= vold_threshold + print(" Days since last update:", vdays) + if (dtime <= opts.threshold) and (fcount > 0): + jfile = "%s.json" % outfile + okcache = exists(jfile) + print(" JSON results found", sha, okcache) + if okcache: + try: + xdata = read_json(jfile) + if ( + (not "status" in xdata) + or (xdata["status"] != "ok") + or (not "data" in xdata) + ): + okcache = False + else: + for item in xdata["data"]: + if not okcache: + break + for x in field_map: + if not x in item: + continue + if len(item[x]) > 0: + continue + okcache = False + break + except IOError as e: + print( + " ERROR: [%s/%s] Reading json cached file %s" + % (nquery, tqueries, outfile) + ) + e, o = run_cmd("cat %s" % outfile) + print(o) + okcache = False + if okcache: + print( + " %s Found in cache with %s results (age: %s src)" % (sha, fcount, dtime) + ) + inCache += 1 + continue + else: + print(" Refreshing cache as previous Json was 
empty:", sha) + elif fcount > 0: + print(" Refreshing as cache expired (age: %s sec)" % dtime) + else: + print(" Retrying as cache with empty results found.") + else: + print(" No cache file found %s" % sha) + if vold: + vold_caches[query] = 1 + continue + else: + run_queries[query] = 1 + DasSearch += 1 + while True: + tcount = len(threads) + if tcount < jobs: + print(" Searching DAS (threads: %s)" % tcount) + try: + t = threading.Thread( + target=run_das_client, + args=(outfile, query, opts.override, opts.client, xopts), + ) + t.start() + threads.append(t) + sleep(0.1) + except Exception as e: + print("ERROR threading das query cache: caught exception: " + str(e)) + error += 1 + break else: - for item in xdata["data"]: - if not okcache: break - for x in field_map: - if not x in item: continue - if len(item[x])>0: continue - okcache=False - break - except IOError as e: - print(" ERROR: [%s/%s] Reading json cached file %s" % (nquery, tqueries, outfile)) - e, o = run_cmd("cat %s" % outfile) - print(o) - okcache=False - if okcache: - print(" %s Found in cache with %s results (age: %s src)" % (sha, fcount , dtime)) - inCache += 1 - continue - else: print(" Refreshing cache as previous Json was empty:", sha) - elif fcount>0: print(" Refreshing as cache expired (age: %s sec)" % dtime) - else: print(" Retrying as cache with empty results found.") - else: print(" No cache file found %s" % sha) - if vold: - vold_caches[query] = 1 - continue + threads = [t for t in threads if t.is_alive()] + sleep(0.5) + for t in threads: + t.join() + failed_queries = 0 + e, o = run_cmd("find %s -name '*.error'" % opts.store) + for f in o.split("\n"): + if not f.endswith(".error"): + continue + qf = f.replace(".error", ".query") + print("########################################") + e, o = run_cmd("cat %s ; cat %s" % (qf, f)) + print(o) + failed_queries += 1 + print("Total queries: %s" % tqueries) + print("Found in object store: %s" % inCache) + print("DAS Search: %s" % DasSearch) + print("Total Queries Failed:", failed_queries) + print("Caches older than %s days: %s" % (vold_threshold, len(vold_caches))) + # print(" ","\n ".join(list(vold_caches.keys()))) + # print("Queries which were run:",len(run_queries)) + # print(" ","\n ".join(list(run_queries.keys()))) + print("Process state:", error) + if not error: + update_timestamp(timestramps, timestramps_file, opts.store) else: - run_queries[query] = 1 - DasSearch += 1 - while True: - tcount = len(threads) - if(tcount < jobs): - print(" Searching DAS (threads: %s)" % tcount) - try: - t = threading.Thread(target=run_das_client, args=(outfile, query, opts.override, opts.client, xopts)) - t.start() - threads.append(t) - sleep(0.1) - except Exception as e: - print("ERROR threading das query cache: caught exception: " + str(e)) - error += 1 - break - else: - threads = [t for t in threads if t.is_alive()] - sleep(0.5) - for t in threads: t.join() - failed_queries = 0 - e , o = run_cmd("find %s -name '*.error'" % opts.store) - for f in o.split("\n"): - if not f.endswith(".error"): continue - qf = f.replace(".error",".query") - print("########################################") - e , o = run_cmd("cat %s ; cat %s" % (qf, f)) - print(o) - failed_queries += 1 - print("Total queries: %s" % tqueries) - print("Found in object store: %s" % inCache) - print("DAS Search: %s" % DasSearch) - print("Total Queries Failed:",failed_queries) - print("Caches older than %s days: %s" % (vold_threshold, len(vold_caches))) - #print(" ","\n ".join(list(vold_caches.keys()))) - #print("Queries which 
were run:",len(run_queries)) - #print(" ","\n ".join(list(run_queries.keys()))) - print("Process state:",error) - if not error:update_timestamp(timestramps, timestramps_file, opts.store) - else: cleanup_timestamps (opts.store) - exit(error) + cleanup_timestamps(opts.store) + exit(error) diff --git a/das-utils/ib-datasets.py b/das-utils/ib-datasets.py index 81810a191d56..2bf7abc332c0 100755 --- a/das-utils/ib-datasets.py +++ b/das-utils/ib-datasets.py @@ -4,36 +4,63 @@ from time import time import json, sys -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import es_query from _py2with3compatibility import run_cmd if __name__ == "__main__": - from optparse import OptionParser - parser = OptionParser(usage="%prog ") - parser.add_option("-r", "--release", dest="release", help="Release filter", type=str, default=".*") - parser.add_option("-a", "--architecture", dest="arch", help="SCRAM_ARCH filter. Production arch for a release cycle is used if found otherwise slc6_amd64_gcc530", type=str, default=None) - parser.add_option("-d", "--days", dest="days", help="Files access in last n days", type=int, default=7) - parser.add_option("-j", "--job", dest="job", help="Parallel jobs to run", type=int, default=4) - parser.add_option("-p", "--page", dest="page_size", help="Page size, default 0 means no page and get all data in one go", type=int, default=0) - opts, args = parser.parse_args() - - if not opts.arch: - if opts.release==".*": opts.arch=".*" - else: - script_path = abspath(dirname(argv[0])) - err, out = run_cmd("grep 'RELEASE_QUEUE=%s;' %s/config.map | grep -v 'DISABLED=1;' | grep 'PROD_ARCH=1;' | tr ';' '\n' | grep 'SCRAM_ARCH=' | sed 's|.*=||'" % (opts.release, script_path)) - if err: opts.arch="slc6_amd64_gcc530" - else: opts.arch=out - if opts.release!=".*": opts.release=opts.release+".*" + from optparse import OptionParser - end_time = int(time()*1000) - start_time = end_time -int(86400*1000*opts.days) - query = "release:/%s/ AND architecture:/%s/" % (opts.release.lower(), opts.arch) - es_data = es_query('ib-dataset-*', query, start_time,end_time,scroll=True,fields=["lfn"]) - print(json.dumps(es_data, indent=2, sort_keys=True, separators=(',',': '))) + parser = OptionParser(usage="%prog ") + parser.add_option( + "-r", "--release", dest="release", help="Release filter", type=str, default=".*" + ) + parser.add_option( + "-a", + "--architecture", + dest="arch", + help="SCRAM_ARCH filter. 
Production arch for a release cycle is used if found otherwise slc6_amd64_gcc530", + type=str, + default=None, + ) + parser.add_option( + "-d", "--days", dest="days", help="Files access in last n days", type=int, default=7 + ) + parser.add_option("-j", "--job", dest="job", help="Parallel jobs to run", type=int, default=4) + parser.add_option( + "-p", + "--page", + dest="page_size", + help="Page size, default 0 means no page and get all data in one go", + type=int, + default=0, + ) + opts, args = parser.parse_args() + if not opts.arch: + if opts.release == ".*": + opts.arch = ".*" + else: + script_path = abspath(dirname(argv[0])) + err, out = run_cmd( + "grep 'RELEASE_QUEUE=%s;' %s/config.map | grep -v 'DISABLED=1;' | grep 'PROD_ARCH=1;' | tr ';' '\n' | grep 'SCRAM_ARCH=' | sed 's|.*=||'" + % (opts.release, script_path) + ) + if err: + opts.arch = "slc6_amd64_gcc530" + else: + opts.arch = out + if opts.release != ".*": + opts.release = opts.release + ".*" + + end_time = int(time() * 1000) + start_time = end_time - int(86400 * 1000 * opts.days) + query = "release:/%s/ AND architecture:/%s/" % (opts.release.lower(), opts.arch) + es_data = es_query("ib-dataset-*", query, start_time, end_time, scroll=True, fields=["lfn"]) + print(json.dumps(es_data, indent=2, sort_keys=True, separators=(",", ": "))) diff --git a/das-utils/ib-eos-files.py b/das-utils/ib-eos-files.py index d0e5ea1516fb..632d76192ea4 100755 --- a/das-utils/ib-eos-files.py +++ b/das-utils/ib-eos-files.py @@ -7,6 +7,7 @@ from time import sleep, time import re import sys + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules from _py2with3compatibility import getstatusoutput @@ -14,213 +15,295 @@ eos_base = "/eos/cms/store/user/cmsbuild" opts = None + def get_alive_threads(threads): - alive = [] - for t in threads: - if t.is_alive(): alive.append(t) - return alive + alive = [] + for t in threads: + if t.is_alive(): + alive.append(t) + return alive + try: - CMS_BOT_DIR = dirname(abspath(__file__)) -except Exception as e : - from sys import argv - CMS_BOT_DIR = dirname( abspath(argv[0])) - -def run_cmd(cmd, exit_on_error=True,debug=True): - if debug: print(">> %s" % cmd) - err, out = getstatusoutput(cmd) - if err: - if exit_on_error: - print(out) - exit(1) - return err, out + CMS_BOT_DIR = dirname(abspath(__file__)) +except Exception as e: + from sys import argv + + CMS_BOT_DIR = dirname(abspath(argv[0])) + + +def run_cmd(cmd, exit_on_error=True, debug=True): + if debug: + print(">> %s" % cmd) + err, out = getstatusoutput(cmd) + if err: + if exit_on_error: + print(out) + exit(1) + return err, out + def get_lfns_from_kibana(days=7): - print("Getting information from CMS Elasticsearch....") - kibana_file = "lfn_kibana.json" - cmd = "PYTHONPATH=%s/.. %s/ib-datasets.py --days %s > %s; cat %s" % (CMS_BOT_DIR, CMS_BOT_DIR, days, kibana_file, kibana_file) - if exists(kibana_file): cmd = "cat %s" % kibana_file - err, from_kibaba = run_cmd(cmd) - print("Collecting unique LFN from Kibana ....") - used_lfns = {} - for hit in json.loads(from_kibaba)["hits"]["hits"]: - if not "_source" in hit: continue - if not "lfn" in hit["_source"]: continue - lfn = hit["_source"]["lfn"].strip() - if (not lfn) or ("/store/user/cmsbuild" in lfn): continue - used_lfns[lfn]=1 - return list(used_lfns.keys()) + print("Getting information from CMS Elasticsearch....") + kibana_file = "lfn_kibana.json" + cmd = "PYTHONPATH=%s/.. 
%s/ib-datasets.py --days %s > %s; cat %s" % ( + CMS_BOT_DIR, + CMS_BOT_DIR, + days, + kibana_file, + kibana_file, + ) + if exists(kibana_file): + cmd = "cat %s" % kibana_file + err, from_kibaba = run_cmd(cmd) + print("Collecting unique LFN from Kibana ....") + used_lfns = {} + for hit in json.loads(from_kibaba)["hits"]["hits"]: + if not "_source" in hit: + continue + if not "lfn" in hit["_source"]: + continue + lfn = hit["_source"]["lfn"].strip() + if (not lfn) or ("/store/user/cmsbuild" in lfn): + continue + used_lfns[lfn] = 1 + return list(used_lfns.keys()) + def get_lfns_from_das(lfn_per_query=1): - if lfn_per_query<1: return [] - print("Getting information from DAS queries....") - err, out = run_cmd("test -d cms-sw.github.io || git clone --depth 1 https://github.com/cms-sw/cms-sw.github.io.git") - err, qfiles = run_cmd("ls cms-sw.github.io/das_queries/*/*.query") - used_lfns = {} - for qfile in qfiles.split("\n"): - lfn_file = qfile.strip()[:-6] - if not exists(lfn_file): continue - lfn_count = 0 - err, out = run_cmd("grep '/store/' %s" % lfn_file,debug=False, exit_on_error=False) - for lfn in out.split("\n"): - if not "/store/" in lfn: continue - lfn = lfn.strip("\n").replace('"',"").replace(',',"").strip(" ") - used_lfns[lfn]=1 - lfn_count += 1 - if lfn_count>=lfn_per_query: break - return list(used_lfns.keys()) + if lfn_per_query < 1: + return [] + print("Getting information from DAS queries....") + err, out = run_cmd( + "test -d cms-sw.github.io || git clone --depth 1 https://github.com/cms-sw/cms-sw.github.io.git" + ) + err, qfiles = run_cmd("ls cms-sw.github.io/das_queries/*/*.query") + used_lfns = {} + for qfile in qfiles.split("\n"): + lfn_file = qfile.strip()[:-6] + if not exists(lfn_file): + continue + lfn_count = 0 + err, out = run_cmd("grep '/store/' %s" % lfn_file, debug=False, exit_on_error=False) + for lfn in out.split("\n"): + if not "/store/" in lfn: + continue + lfn = lfn.strip("\n").replace('"', "").replace(",", "").strip(" ") + used_lfns[lfn] = 1 + lfn_count += 1 + if lfn_count >= lfn_per_query: + break + return list(used_lfns.keys()) + def get_lfns_for_cmsbuild_eos(lfn_per_query=1, days=7): - das_lfns = get_lfns_from_das(lfn_per_query) - kibana_lfns = get_lfns_from_kibana(days) - eos_lfns = {} - for lfn in kibana_lfns+das_lfns: eos_lfns[lfn.strip()]=1 - print("LFNs from Kibana: %s" % len(kibana_lfns)) - print("LFNs from DAS Queries: %s" % len(das_lfns)) - print("Total LFNs: %s" % len(eos_lfns)) - return list(eos_lfns.keys()) + das_lfns = get_lfns_from_das(lfn_per_query) + kibana_lfns = get_lfns_from_kibana(days) + eos_lfns = {} + for lfn in kibana_lfns + das_lfns: + eos_lfns[lfn.strip()] = 1 + print("LFNs from Kibana: %s" % len(kibana_lfns)) + print("LFNs from DAS Queries: %s" % len(das_lfns)) + print("Total LFNs: %s" % len(eos_lfns)) + return list(eos_lfns.keys()) + def copy_to_eos(lfn, log_file): - cmd = "%s/copy-ib-lfn-to-eos.sh %s %s >%s 2>&1" % (CMS_BOT_DIR, lfn, opts.redirector,log_file) - run_cmd(cmd,exit_on_error=False,debug=False) - e, o = run_cmd("grep ' echo ALL_OK' %s" % log_file, exit_on_error=False,debug=False) - if 'ALL_OK' in o: - print(" Success: %s" % lfn) - else: - print(" Failed: %s" % lfn) - return + cmd = "%s/copy-ib-lfn-to-eos.sh %s %s >%s 2>&1" % (CMS_BOT_DIR, lfn, opts.redirector, log_file) + run_cmd(cmd, exit_on_error=False, debug=False) + e, o = run_cmd("grep ' echo ALL_OK' %s" % log_file, exit_on_error=False, debug=False) + if "ALL_OK" in o: + print(" Success: %s" % lfn) + else: + print(" Failed: %s" % lfn) + return + def 
kill_xrootd(lfn): - print(" Requested to kill %s" % lfn) - err, out = run_cmd("pgrep -l -f '.*/copy-ib-lfn-to-eos.sh %s .*'" % lfn) - pids = "" - for process in out.split("\n"): - if 'pgrep ' in process: continue - items = process.split(" ",1) - pids = pids+" "+process.split(" ",1)[0] - if pids: - print(" Killing %s" % pids) - run_cmd("kill -9 %s" % pids,exit_on_error=False,debug=False) - run_cmd("%s rm %s%s.tmp" % (eos_cmd,eos_base, lfn),exit_on_error=False,debug=False) + print(" Requested to kill %s" % lfn) + err, out = run_cmd("pgrep -l -f '.*/copy-ib-lfn-to-eos.sh %s .*'" % lfn) + pids = "" + for process in out.split("\n"): + if "pgrep " in process: + continue + items = process.split(" ", 1) + pids = pids + " " + process.split(" ", 1)[0] + if pids: + print(" Killing %s" % pids) + run_cmd("kill -9 %s" % pids, exit_on_error=False, debug=False) + run_cmd("%s rm %s%s.tmp" % (eos_cmd, eos_base, lfn), exit_on_error=False, debug=False) + def eos_exists(eos_file): - err, out = run_cmd("%s stat -f %s" % (eos_cmd, eos_file),exit_on_error=False,debug=False) - if err: return False - return True + err, out = run_cmd("%s stat -f %s" % (eos_cmd, eos_file), exit_on_error=False, debug=False) + if err: + return False + return True + def eos_rename(name, new_name): - print(" Rename: %s -> %s" % (name, new_name)) - err, out = run_cmd("%s file rename %s %s" % (eos_cmd, name, new_name),exit_on_error=False,debug=False) - if err: - print(out) - return False - return True + print(" Rename: %s -> %s" % (name, new_name)) + err, out = run_cmd( + "%s file rename %s %s" % (eos_cmd, name, new_name), exit_on_error=False, debug=False + ) + if err: + print(out) + return False + return True + def eos_size(eos_file): - if not eos_exists(eos_file): return -1 - err, out = run_cmd("%s ls -l %s | awk '{print $5}'" % (eos_cmd, eos_file), debug=True,exit_on_error=False) - if err or not re.match("^\d+$",out): return -1 - return int(out) + if not eos_exists(eos_file): + return -1 + err, out = run_cmd( + "%s ls -l %s | awk '{print $5}'" % (eos_cmd, eos_file), debug=True, exit_on_error=False + ) + if err or not re.match("^\d+$", out): + return -1 + return int(out) + def check_dead_transfers(threads, info, progress_check=600, init_transfer_wait=600): - thds_done = False - for t in threads: - if not t.is_alive(): - thds_done = True - continue - lfn = t.name - pcheck = int(time())-info[lfn][0] - if pcheckinfo[lfn][1]: - info[lfn][1]=pcheck - mtime = getmtime(info[lfn][3]) - err, out = run_cmd("grep '\[ *[1-9][0-9]*\%%\]' %s | tail -1" % info[lfn][3],debug=False,exit_on_error=False) - out = re.sub("^.*\[","",re.sub("\].*$","", out.split("\n")[-1].split("\r")[-1])) - if mtime!=info[lfn][2]: - info[lfn][2]=mtime - print(" In progress: %s %s" % (lfn,out)) - else: - print(" Transfer stopped: %s %s" % (lfn, out)) - kill_xrootd(lfn) - thds_done = True - return thds_done + thds_done = False + for t in threads: + if not t.is_alive(): + thds_done = True + continue + lfn = t.name + pcheck = int(time()) - info[lfn][0] + if pcheck < init_transfer_wait: + continue + pcheck = int((pcheck - init_transfer_wait) / progress_check) + if pcheck > info[lfn][1]: + info[lfn][1] = pcheck + mtime = getmtime(info[lfn][3]) + err, out = run_cmd( + "grep '\[ *[1-9][0-9]*\%%\]' %s | tail -1" % info[lfn][3], + debug=False, + exit_on_error=False, + ) + out = re.sub("^.*\[", "", re.sub("\].*$", "", out.split("\n")[-1].split("\r")[-1])) + if mtime != info[lfn][2]: + info[lfn][2] = mtime + print(" In progress: %s %s" % (lfn, out)) + else: + print(" Transfer stopped: %s 
%s" % (lfn, out)) + kill_xrootd(lfn) + thds_done = True + return thds_done + def copy_lfns_to_eos(eos_lfns): - threads = [] - all_logs = {} - logdir = "logs" - run_cmd("rm -rf %s && mkdir -p %s" % (logdir , logdir)) - job_monitor = {} - already_done =0 - total_lfns = len(eos_lfns) - eos_lfns_to_copy = [] - for lfn in eos_lfns: - if not lfn.endswith('.root'): - already_done += 1 - print("IGNORE (%s/%s): %s" % (already_done, total_lfns, lfn)) - continue - eos_file = "%s%s" % (eos_base, lfn) - if eos_exists(eos_file) or (eos_exists(eos_file+".unused") and eos_rename(eos_file+".unused", eos_file)): - already_done += 1 - print("OK (%s/%s): %s" % (already_done, total_lfns, lfn)) - elif opts.dryRun: - print("DryRun: Copy %s -> %s" % (lfn, eos_file)) - continue - else: - eos_lfns_to_copy.append(lfn) - for lfn in eos_lfns_to_copy: - eos_file = "%s%s" % (eos_base, lfn) - while True: - threads = get_alive_threads(threads) - if(len(threads) < opts.jobs): - log_file=logdir+"/"+sha256(lfn.encode()).hexdigest()+".log" - all_logs[log_file]=lfn - print("Copy (%s/%s): %s" % (already_done+len(all_logs), total_lfns, lfn)) - t = Thread(name=lfn,target=copy_to_eos, args=(lfn, log_file)) - job_monitor[lfn]=[int(time()), 0, 0,log_file] - t.start() - threads.append(t) - break - elif not check_dead_transfers(threads, job_monitor): + threads = [] + all_logs = {} + logdir = "logs" + run_cmd("rm -rf %s && mkdir -p %s" % (logdir, logdir)) + job_monitor = {} + already_done = 0 + total_lfns = len(eos_lfns) + eos_lfns_to_copy = [] + for lfn in eos_lfns: + if not lfn.endswith(".root"): + already_done += 1 + print("IGNORE (%s/%s): %s" % (already_done, total_lfns, lfn)) + continue + eos_file = "%s%s" % (eos_base, lfn) + if eos_exists(eos_file) or ( + eos_exists(eos_file + ".unused") and eos_rename(eos_file + ".unused", eos_file) + ): + already_done += 1 + print("OK (%s/%s): %s" % (already_done, total_lfns, lfn)) + elif opts.dryRun: + print("DryRun: Copy %s -> %s" % (lfn, eos_file)) + continue + else: + eos_lfns_to_copy.append(lfn) + for lfn in eos_lfns_to_copy: + eos_file = "%s%s" % (eos_base, lfn) + while True: + threads = get_alive_threads(threads) + if len(threads) < opts.jobs: + log_file = logdir + "/" + sha256(lfn.encode()).hexdigest() + ".log" + all_logs[log_file] = lfn + print("Copy (%s/%s): %s" % (already_done + len(all_logs), total_lfns, lfn)) + t = Thread(name=lfn, target=copy_to_eos, args=(lfn, log_file)) + job_monitor[lfn] = [int(time()), 0, 0, log_file] + t.start() + threads.append(t) + break + elif not check_dead_transfers(threads, job_monitor): + sleep(10) + while len(threads) > 0: sleep(10) - while len(threads)>0: - sleep(10) - threads = get_alive_threads(threads) - check_dead_transfers(threads, job_monitor) - total_failed = 0 - total_copied = 0 - for log in all_logs: - lfn = all_logs[log] - err, out = run_cmd("cat %s" % log,debug=False) - err, out = getstatusoutput("grep '^ALL_OK$' %s | wc -l" % log) - if out=="0": - total_failed+=1 - print("FAIL (%s/%s): %s" % (already_done+total_copied+total_failed, total_lfns, lfn)) - err, out = getstatusoutput("cat %s" % log) - print(out) - print("###################################") - else: - total_copied += 1 - print("OK (%s/%s): %s" % (already_done+total_copied+total_failed, total_lfns, lfn)) - run_cmd("rm -rf %s" % logdir) - print("Total LFNs: %s" % total_lfns) - print("Already available: %s" % already_done) - print("Newly fetched: %s" % total_copied) - print("Error: %s" % total_failed) - return total_failed==0 + threads = get_alive_threads(threads) + 
check_dead_transfers(threads, job_monitor) + total_failed = 0 + total_copied = 0 + for log in all_logs: + lfn = all_logs[log] + err, out = run_cmd("cat %s" % log, debug=False) + err, out = getstatusoutput("grep '^ALL_OK$' %s | wc -l" % log) + if out == "0": + total_failed += 1 + print( + "FAIL (%s/%s): %s" % (already_done + total_copied + total_failed, total_lfns, lfn) + ) + err, out = getstatusoutput("cat %s" % log) + print(out) + print("###################################") + else: + total_copied += 1 + print("OK (%s/%s): %s" % (already_done + total_copied + total_failed, total_lfns, lfn)) + run_cmd("rm -rf %s" % logdir) + print("Total LFNs: %s" % total_lfns) + print("Already available: %s" % already_done) + print("Newly fetched: %s" % total_copied) + print("Error: %s" % total_failed) + return total_failed == 0 + if __name__ == "__main__": - from optparse import OptionParser - parser = OptionParser(usage="%prog ") - parser.add_option("-r", "--redirector", dest="redirector", help="Xroot reditrector", type=str, default="root://cms-xrd-global.cern.ch") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not actually download the files", default=False) - parser.add_option("-f", "--file-per-das", dest="files_per_das", help="Number of files per das query need to be copy to EOS", type=int, default=1) - parser.add_option("-j", "--jobs", dest="jobs", help="Parallel jobs to run", type=int, default=4) - parser.add_option("-d", "--days", dest="days", help="Files access in last n days via kibana", type=int, default=7) - opts, args = parser.parse_args() - - all_OK = copy_lfns_to_eos(get_lfns_for_cmsbuild_eos(opts.files_per_das, opts.days)) - if not all_OK: exit(1) - exit(0) + from optparse import OptionParser + + parser = OptionParser(usage="%prog ") + parser.add_option( + "-r", + "--redirector", + dest="redirector", + help="Xroot reditrector", + type=str, + default="root://cms-xrd-global.cern.ch", + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not actually download the files", + default=False, + ) + parser.add_option( + "-f", + "--file-per-das", + dest="files_per_das", + help="Number of files per das query need to be copy to EOS", + type=int, + default=1, + ) + parser.add_option( + "-j", "--jobs", dest="jobs", help="Parallel jobs to run", type=int, default=4 + ) + parser.add_option( + "-d", + "--days", + dest="days", + help="Files access in last n days via kibana", + type=int, + default=7, + ) + opts, args = parser.parse_args() + all_OK = copy_lfns_to_eos(get_lfns_for_cmsbuild_eos(opts.files_per_das, opts.days)) + if not all_OK: + exit(1) + exit(0) diff --git a/das-utils/order-das-files.py b/das-utils/order-das-files.py index c4e9b15c898a..ad16763d299b 100755 --- a/das-utils/order-das-files.py +++ b/das-utils/order-das-files.py @@ -3,33 +3,38 @@ from sys import stdin, exit from os.path import dirname, abspath + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules from _py2with3compatibility import run_cmd all_dasfiles = [] -new_order = [] +new_order = [] for line in stdin: - line = line.strip("\n") - if line.startswith("/store/"): all_dasfiles.append(line) - else: new_order.append(line) + line = line.strip("\n") + if line.startswith("/store/"): + all_dasfiles.append(line) + else: + new_order.append(line) if not all_dasfiles: - print("\n".join(new_order)) - exit(0) + print("\n".join(new_order)) + exit(0) eos_cmd = "EOS_MGM_URL=root://eoscms.cern.ch /usr/bin/eos" 
-EOS_BASE="/eos/cms/store/user/cmsbuild/store" +EOS_BASE = "/eos/cms/store/user/cmsbuild/store" eos_base_len = len(EOS_BASE) -err, eos_files = run_cmd("%s find -f %s | sort" % (eos_cmd,EOS_BASE)) +err, eos_files = run_cmd("%s find -f %s | sort" % (eos_cmd, EOS_BASE)) if err: - print("\n".join(new_order)) - exit(0) + print("\n".join(new_order)) + exit(0) new_order = [] for eos_file in eos_files.split("\n"): - eos_file="/store"+eos_file[eos_base_len:] - if eos_file in all_dasfiles: new_order.append(eos_file) + eos_file = "/store" + eos_file[eos_base_len:] + if eos_file in all_dasfiles: + new_order.append(eos_file) for das_file in all_dasfiles: - if not das_file in new_order: new_order.append(das_file) + if not das_file in new_order: + new_order.append(das_file) print("\n".join(new_order)) diff --git a/deprecate-releases b/deprecate-releases deleted file mode 100755 index 6a9eab75c3d1..000000000000 --- a/deprecate-releases +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python - -# A script to generate the list of releases to deprecate, via a set of regexp. -# TODO unused -from __future__ import print_function -from _py2with3compatibility import urlopen -from xml.sax import make_parser, handler -import re -from optparse import OptionParser -from json import load - -INCIPIT = """ -Hi All, - -Below are the lists of releases which are being proposed to be deprecated. If you -would like any on this list kept please email me (and NOT post to this -announcement-only HyperNews forum) the following information so we can give it -the correct consideration: - -1. Release - -2. Reason to keep it - -3. Reason you are unable to move to the latest releases in the cycle (as we -expect you should be able to, for example from 3_8_6 to 3_8_7_patch2) - -4. When it can be removed - -In a week I will repost the final list to be deprecated. They will not -be removed for at least another week after that. 
- --- -Ciao, -Giulio -""" - -WHITELIST = ["CMSSW_5_2_7_hltpatch1"] - -class ReleasesDeprecator(handler.ContentHandler): - def __init__(self, whitelist): - self.whitelist = whitelist - self.survival = [] - self.devel_deprecated = [] - self.prod_deprecated = [] - self.rules = ["3_[0-9]_[0-9].*", "[45]_[0-9]_[0-9]+.*pre[0-9].*", "6_1_[0-9]+.*pre[0-9].*", "6_2_0_.*pre[0-3]", "4_3_.*", ".*_TS.*", - "CMSSW_[45]_.*cand.*", "5_2_[0-8].*", "5_[0-1]_.*", - "5_3_11_patch[1-5]", "4_4_5_patch[1-4]", "5_3_2_patch[1-4]", "5_3_7_patch[1-6]", "6_0_.*", "6_2_0_p1_gcc481", - "4_2_.*SLHC.*", "4_2_8_p7rootfix", "5_3_1[2-3]_patch[1-2]", "4_2_4_g94p03c", - "5_3_[8-9]_patch[1-2]", "6_1_0", "4_4_2_p10JEmalloc", "6_1_2_SLHC[1357].*", "6_2_0_SLHC[12].*", "5_3_3_patch[12]"] - - def startElement(self, name, attrs): - if not name == "project": - return - release = attrs["label"] - prod = attrs["type"] - if release in self.whitelist: - self.survival.append(release) - return - for r in self.rules: - rule = "CMSSW_" + r - if re.match(rule, release) and prod == "Development": - self.devel_deprecated.append(release) - return - if re.match(rule, release) and prod == "Production": - self.prod_deprecated.append(release) - return - self.survival.append(release) - - -if __name__ == "__main__": - parser = OptionParser() - parser.add_option("--exclude", "-e", - help="Contents of https://cms-pdmv.cern.ch/mcm/search/?db_name=campaigns&page=-1", - dest="exclude_list", default=None) - opts, args = parser.parse_args() - whitelist = set() - if opts.exclude_list: - exclude_list = load(open(opts.exclude_list)) - excludedReleases = set([x["cmssw_release"] for x in exclude_list["results"]]).union(WHITELIST) - excludedBaseReleases = set([re.sub("_[a-zA-Z0-9]*patch[0-9]*", "", x) for x in excludedReleases]) - whitelist = excludedBaseReleases.union(excludedReleases) - print(whitelist) - releases = urlopen("https://cmstags.cern.ch/tc/ReleasesXML?anytype=1") - parser = make_parser() - handler = ReleasesDeprecator(whitelist) - parser.setContentHandler(handler) - parser.parse(releases) - print(INCIPIT) - print("\n\n# The following **production** releases will be removed:\n\n%s" % "\n".join(sorted(handler.prod_deprecated))) - print("\n\n# The following **development** releases will be removed:\n\n%s" % "\n".join(sorted(handler.devel_deprecated))) - print("\n\n# The following releases will be untouched:\n\n%s" % "\n".join(sorted(handler.survival))) diff --git a/deprecate-releases b/deprecate-releases new file mode 120000 index 000000000000..d87efb744561 --- /dev/null +++ b/deprecate-releases @@ -0,0 +1 @@ +deprecate-releases.py \ No newline at end of file diff --git a/deprecate-releases.py b/deprecate-releases.py new file mode 100755 index 000000000000..0af056f517d2 --- /dev/null +++ b/deprecate-releases.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# A script to generate the list of releases to deprecate, via a set of regexp. +# TODO unused +from __future__ import print_function +from _py2with3compatibility import urlopen +from xml.sax import make_parser, handler +import re +from optparse import OptionParser +from json import load + +INCIPIT = """ +Hi All, + +Below are the lists of releases which are being proposed to be deprecated. If you +would like any on this list kept please email me (and NOT post to this +announcement-only HyperNews forum) the following information so we can give it +the correct consideration: + +1. Release + +2. Reason to keep it + +3. 
Reason you are unable to move to the latest releases in the cycle (as we +expect you should be able to, for example from 3_8_6 to 3_8_7_patch2) + +4. When it can be removed + +In a week I will repost the final list to be deprecated. They will not +be removed for at least another week after that. + +-- +Ciao, +Giulio +""" + +WHITELIST = ["CMSSW_5_2_7_hltpatch1"] + + +class ReleasesDeprecator(handler.ContentHandler): + def __init__(self, whitelist): + self.whitelist = whitelist + self.survival = [] + self.devel_deprecated = [] + self.prod_deprecated = [] + self.rules = [ + "3_[0-9]_[0-9].*", + "[45]_[0-9]_[0-9]+.*pre[0-9].*", + "6_1_[0-9]+.*pre[0-9].*", + "6_2_0_.*pre[0-3]", + "4_3_.*", + ".*_TS.*", + "CMSSW_[45]_.*cand.*", + "5_2_[0-8].*", + "5_[0-1]_.*", + "5_3_11_patch[1-5]", + "4_4_5_patch[1-4]", + "5_3_2_patch[1-4]", + "5_3_7_patch[1-6]", + "6_0_.*", + "6_2_0_p1_gcc481", + "4_2_.*SLHC.*", + "4_2_8_p7rootfix", + "5_3_1[2-3]_patch[1-2]", + "4_2_4_g94p03c", + "5_3_[8-9]_patch[1-2]", + "6_1_0", + "4_4_2_p10JEmalloc", + "6_1_2_SLHC[1357].*", + "6_2_0_SLHC[12].*", + "5_3_3_patch[12]", + ] + + def startElement(self, name, attrs): + if not name == "project": + return + release = attrs["label"] + prod = attrs["type"] + if release in self.whitelist: + self.survival.append(release) + return + for r in self.rules: + rule = "CMSSW_" + r + if re.match(rule, release) and prod == "Development": + self.devel_deprecated.append(release) + return + if re.match(rule, release) and prod == "Production": + self.prod_deprecated.append(release) + return + self.survival.append(release) + + +if __name__ == "__main__": + parser = OptionParser() + parser.add_option( + "--exclude", + "-e", + help="Contents of https://cms-pdmv.cern.ch/mcm/search/?db_name=campaigns&page=-1", + dest="exclude_list", + default=None, + ) + opts, args = parser.parse_args() + whitelist = set() + if opts.exclude_list: + exclude_list = load(open(opts.exclude_list)) + excludedReleases = set([x["cmssw_release"] for x in exclude_list["results"]]).union( + WHITELIST + ) + excludedBaseReleases = set( + [re.sub("_[a-zA-Z0-9]*patch[0-9]*", "", x) for x in excludedReleases] + ) + whitelist = excludedBaseReleases.union(excludedReleases) + print(whitelist) + releases = urlopen("https://cmstags.cern.ch/tc/ReleasesXML?anytype=1") + parser = make_parser() + handler = ReleasesDeprecator(whitelist) + parser.setContentHandler(handler) + parser.parse(releases) + print(INCIPIT) + print( + "\n\n# The following **production** releases will be removed:\n\n%s" + % "\n".join(sorted(handler.prod_deprecated)) + ) + print( + "\n\n# The following **development** releases will be removed:\n\n%s" + % "\n".join(sorted(handler.devel_deprecated)) + ) + print( + "\n\n# The following releases will be untouched:\n\n%s" + % "\n".join(sorted(handler.survival)) + ) diff --git a/deprecate_releases.py b/deprecate_releases.py index 0e595ecfa01c..70e646381978 100755 --- a/deprecate_releases.py +++ b/deprecate_releases.py @@ -1,15 +1,16 @@ #!/bin/env python from __future__ import print_function import sys + if len(sys.argv) < 3: - print("Usage: %s releases.map cmssw_version [cmssw_version [...]]" % sys.argv[0]) - sys.exit(1) + print("Usage: %s releases.map cmssw_version [cmssw_version [...]]" % sys.argv[0]) + sys.exit(1) release_map = sys.argv[1] deprecate_list = sys.argv[2:] -fd = open(release_map,'r') +fd = open(release_map, "r") for line in fd.readlines(): - release = line.split(';label=',1)[1].split(";",1)[0] - if release in deprecate_list: - line = 
line.replace('Announced','Deprecated') - print(line, end=' ') + release = line.split(";label=", 1)[1].split(";", 1)[0] + if release in deprecate_list: + line = line.replace("Announced", "Deprecated") + print(line, end=" ") diff --git a/docker/check-repositories.py b/docker/check-repositories.py index 0dd6c13c7a70..cec5c5321868 100755 --- a/docker/check-repositories.py +++ b/docker/check-repositories.py @@ -3,59 +3,78 @@ import sys from os.path import exists, dirname, abspath import yaml, json + try: - from yaml import CLoader as Loader, CDumper as Dumper + from yaml import CLoader as Loader, CDumper as Dumper except ImportError: - from yaml import Loader, Dumper + from yaml import Loader, Dumper from sys import exit from optparse import OptionParser + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules from _py2with3compatibility import run_cmd + def get_repos(user, cache): - if user not in cache: - cache[user] = [] - url = 'https://hub.docker.com/v2/repositories/%s?page_size=100' % user - while True: - e , o = run_cmd('curl -s -L %s' % url) - repo_data = json.loads(o) - if "results" in repo_data: - for r in repo_data["results"]: - cache[user].append(r["name"]) - if "next" in repo_data and repo_data["next"]: - url = repo_data["next"] - else: - break - return cache[user] + if user not in cache: + cache[user] = [] + url = "https://hub.docker.com/v2/repositories/%s?page_size=100" % user + while True: + e, o = run_cmd("curl -s -L %s" % url) + repo_data = json.loads(o) + if "results" in repo_data: + for r in repo_data["results"]: + cache[user].append(r["name"]) + if "next" in repo_data and repo_data["next"]: + url = repo_data["next"] + else: + break + return cache[user] + def process(repos, dryRun, cache): - for data in repos: - for u in data: - existing_repos = get_repos(u, cache) - for r in data[u]: - if r not in existing_repos: - print("%s/%s NEW" % (u, r)) + for data in repos: + for u in data: + existing_repos = get_repos(u, cache) + for r in data[u]: + if r not in existing_repos: + print("%s/%s NEW" % (u, r)) + if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-r", "--repo-list", dest="repo_list", help="Yaml file with list of repositories to create under docker hun", type=str, default=None) - opts, args = parser.parse_args() - - repos = {} - if not opts.repo_list: - parser.error("Missing repository list file, please use -r|--repo-list option") - - if opts.repo_list.startswith('https://'): - e, o = run_cmd('curl -s -L %s' % opts.repo_list) - if e: - print (o) - exit(1) - repos = yaml.load_all(o, Loader=Loader) - elif exists(opts.repo_list): - repos = yaml.load(open(opts.repo_list), Loader=Loader) - else: - print ("Error: No such file: %s" % opts.repo_list) - exit (1) - repo_cache = {} - process(repos, opts.dryRun, repo_cache) + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-r", + "--repo-list", + dest="repo_list", + help="Yaml file with list of repositories to create under docker hun", + type=str, + default=None, + ) + opts, args = parser.parse_args() + + repos = {} + if not opts.repo_list: + parser.error("Missing repository list file, please use -r|--repo-list option") + + if opts.repo_list.startswith("https://"): + e, o = run_cmd("curl -s -L 
%s" % opts.repo_list) + if e: + print(o) + exit(1) + repos = yaml.load_all(o, Loader=Loader) + elif exists(opts.repo_list): + repos = yaml.load(open(opts.repo_list), Loader=Loader) + else: + print("Error: No such file: %s" % opts.repo_list) + exit(1) + repo_cache = {} + process(repos, opts.dryRun, repo_cache) diff --git a/es-cleanup-indexes b/es-cleanup-indexes deleted file mode 100755 index bb06bc40d58c..000000000000 --- a/es-cleanup-indexes +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env python - -# A script which deletes elasticsearch logs when they are older that the specified amount -# of units. - - -# Each config line has the folloing format: -# -# - Format of the index, including the datetime part. This will -# be used to determine the age of the index. -# - The time period in which an index is considered to be valid. -# You can use y, m, d , H, M suffix to specify years, months, -# days, hours, minutes. -# - The action to perform when the index is old enough. For the moment it can be -# - delete: delete the index. - -from __future__ import print_function -from argparse import ArgumentParser -from _py2with3compatibility import run_cmd -from datetime import datetime -import re -from os import getenv - -CONFIG = [ - ["mesos-offers-%Y.%m.%d", "1M", "delete"], - ["mesos-info-%Y.%m.%d.%H%M", "1H", "delete"], - ["mesos-info-%Y.%m.%d", "2d", "delete"], - ["mesos-logs-%%{loglevel}-%Y.%m.%d", "2d", "delete"], - ["mesos-warning-%Y.%m.%d", "1d", "delete"], - ["mesos-error-%Y.%m.%d", "1d", "delete"], - ["mesos-logs-i-%Y.%m.%d", "1d", "delete"], - ["mesos-logs-w-%Y.%m.%d", "1d", "delete"], - ["mesos-logs-e-%Y.%m.%d", "1d", "delete"], - ["mesos-logs-f-%Y.%m.%d", "1d", "delete"], - ["ib-files-%Y.%m", "1m", "delete"], - ["stats-condweb-%Y.%m.%d", "2d", "delete"], - ["stats-condweb-access-%Y.%m.%d", "2d", "delete"], - ["stats-condweb-error-%Y.%m", "1m", "delete"], - ["ib-matrix.%Y.%m", "1m", "delete"], - ["ib-matrix.%Y-%W-%w", "14d", "delete"], - ["ib-scram-stats-%Y.%m.%d", "7d", "delete"], -] - -def format(s, **kwds): - return s % kwds - -timeunits = { - "Y": 60*60*24*356, - "m": 60*60*24*30, - "d": 60*60*24, - "H": 60*60, - "M": 60 -} - -if __name__ == "__main__": - # First get all the available indices - parser = ArgumentParser() - parser.add_argument("--proxy", help="the proxy to use") - parser.add_argument("--server", help="the elasticsearch entrypoint") - parser.add_argument("--user", help="user to be used for authentication") - parser.add_argument("--auth-file", dest="authFile", help="file containing the authentication token") - parser.add_argument("--dry-run", "-n", dest="dryrun", action="store_true", - default=False, help="the elasticsearch entrypoint") - args = parser.parse_args() - proxy_string = "" - if args.proxy: - proxy_string = "--socks5-hostname " + args.proxy - - if args.user and args.authFile: - user_string = "--user %s:%s" % (args.user, open(args.authFile).read().strip()) - elif getenv("ES_AUTH"): - user_string = "--user %s" % getenv("ES_AUTH") - - cmd = format("curl -s %(proxy_string)s %(server)s/_cat/indices " - "%(user_string)s", - proxy_string=proxy_string, - server=args.server, - user_string=user_string) - err, out = run_cmd(cmd) - if err: - print("Error while getting indices") - print(out) - exit(0) - indices = [re.split("[ ]+", l)[2] for l in out.split("\n")] - for c in CONFIG: - pattern, timeout, action = c - m = re.match("([0-9]*)([YmdHM])", timeout) - if not m: - print("Malformed timeout %s" % timeout) - exit(1) - time, unit = m.groups() - timedelta = 
int(time)*timeunits[unit] - for index in indices: - try: - d = datetime.strptime(index, pattern) - except ValueError as e: - continue - print(index, "matches", pattern) - now = datetime.now() - td = (now-d) - total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 - if total_seconds < timedelta: - print(index, "is recent enough. Keeping.") - continue - print(index, "is older than", timeout, ". Deleting") - if not args.dryrun: - cmd = format("curl -s -X DELETE %(proxy_string)s %(server)s/%(index)s" - " %(user_string)s", - proxy_string=proxy_string, - server=args.server, - index=index, - user_string=user_string) - err, out = run_cmd(cmd) - if err: - print("Error while deleting.") - print(out) - exit(1) - print(index, "deleted.") diff --git a/es-cleanup-indexes b/es-cleanup-indexes new file mode 120000 index 000000000000..60a5b3481d25 --- /dev/null +++ b/es-cleanup-indexes @@ -0,0 +1 @@ +es-cleanup-indexes.py \ No newline at end of file diff --git a/es-cleanup-indexes.py b/es-cleanup-indexes.py new file mode 100755 index 000000000000..d5e59bdd22d8 --- /dev/null +++ b/es-cleanup-indexes.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python + +# A script which deletes elasticsearch logs when they are older that the specified amount +# of units. + + +# Each config line has the folloing format: +# +# - Format of the index, including the datetime part. This will +# be used to determine the age of the index. +# - The time period in which an index is considered to be valid. +# You can use y, m, d , H, M suffix to specify years, months, +# days, hours, minutes. +# - The action to perform when the index is old enough. For the moment it can be +# - delete: delete the index. + +from __future__ import print_function +from argparse import ArgumentParser +from _py2with3compatibility import run_cmd +from datetime import datetime +import re +from os import getenv + +CONFIG = [ + ["mesos-offers-%Y.%m.%d", "1M", "delete"], + ["mesos-info-%Y.%m.%d.%H%M", "1H", "delete"], + ["mesos-info-%Y.%m.%d", "2d", "delete"], + ["mesos-logs-%%{loglevel}-%Y.%m.%d", "2d", "delete"], + ["mesos-warning-%Y.%m.%d", "1d", "delete"], + ["mesos-error-%Y.%m.%d", "1d", "delete"], + ["mesos-logs-i-%Y.%m.%d", "1d", "delete"], + ["mesos-logs-w-%Y.%m.%d", "1d", "delete"], + ["mesos-logs-e-%Y.%m.%d", "1d", "delete"], + ["mesos-logs-f-%Y.%m.%d", "1d", "delete"], + ["ib-files-%Y.%m", "1m", "delete"], + ["stats-condweb-%Y.%m.%d", "2d", "delete"], + ["stats-condweb-access-%Y.%m.%d", "2d", "delete"], + ["stats-condweb-error-%Y.%m", "1m", "delete"], + ["ib-matrix.%Y.%m", "1m", "delete"], + ["ib-matrix.%Y-%W-%w", "14d", "delete"], + ["ib-scram-stats-%Y.%m.%d", "7d", "delete"], +] + + +def format(s, **kwds): + return s % kwds + + +timeunits = { + "Y": 60 * 60 * 24 * 356, + "m": 60 * 60 * 24 * 30, + "d": 60 * 60 * 24, + "H": 60 * 60, + "M": 60, +} + +if __name__ == "__main__": + # First get all the available indices + parser = ArgumentParser() + parser.add_argument("--proxy", help="the proxy to use") + parser.add_argument("--server", help="the elasticsearch entrypoint") + parser.add_argument("--user", help="user to be used for authentication") + parser.add_argument( + "--auth-file", dest="authFile", help="file containing the authentication token" + ) + parser.add_argument( + "--dry-run", + "-n", + dest="dryrun", + action="store_true", + default=False, + help="the elasticsearch entrypoint", + ) + args = parser.parse_args() + proxy_string = "" + if args.proxy: + proxy_string = "--socks5-hostname " + args.proxy + + if args.user 
and args.authFile: + user_string = "--user %s:%s" % (args.user, open(args.authFile).read().strip()) + elif getenv("ES_AUTH"): + user_string = "--user %s" % getenv("ES_AUTH") + + cmd = format( + "curl -s %(proxy_string)s %(server)s/_cat/indices " "%(user_string)s", + proxy_string=proxy_string, + server=args.server, + user_string=user_string, + ) + err, out = run_cmd(cmd) + if err: + print("Error while getting indices") + print(out) + exit(0) + indices = [re.split("[ ]+", l)[2] for l in out.split("\n")] + for c in CONFIG: + pattern, timeout, action = c + m = re.match("([0-9]*)([YmdHM])", timeout) + if not m: + print("Malformed timeout %s" % timeout) + exit(1) + time, unit = m.groups() + timedelta = int(time) * timeunits[unit] + for index in indices: + try: + d = datetime.strptime(index, pattern) + except ValueError as e: + continue + print(index, "matches", pattern) + now = datetime.now() + td = now - d + total_seconds = ( + td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6 + ) / 10**6 + if total_seconds < timedelta: + print(index, "is recent enough. Keeping.") + continue + print(index, "is older than", timeout, ". Deleting") + if not args.dryrun: + cmd = format( + "curl -s -X DELETE %(proxy_string)s %(server)s/%(index)s" " %(user_string)s", + proxy_string=proxy_string, + server=args.server, + index=index, + user_string=user_string, + ) + err, out = run_cmd(cmd) + if err: + print("Error while deleting.") + print(out) + exit(1) + print(index, "deleted.") diff --git a/es-reindex b/es-reindex deleted file mode 100755 index a495d52a80ad..000000000000 --- a/es-reindex +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python - -# A script which creates a new index using a new mapping and create an alias -# for the old index name. - -from __future__ import print_function -from _py2with3compatibility import run_cmd, Request, HTTPSHandler, build_opener, install_opener -from argparse import ArgumentParser -import json -import base64 -from os import getenv -# Avoid checking for certificate since we execute this only inside CERN. -# Drop the following stanza if not. -import ssl -if hasattr(ssl, '_create_unverified_context'): - ssl._create_default_https_context = ssl._create_unverified_context - -def format(s, **kwds): - return s % kwds - -def writeToES(server, data): - url = format("%(server)s/_bulk", server=server) - handler= HTTPSHandler(debuglevel=0) - opener = build_opener(handler) - install_opener(opener) - - new_request = Request(url, data) - new_request.get_method = lambda : "PUT" - base64string = base64.encodestring(getenv("ES_AUTH")).replace('\n', '') - new_request.add_header("Authorization", "Basic %s" % base64string) - try: - response = opener.open(new_request) - print(response.read()) - except Exception as e: - print(e) - exit(1) - -# - Get the index via scan and scroll. -# - Push items to the new index. -# - Delete old index. -# - Create alias from the old index to the new one. 
-if __name__ == "__main__": - # First get all the available indices - parser = ArgumentParser() - parser.add_argument("--server", "-s", dest="server", - default="localhost:9200", help="the elasticsearch server") - parser.add_argument("--dry-run", "-n", dest="dryrun", action="store_true", - default=False, help="do not change the DB.") - parser.add_argument("source") - parser.add_argument("dest") - args = parser.parse_args() - proxy_string = "" - - if not getenv("ES_AUTH"): - print("ES_AUTH not specified") - exit(1) - - user_string = "--user %s" % getenv("ES_AUTH") - - query = { - "query": { "match_all": {}}, - "size": 1000 - } - cmd = format("curl -s %(user_string)s '%(server)s/%(source)s/_search?search_type=scan&scroll=1m' -d'%(query)s'", - source=args.source, - query=json.dumps(query), - server=args.server, - user_string=user_string) - print(cmd) - err, out = run_cmd(cmd) - if err: - print("Error while getting indices") - print(out) - exit(0) - result = json.loads(out) - - while True: - cmd = format("curl -s %(user_string)s '%(server)s/_search/scroll?scroll=1m' -d'%(scroll_id)s'", - source=args.source, - query=json.dumps(query), - server=args.server, - user_string=user_string, - scroll_id=result["_scroll_id"]) - err, out = run_cmd(cmd) - if err: - print("Error while getting entries") - print(out) - exit(1) - result = json.loads(out) - if result.get("status", 200) != 200: - print(out) - exit(1) - - # Exit when there are not results - if not len(result["hits"]["hits"]): - break - line = "" - for item in result["hits"]["hits"]: - cmd = format('{ "create": { "_index": "%(index)s", "_type": "%(obj_type)s", "_id": "%(obj_id)s" }}', - index=args.dest, - obj_type=item["_type"], - obj_id=item["_id"] - ) - payload = json.dumps(item["_source"]) - line += cmd+"\n"+payload + "\n" - if len(line) < 500000: - continue - # print json.dumps(json.loads(cmd)) - # print json.dumps(json.loads(payload)) - writeToES(server=args.server, data=line) - line = "" - if line: - writeToES(server=args.server, data=line) diff --git a/es-reindex b/es-reindex new file mode 120000 index 000000000000..b8f3f64f7452 --- /dev/null +++ b/es-reindex @@ -0,0 +1 @@ +es-reindex.py \ No newline at end of file diff --git a/es-reindex.py b/es-reindex.py new file mode 100755 index 000000000000..541835a0418a --- /dev/null +++ b/es-reindex.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python + +# A script which creates a new index using a new mapping and create an alias +# for the old index name. + +from __future__ import print_function +from _py2with3compatibility import run_cmd, Request, HTTPSHandler, build_opener, install_opener +from argparse import ArgumentParser +import json +import base64 +from os import getenv + +# Avoid checking for certificate since we execute this only inside CERN. +# Drop the following stanza if not. 
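Both the removed es-reindex and the new es-reindex.py walk the source index with the legacy scan/scroll API shown above. A self-contained sketch of that pagination loop, assuming curl is available and that server/user_string follow the same conventions as the script:

import json
import subprocess

def scroll_hits(server, source, user_string, size=1000):
    """Yield every hit of `source`, one scroll page at a time."""
    query = json.dumps({"query": {"match_all": {}}, "size": size})
    out = subprocess.check_output(
        "curl -s %s '%s/%s/_search?search_type=scan&scroll=1m' -d'%s'"
        % (user_string, server, source, query), shell=True)
    result = json.loads(out)
    while True:
        out = subprocess.check_output(
            "curl -s %s '%s/_search/scroll?scroll=1m' -d'%s'"
            % (user_string, server, result["_scroll_id"]), shell=True)
        result = json.loads(out)
        if not result["hits"]["hits"]:
            break
        for hit in result["hits"]["hits"]:
            yield hit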
+import ssl + +if hasattr(ssl, "_create_unverified_context"): + ssl._create_default_https_context = ssl._create_unverified_context + + +def format(s, **kwds): + return s % kwds + + +def writeToES(server, data): + url = format("%(server)s/_bulk", server=server) + handler = HTTPSHandler(debuglevel=0) + opener = build_opener(handler) + install_opener(opener) + + new_request = Request(url, data) + new_request.get_method = lambda: "PUT" + base64string = base64.encodestring(getenv("ES_AUTH")).replace("\n", "") + new_request.add_header("Authorization", "Basic %s" % base64string) + try: + response = opener.open(new_request) + print(response.read()) + except Exception as e: + print(e) + exit(1) + + +# - Get the index via scan and scroll. +# - Push items to the new index. +# - Delete old index. +# - Create alias from the old index to the new one. +if __name__ == "__main__": + # First get all the available indices + parser = ArgumentParser() + parser.add_argument( + "--server", "-s", dest="server", default="localhost:9200", help="the elasticsearch server" + ) + parser.add_argument( + "--dry-run", + "-n", + dest="dryrun", + action="store_true", + default=False, + help="do not change the DB.", + ) + parser.add_argument("source") + parser.add_argument("dest") + args = parser.parse_args() + proxy_string = "" + + if not getenv("ES_AUTH"): + print("ES_AUTH not specified") + exit(1) + + user_string = "--user %s" % getenv("ES_AUTH") + + query = {"query": {"match_all": {}}, "size": 1000} + cmd = format( + "curl -s %(user_string)s '%(server)s/%(source)s/_search?search_type=scan&scroll=1m' -d'%(query)s'", + source=args.source, + query=json.dumps(query), + server=args.server, + user_string=user_string, + ) + print(cmd) + err, out = run_cmd(cmd) + if err: + print("Error while getting indices") + print(out) + exit(0) + result = json.loads(out) + + while True: + cmd = format( + "curl -s %(user_string)s '%(server)s/_search/scroll?scroll=1m' -d'%(scroll_id)s'", + source=args.source, + query=json.dumps(query), + server=args.server, + user_string=user_string, + scroll_id=result["_scroll_id"], + ) + err, out = run_cmd(cmd) + if err: + print("Error while getting entries") + print(out) + exit(1) + result = json.loads(out) + if result.get("status", 200) != 200: + print(out) + exit(1) + + # Exit when there are not results + if not len(result["hits"]["hits"]): + break + line = "" + for item in result["hits"]["hits"]: + cmd = format( + '{ "create": { "_index": "%(index)s", "_type": "%(obj_type)s", "_id": "%(obj_id)s" }}', + index=args.dest, + obj_type=item["_type"], + obj_id=item["_id"], + ) + payload = json.dumps(item["_source"]) + line += cmd + "\n" + payload + "\n" + if len(line) < 500000: + continue + # print json.dumps(json.loads(cmd)) + # print json.dumps(json.loads(payload)) + writeToES(server=args.server, data=line) + line = "" + if line: + writeToES(server=args.server, data=line) diff --git a/es/es_close_indexes.py b/es/es_close_indexes.py index e7339fe2b527..5db283d6cbfd 100755 --- a/es/es_close_indexes.py +++ b/es/es_close_indexes.py @@ -4,58 +4,71 @@ import sys, re # TODO are these script used? 
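writeToES() above pushes the accumulated newline-delimited bulk body with HTTP Basic auth taken from ES_AUTH. A short sketch of the same request written against urllib directly; base64.b64encode is the Python 3 spelling of the encodestring call used in the script, and the Content-Type header is an assumption for newer Elasticsearch versions:

import base64
import json
import os
from urllib.request import Request, urlopen

def put_bulk(server, ndjson_body):
    """PUT an ND-JSON bulk body to <server>/_bulk with Basic auth from ES_AUTH."""
    req = Request("%s/_bulk" % server, data=ndjson_body.encode(), method="PUT")
    token = base64.b64encode(os.environ["ES_AUTH"].encode()).decode()
    req.add_header("Authorization", "Basic %s" % token)
    req.add_header("Content-Type", "application/x-ndjson")
    return urlopen(req).read()

# Each document contributes two lines: the "create" action and its _source.
action = json.dumps({"create": {"_index": "cmssdt-new", "_type": "doc", "_id": "1"}})
body = action + "\n" + json.dumps({"release": "CMSSW_13_0_X"}) + "\n"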
-cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import get_indexes, close_index, find_indexes, open_index from time import time -try: weeks=int(sys.argv[1]) -except: weeks=20 -ignore_index=[] + +try: + weeks = int(sys.argv[1]) +except: + weeks = 20 +ignore_index = [] for idx in sys.argv[2:]: - ignore_index.append(idx) -cur_week=int(((time()/86400)+4)/7) -idxs=[] -odxs=[] + ignore_index.append(idx) +cur_week = int(((time() / 86400) + 4) / 7) +idxs = [] +odxs = [] try: - if sys.argv[2]: - for ix in sys.argv[2:]: - ixs = find_indexes(ix) - if not "open" in ixs: continue - for i in ixs["open"]: idxs.append(i) + if sys.argv[2]: + for ix in sys.argv[2:]: + ixs = find_indexes(ix) + if not "open" in ixs: + continue + for i in ixs["open"]: + idxs.append(i) except: - types = {"close":{}, "open":{}} - rest = {"close":[], "open":[]} - ixs = find_indexes('cmssdt-*') - for k in ixs: - for idx in ixs[k]: - m = re.match("^(.+)[_-]([\d]+)$",idx) - if m: - ix = m.group(1) - wk = m.group(2) - if not k in types: types[k]={} - if not ix in types[k]:types[k][ix]=[] - types[k][ix].append(wk) - if ix in ignore_index: continue - if (k == "open") and ((cur_week-int(wk))>weeks): idxs.append(idx) - if (k == "close") and ((cur_week-int(wk))<=weeks): odxs.append(idx) - else: - if not k in rest: rest[k]=[] - rest[k].append(idx) - for k in rest: - print("REST:", k, ":", sorted(rest[k])) - for k in types: - for ix in sorted(types[k].keys()): - print("WEEK:",k,":",ix,sorted(types[k][ix])) + types = {"close": {}, "open": {}} + rest = {"close": [], "open": []} + ixs = find_indexes("cmssdt-*") + for k in ixs: + for idx in ixs[k]: + m = re.match("^(.+)[_-]([\d]+)$", idx) + if m: + ix = m.group(1) + wk = m.group(2) + if not k in types: + types[k] = {} + if not ix in types[k]: + types[k][ix] = [] + types[k][ix].append(wk) + if ix in ignore_index: + continue + if (k == "open") and ((cur_week - int(wk)) > weeks): + idxs.append(idx) + if (k == "close") and ((cur_week - int(wk)) <= weeks): + odxs.append(idx) + else: + if not k in rest: + rest[k] = [] + rest[k].append(idx) + for k in rest: + print("REST:", k, ":", sorted(rest[k])) + for k in types: + for ix in sorted(types[k].keys()): + print("WEEK:", k, ":", ix, sorted(types[k][ix])) for idx in sorted(idxs): - print("Closing ",idx) - close_index(idx) - print(" ",get_indexes(idx).strip()) + print("Closing ", idx) + close_index(idx) + print(" ", get_indexes(idx).strip()) for idx in sorted(odxs): - print("Opening ",idx) - open_index(idx) - print(" ",get_indexes(idx).strip()) + print("Opening ", idx) + open_index(idx) + print(" ", get_indexes(idx).strip()) diff --git a/es/es_delete_indexes.py b/es/es_delete_indexes.py index 3811ba0747b8..f9d41e13271c 100755 --- a/es/es_delete_indexes.py +++ b/es/es_delete_indexes.py @@ -3,16 +3,19 @@ from os.path import dirname, abspath import sys -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import delete_index, find_indexes for i in 
sys.argv[1:]: - idxs = find_indexes(i) - if not 'close' in idxs: continue - for ix in sorted(idxs['close']): - print("Deleting ", ix) - delete_index(ix) + idxs = find_indexes(i) + if not "close" in idxs: + continue + for ix in sorted(idxs["close"]): + print("Deleting ", ix) + delete_index(ix) diff --git a/es/es_get_templates.py b/es/es_get_templates.py index 30ab5d5aa046..d4ab9d1deca2 100755 --- a/es/es_get_templates.py +++ b/es/es_get_templates.py @@ -3,27 +3,30 @@ from os.path import dirname, abspath, join import sys -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from _py2with3compatibility import run_cmd import json from es_utils import get_template tmpl = json.loads(get_template()) -if 'proxy-error' in tmpl: - print("Error: ", tmpl['proxy-error']) - sys.exit(1) +if "proxy-error" in tmpl: + print("Error: ", tmpl["proxy-error"]) + sys.exit(1) -tmpl_dir="%s/es/templates" % cmsbot_dir +tmpl_dir = "%s/es/templates" % cmsbot_dir run_cmd("mkdir -p %s" % tmpl_dir) for t in tmpl: - if not t.startswith("cmssdt-"): continue - tfile = join(tmpl_dir,t+".json") - print("Saving: ", tfile) - ref = open(tfile,"w") - if ref: - json.dump(tmpl[t],ref,indent=2, sort_keys=True, separators=(',',': ')) - ref.close() + if not t.startswith("cmssdt-"): + continue + tfile = join(tmpl_dir, t + ".json") + print("Saving: ", tfile) + ref = open(tfile, "w") + if ref: + json.dump(tmpl[t], ref, indent=2, sort_keys=True, separators=(",", ": ")) + ref.close() diff --git a/es/es_git_repo_size.py b/es/es_git_repo_size.py index ba792327f1e5..21b27cd989b6 100755 --- a/es/es_git_repo_size.py +++ b/es/es_git_repo_size.py @@ -4,35 +4,37 @@ from time import time from os.path import dirname, abspath -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) + +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import send_payload from _py2with3compatibility import run_cmd repo = sys.argv[1] -e , o = run_cmd("git clone --bare https://github.com/%s.git repo" % repo) +e, o = run_cmd("git clone --bare https://github.com/%s.git repo" % repo) if e: - print(o) - sys.exit(1) + print(o) + sys.exit(1) -e , size = run_cmd("du -k -s -c repo/objects/pack/ | grep total | awk '{print $1}'") +e, size = run_cmd("du -k -s -c repo/objects/pack/ | grep total | awk '{print $1}'") if e: - print(size) - sys.exit(1) + print(size) + sys.exit(1) -e , o = run_cmd("ls -d repo/objects/pack/pack-*.pack") +e, o = run_cmd("ls -d repo/objects/pack/pack-*.pack") if e: - print(o) - sys.exit(1) + print(o) + sys.exit(1) rid = o.split("/")[-1][5:-5] payload = {} payload["repository"] = repo payload["size"] = int(size) -payload["@timestamp"] = int(time()*1000) +payload["@timestamp"] = int(time() * 1000) index = "git-repository-size" document = "stats" -send_payload(index,document,rid,json.dumps(payload)) - +send_payload(index, document, rid, json.dumps(payload)) diff --git a/es/es_open_indexes.py b/es/es_open_indexes.py index 73993d942105..73863480ddf6 100755 --- a/es/es_open_indexes.py +++ b/es/es_open_indexes.py @@ -3,20 +3,23 @@ import sys from os.path import dirname, 
abspath -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import get_indexes, open_index, find_indexes from time import time -cur_week=int(((time()/86400)+4)/7) -for i in sys.argv[1:]: - idxs = find_indexes(i) - if not 'close' in idxs: continue - for ix in sorted(idxs['close']): - print("Opening ", ix) - open_index(ix) - print(get_indexes(ix)) +cur_week = int(((time() / 86400) + 4) / 7) +for i in sys.argv[1:]: + idxs = find_indexes(i) + if not "close" in idxs: + continue + for ix in sorted(idxs["close"]): + print("Opening ", ix) + open_index(ix) + print(get_indexes(ix)) diff --git a/es/es_send_templates.py b/es/es_send_templates.py index 90206e917619..75ff5d25b8d7 100755 --- a/es/es_send_templates.py +++ b/es/es_send_templates.py @@ -4,18 +4,20 @@ import sys from os.path import dirname, abspath, join, exists -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import send_template for tmpl in sys.argv[1:]: - tmplfile = join(cmsbot_dir,'es', 'templates',tmpl+'.json') - if not exists (tmplfile): - print("ERROR: No such file: ", tmplfile) - sys.exit(1) - payload = json.load(open(tmplfile)) - if not send_template(tmpl, payload=json.dumps(payload)): sys.exit(1) - + tmplfile = join(cmsbot_dir, "es", "templates", tmpl + ".json") + if not exists(tmplfile): + print("ERROR: No such file: ", tmplfile) + sys.exit(1) + payload = json.load(open(tmplfile)) + if not send_template(tmpl, payload=json.dumps(payload)): + sys.exit(1) diff --git a/es/es_show_indexes.py b/es/es_show_indexes.py index 7c03182eb07f..6302e74a4d28 100755 --- a/es/es_show_indexes.py +++ b/es/es_show_indexes.py @@ -3,18 +3,20 @@ import sys from os.path import dirname, abspath -cmsbot_dir=None -if __file__: cmsbot_dir=dirname(dirname(abspath(__file__))) -else: cmsbot_dir=dirname(dirname(abspath(sys.argv[0]))) -sys.path.insert(0,cmsbot_dir) +cmsbot_dir = None +if __file__: + cmsbot_dir = dirname(dirname(abspath(__file__))) +else: + cmsbot_dir = dirname(dirname(abspath(sys.argv[0]))) +sys.path.insert(0, cmsbot_dir) from es_utils import get_indexes, find_indexes from time import time -cur_week=int(((time()/86400)+4)/7) -for i in sys.argv[1:]: - idxs = find_indexes(i) - for k in idxs: - for ix in sorted(idxs[k]): - print(get_indexes(ix)) +cur_week = int(((time() / 86400) + 4) / 7) +for i in sys.argv[1:]: + idxs = find_indexes(i) + for k in idxs: + for ix in sorted(idxs[k]): + print(get_indexes(ix)) diff --git a/es_cmsdoxygen_apache.py b/es_cmsdoxygen_apache.py index bbe706990f28..e02dd1ad0c08 100755 --- a/es_cmsdoxygen_apache.py +++ b/es_cmsdoxygen_apache.py @@ -8,43 +8,49 @@ from json import dumps from logwatch import logwatch, run_cmd, LOGWATCH_APACHE_IGNORE_AGENTS -def process (line, count): - for agent in LOGWATCH_APACHE_IGNORE_AGENTS: - if agent in line: return True - payload = {} - items = line.split(" ") - if len(items)<10: return True - if not (items[3][0]=='[' and items[4][-1]==']'): return True - payload["ip"]=items[0] - 
payload["ident"]=items[1] - payload["auth"]=items[2] - payload["verb"]=items[5][1:] - payload["request"]=items[6] - payload["httpversion"]=items[7][:-1] - payload["response"]=items[8] - try: - payload["bytes"]=int(items[9]) - except: - payload["bytes"]=0 - tsec = mktime(datetime.strptime(items[3][1:],'%d/%b/%Y:%H:%M:%S').timetuple()) - week = str(int(tsec/(86400*7))) - payload["@timestamp"]=int(tsec*1000) - if len(items)>10: payload["referrer"]=items[10][1:-1] - if len(items)>11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): - payload["ip"]=items[11][1:-1] - if len(items)>12: - agent = " ".join(items[12:]).replace('"','') - payload["agent"] = agent - payload["agent_type"]=agent.replace(" ","-").split("/",1)[0].upper() - id = sha1(line.encode()).hexdigest() - if (count%1000)==0: - print("Processed entries",count) - return send_payload("apache-cmsdoxygen-"+week,"access_log", id, dumps(payload)) -count=run_cmd("pgrep -l -x -f '^python3 .*/es_cmsdoxygen_apache.py$' | wc -l",False) -if int(count)>1: exit(0) -logs = run_cmd("ls -rt /var/log/httpd/sdt-access_log* | grep -v '[.]gz$'").split("\n") -log = logwatch("httpd",log_dir="/data/es") -s,c=log.process(logs, process) -print("Total entries processed",c) +def process(line, count): + for agent in LOGWATCH_APACHE_IGNORE_AGENTS: + if agent in line: + return True + payload = {} + items = line.split(" ") + if len(items) < 10: + return True + if not (items[3][0] == "[" and items[4][-1] == "]"): + return True + payload["ip"] = items[0] + payload["ident"] = items[1] + payload["auth"] = items[2] + payload["verb"] = items[5][1:] + payload["request"] = items[6] + payload["httpversion"] = items[7][:-1] + payload["response"] = items[8] + try: + payload["bytes"] = int(items[9]) + except: + payload["bytes"] = 0 + tsec = mktime(datetime.strptime(items[3][1:], "%d/%b/%Y:%H:%M:%S").timetuple()) + week = str(int(tsec / (86400 * 7))) + payload["@timestamp"] = int(tsec * 1000) + if len(items) > 10: + payload["referrer"] = items[10][1:-1] + if len(items) > 11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): + payload["ip"] = items[11][1:-1] + if len(items) > 12: + agent = " ".join(items[12:]).replace('"', "") + payload["agent"] = agent + payload["agent_type"] = agent.replace(" ", "-").split("/", 1)[0].upper() + id = sha1(line.encode()).hexdigest() + if (count % 1000) == 0: + print("Processed entries", count) + return send_payload("apache-cmsdoxygen-" + week, "access_log", id, dumps(payload)) + +count = run_cmd("pgrep -l -x -f '^python3 .*/es_cmsdoxygen_apache.py$' | wc -l", False) +if int(count) > 1: + exit(0) +logs = run_cmd("ls -rt /var/log/httpd/sdt-access_log* | grep -v '[.]gz$'").split("\n") +log = logwatch("httpd", log_dir="/data/es") +s, c = log.process(logs, process) +print("Total entries processed", c) diff --git a/es_cmsrep_apache.py b/es_cmsrep_apache.py index 248aa6721baa..a3fa718d9879 100755 --- a/es_cmsrep_apache.py +++ b/es_cmsrep_apache.py @@ -7,63 +7,86 @@ from json import dumps from logwatch import logwatch, run_cmd, LOGWATCH_APACHE_IGNORE_AGENTS -def process (line, count): - for agent in LOGWATCH_APACHE_IGNORE_AGENTS: - if agent in line: return True - payload = {} - items = line.split(" ") - if len(items)<12: return True - if not (items[3][0]=='[' and items[4][-1]==']'): return True - payload["ip"]=items[0] - payload["ident"]=items[1] - payload["auth"]=items[2] - payload["verb"]=items[5][1:] - payload["request"]=items[6] - payload["httpversion"]=items[7][:-1] - payload["response"]=items[8] - try: - payload["bytes"]=int(items[9]) - except: - 
payload["bytes"]=0 - payload["referrer"]=items[10][1:-1] - agent = " ".join(items[11:]).replace('"','') - if "CMSPKG-v" in agent: agent = agent.replace("-v","/") - payload["agent"]=agent - payload["agent_type"]=agent.replace(" ","-").split("/",1)[0].upper() - tsec = mktime(datetime.strptime(items[3][1:],'%d/%b/%Y:%H:%M:%S').timetuple()) - week = str(int(tsec/(86400*7))) - payload["@timestamp"]=int(tsec*1000) - id = sha1(line.encode()).hexdigest() - if (count%1000)==0: print("Processed entries",count) - if not send_payload("apache-cmsrep-"+week,"access_log", id, dumps(payload)): - return False - if payload["verb"] != "GET": return True - items = payload["request"].replace("/cms/cpt/Software/download/","/cmssw/",1).split("/") - if len(items)<6: return True - if items[3] == "apt": items[3]="PRMS" - if items[3] != "RPMS": return True - pkg, cmspkg, arch, repo, dev = items[-1], "apt", "" , "", 0 - if "?" in pkg: - pkg, pkgopts = pkg.split("?",1) - if "version=" in pkgopts: cmspkg = pkgopts.split("version=",1)[1].split("&",1)[0] - if not pkg.endswith(".rpm"): return True - if (items[1] == "cgi-bin") and items[2].startswith("cmspkg"): - if len(items)<8: return True - if items[2].endswith('-dev'): dev=1 - repo, arch = items[4], items[5] - elif items[1] == "cmssw": - repo, arch = items[2], items[4] - else: - return True - from _py2with3compatibility import unquote - xpayload = {'dev' : dev, 'repository' : unquote(repo), 'architecture' : unquote(arch), 'package' : unquote(pkg).split("-1-",1)[0], 'cmspkg' : unquote(cmspkg)} - for x in ["@timestamp","ip"]: xpayload[x] = payload[x] - return send_payload("cmspkg-access-"+week,"rpm-packages", id, dumps(xpayload)) -count=run_cmd("pgrep -l -x -f '^python3 .*/es_cmsrep_apache.py$' | wc -l",False) -if int(count)>1: exit(0) -logs = run_cmd("ls -rt /var/log/httpd/cmsrep-non-ssl_access.log* | grep -v '[.]gz$'").split("\n") -log = logwatch("httpd",log_dir="/data/es") -s,c=log.process(logs, process) -print("Total entries processed",c) +def process(line, count): + for agent in LOGWATCH_APACHE_IGNORE_AGENTS: + if agent in line: + return True + payload = {} + items = line.split(" ") + if len(items) < 12: + return True + if not (items[3][0] == "[" and items[4][-1] == "]"): + return True + payload["ip"] = items[0] + payload["ident"] = items[1] + payload["auth"] = items[2] + payload["verb"] = items[5][1:] + payload["request"] = items[6] + payload["httpversion"] = items[7][:-1] + payload["response"] = items[8] + try: + payload["bytes"] = int(items[9]) + except: + payload["bytes"] = 0 + payload["referrer"] = items[10][1:-1] + agent = " ".join(items[11:]).replace('"', "") + if "CMSPKG-v" in agent: + agent = agent.replace("-v", "/") + payload["agent"] = agent + payload["agent_type"] = agent.replace(" ", "-").split("/", 1)[0].upper() + tsec = mktime(datetime.strptime(items[3][1:], "%d/%b/%Y:%H:%M:%S").timetuple()) + week = str(int(tsec / (86400 * 7))) + payload["@timestamp"] = int(tsec * 1000) + id = sha1(line.encode()).hexdigest() + if (count % 1000) == 0: + print("Processed entries", count) + if not send_payload("apache-cmsrep-" + week, "access_log", id, dumps(payload)): + return False + if payload["verb"] != "GET": + return True + items = payload["request"].replace("/cms/cpt/Software/download/", "/cmssw/", 1).split("/") + if len(items) < 6: + return True + if items[3] == "apt": + items[3] = "PRMS" + if items[3] != "RPMS": + return True + pkg, cmspkg, arch, repo, dev = items[-1], "apt", "", "", 0 + if "?" 
in pkg: + pkg, pkgopts = pkg.split("?", 1) + if "version=" in pkgopts: + cmspkg = pkgopts.split("version=", 1)[1].split("&", 1)[0] + if not pkg.endswith(".rpm"): + return True + if (items[1] == "cgi-bin") and items[2].startswith("cmspkg"): + if len(items) < 8: + return True + if items[2].endswith("-dev"): + dev = 1 + repo, arch = items[4], items[5] + elif items[1] == "cmssw": + repo, arch = items[2], items[4] + else: + return True + from _py2with3compatibility import unquote + + xpayload = { + "dev": dev, + "repository": unquote(repo), + "architecture": unquote(arch), + "package": unquote(pkg).split("-1-", 1)[0], + "cmspkg": unquote(cmspkg), + } + for x in ["@timestamp", "ip"]: + xpayload[x] = payload[x] + return send_payload("cmspkg-access-" + week, "rpm-packages", id, dumps(xpayload)) + +count = run_cmd("pgrep -l -x -f '^python3 .*/es_cmsrep_apache.py$' | wc -l", False) +if int(count) > 1: + exit(0) +logs = run_cmd("ls -rt /var/log/httpd/cmsrep-non-ssl_access.log* | grep -v '[.]gz$'").split("\n") +log = logwatch("httpd", log_dir="/data/es") +s, c = log.process(logs, process) +print("Total entries processed", c) diff --git a/es_cmssdt_apache.py b/es_cmssdt_apache.py index 18ba52e2b0f3..db6559f54bcb 100755 --- a/es_cmssdt_apache.py +++ b/es_cmssdt_apache.py @@ -8,48 +8,56 @@ from json import dumps from logwatch import logwatch, run_cmd, LOGWATCH_APACHE_IGNORE_AGENTS -def process (line, count): - for agent in LOGWATCH_APACHE_IGNORE_AGENTS: - if agent in line: return True - payload = {} - items = line.split(" ") - if len(items)<10: return True - if not (items[3][0]=='[' and items[4][-1]==']'): return True - payload["ip"]=items[0] - payload["ident"]=items[1] - payload["auth"]=items[2] - payload["verb"]=items[5][1:] - payload["request"]=items[6] - payload["httpversion"]=items[7][:-1] - payload["response"]=items[8] - try: - payload["bytes"]=int(items[9]) - except: - payload["bytes"]=0 - tsec = mktime(datetime.strptime(items[3][1:],'%d/%b/%Y:%H:%M:%S').timetuple()) - week = str(int(tsec/(86400*7))) - payload["@timestamp"]=int(tsec*1000) - if len(items)>10: payload["referrer"]=items[10][1:-1] - if len(items)>11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): - payload["ip"]=items[11][1:-1] - if len(items)>12: - agent = " ".join(items[12:]).replace('"','') - payload["agent"] = agent - payload["agent_type"]=agent.replace(" ","-").split("/",1)[0].upper() - id = sha1(line.encode()).hexdigest() - if (count%1000)==0: print("Processed entries",count) - if not send_payload("apache-cmssdt-"+week,"access_log", id, dumps(payload)): - return False - if payload["request"].startswith("/SDT/releases.map?release="): - xpayload = dict(item.split("=") for item in payload["request"].split("?",1)[1].split("&")) - for x in ["@timestamp","ip"]: xpayload[x] = payload[x] - return send_payload("scram-access-"+week,"cmssw-releases", id, dumps(xpayload)) - return True -count=run_cmd("pgrep -l -x -f '^python3 .*/es_cmssdt_apache.py$' | wc -l",False) -if int(count)>1: exit(0) -logs = run_cmd("ls -rt /var/log/httpd/sdt-access_log* | grep -v '[.]gz$'").split("\n") -log = logwatch("httpd",log_dir="/data/es") -s,c=log.process(logs, process) -print("Total entries processed",c) +def process(line, count): + for agent in LOGWATCH_APACHE_IGNORE_AGENTS: + if agent in line: + return True + payload = {} + items = line.split(" ") + if len(items) < 10: + return True + if not (items[3][0] == "[" and items[4][-1] == "]"): + return True + payload["ip"] = items[0] + payload["ident"] = items[1] + payload["auth"] = items[2] + payload["verb"] 
= items[5][1:] + payload["request"] = items[6] + payload["httpversion"] = items[7][:-1] + payload["response"] = items[8] + try: + payload["bytes"] = int(items[9]) + except: + payload["bytes"] = 0 + tsec = mktime(datetime.strptime(items[3][1:], "%d/%b/%Y:%H:%M:%S").timetuple()) + week = str(int(tsec / (86400 * 7))) + payload["@timestamp"] = int(tsec * 1000) + if len(items) > 10: + payload["referrer"] = items[10][1:-1] + if len(items) > 11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): + payload["ip"] = items[11][1:-1] + if len(items) > 12: + agent = " ".join(items[12:]).replace('"', "") + payload["agent"] = agent + payload["agent_type"] = agent.replace(" ", "-").split("/", 1)[0].upper() + id = sha1(line.encode()).hexdigest() + if (count % 1000) == 0: + print("Processed entries", count) + if not send_payload("apache-cmssdt-" + week, "access_log", id, dumps(payload)): + return False + if payload["request"].startswith("/SDT/releases.map?release="): + xpayload = dict(item.split("=") for item in payload["request"].split("?", 1)[1].split("&")) + for x in ["@timestamp", "ip"]: + xpayload[x] = payload[x] + return send_payload("scram-access-" + week, "cmssw-releases", id, dumps(xpayload)) + return True + +count = run_cmd("pgrep -l -x -f '^python3 .*/es_cmssdt_apache.py$' | wc -l", False) +if int(count) > 1: + exit(0) +logs = run_cmd("ls -rt /var/log/httpd/sdt-access_log* | grep -v '[.]gz$'").split("\n") +log = logwatch("httpd", log_dir="/data/es") +s, c = log.process(logs, process) +print("Total entries processed", c) diff --git a/es_doxygen_apache.py b/es_doxygen_apache.py index 257cc2f01ab4..fe761f7444de 100755 --- a/es_doxygen_apache.py +++ b/es_doxygen_apache.py @@ -8,44 +8,51 @@ from json import dumps from logwatch import logwatch, run_cmd, LOGWATCH_APACHE_IGNORE_AGENTS -def process (line, count): - for agent in LOGWATCH_APACHE_IGNORE_AGENTS: - if agent in line: return True - payload = {} - items = line.split(" ") - if len(items)<10: return True - if not (items[3][0]=='[' and items[4][-1]==']'): return True - payload["ip"]=items[0] - payload["ident"]=items[1] - payload["auth"]=items[2] - payload["verb"]=items[5][1:] - payload["request"]=items[6] - payload["httpversion"]=items[7][:-1] - payload["response"]=items[8] - try: - payload["bytes"]=int(items[9]) - except: - payload["bytes"]=0 - tsec = mktime(datetime.strptime(items[3][1:],'%d/%b/%Y:%H:%M:%S').timetuple()) - week = str(int(tsec/(86400*7))) - payload["@timestamp"]=int(tsec*1000) - if len(items)>10: payload["referrer"]=items[10][1:-1] - if len(items)>11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): - payload["ip"]=items[11][1:-1] - if len(items)>12: - agent = " ".join(items[12:]).replace('"','') - payload["agent"] = agent - payload["agent_type"]=agent.replace(" ","-").split("/",1)[0].upper() - id = sha1(line.encode()).hexdigest() - if (count%1000)==0: print("Processed entries",count) - if not send_payload("apache-doxygen-"+week,"access_log", id, dumps(payload)): - return False - return True -count=run_cmd("pgrep -l -x -f '^python3 .*/es_doxygen_apache.py$' | wc -l",False) -if int(count)>1: exit(0) -logs = run_cmd("ls -rt /var/log/httpd/access_log* | grep -v '[.]gz$'").split("\n") -log = logwatch("httpd",log_dir="/data/es") -s,c=log.process(logs, process) -print("Total entries processed",c) +def process(line, count): + for agent in LOGWATCH_APACHE_IGNORE_AGENTS: + if agent in line: + return True + payload = {} + items = line.split(" ") + if len(items) < 10: + return True + if not (items[3][0] == "[" and items[4][-1] == "]"): + return 
True + payload["ip"] = items[0] + payload["ident"] = items[1] + payload["auth"] = items[2] + payload["verb"] = items[5][1:] + payload["request"] = items[6] + payload["httpversion"] = items[7][:-1] + payload["response"] = items[8] + try: + payload["bytes"] = int(items[9]) + except: + payload["bytes"] = 0 + tsec = mktime(datetime.strptime(items[3][1:], "%d/%b/%Y:%H:%M:%S").timetuple()) + week = str(int(tsec / (86400 * 7))) + payload["@timestamp"] = int(tsec * 1000) + if len(items) > 10: + payload["referrer"] = items[10][1:-1] + if len(items) > 11 and re.match('^"[0-9]+(\.[0-9]+)+"$', items[11]): + payload["ip"] = items[11][1:-1] + if len(items) > 12: + agent = " ".join(items[12:]).replace('"', "") + payload["agent"] = agent + payload["agent_type"] = agent.replace(" ", "-").split("/", 1)[0].upper() + id = sha1(line.encode()).hexdigest() + if (count % 1000) == 0: + print("Processed entries", count) + if not send_payload("apache-doxygen-" + week, "access_log", id, dumps(payload)): + return False + return True + +count = run_cmd("pgrep -l -x -f '^python3 .*/es_doxygen_apache.py$' | wc -l", False) +if int(count) > 1: + exit(0) +logs = run_cmd("ls -rt /var/log/httpd/access_log* | grep -v '[.]gz$'").split("\n") +log = logwatch("httpd", log_dir="/data/es") +s, c = log.process(logs, process) +print("Total entries processed", c) diff --git a/es_externals_stats.py b/es_externals_stats.py index 1ba477b84a02..59366d881480 100644 --- a/es_externals_stats.py +++ b/es_externals_stats.py @@ -3,7 +3,6 @@ from es_utils import es_send_external_stats if __name__ == "__main__": - stats_json_f = argv[1] opts_json_f = argv[2] es_send_external_stats(stats_json_f, opts_json_f, 1) diff --git a/es_hypernews.py b/es_hypernews.py index 1f00588369a2..a15b082dcf56 100755 --- a/es_hypernews.py +++ b/es_hypernews.py @@ -1,46 +1,50 @@ #!/usr/bin/env python3 import sys, os, re -from datetime import datetime,timedelta +from datetime import datetime, timedelta from _py2with3compatibility import run_cmd from es_utils import send_payload from hashlib import sha1 from json import dumps from time import time -apache_log_dir="/var/log/httpd" +apache_log_dir = "/var/log/httpd" ssl_error_log = "ssl_error_log" -search_for=" Timeout waiting for output from CGI script " +search_for = " Timeout waiting for output from CGI script " filter_search = "" process_all = False -files_to_process=[] -cmd_to_get_logs = "ls -rt "+os.path.join(apache_log_dir,ssl_error_log)+"*" -if len(sys.argv)==1: - process_all = True - cmd_to_get_logs = cmd_to_get_logs + " | tail -2" - prev_hour = datetime.now()-timedelta(hours=1) - filter_search = " | grep '"+prev_hour.strftime("^\[%a %b %d %H:[0-5][0-9]:[0-5][0-9] %Y\] ")+"'" +files_to_process = [] +cmd_to_get_logs = "ls -rt " + os.path.join(apache_log_dir, ssl_error_log) + "*" +if len(sys.argv) == 1: + process_all = True + cmd_to_get_logs = cmd_to_get_logs + " | tail -2" + prev_hour = datetime.now() - timedelta(hours=1) + filter_search = ( + " | grep '" + prev_hour.strftime("^\[%a %b %d %H:[0-5][0-9]:[0-5][0-9] %Y\] ") + "'" + ) err, out = run_cmd(cmd_to_get_logs) if err: - print(out) - sys.exit(1) -ReTime = re.compile('^\[[A-Za-z]{3} ([A-Za-z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4})\] \[[^\]]+\] \[client (.+)\]\s(.+)') + print(out) + sys.exit(1) +ReTime = re.compile( + "^\[[A-Za-z]{3} ([A-Za-z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4})\] \[[^\]]+\] \[client (.+)\]\s(.+)" +) for log in out.split("\n"): - find_cmd = "grep '%s' %s %s" % (search_for, log, filter_search) - err, out = run_cmd(find_cmd) 
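The watcher above greps the last hour of ssl_error_log for CGI timeout messages and indexes each match under a 7-day week number. A sketch (hypothetical log line; timestamp() used as the portable spelling of strftime('%s')) of how one matched line becomes the index name and payload:

import re
from datetime import datetime

RE_TIME = re.compile(
    r"^\[[A-Za-z]{3} ([A-Za-z]{3} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} [0-9]{4})\] "
    r"\[[^\]]+\] \[client (.+)\]\s(.+)")

def to_payload(line):
    m = RE_TIME.match(line)
    if not m:
        return None, None
    stamp = datetime.strptime(m.group(1), "%b %d %H:%M:%S %Y")
    tsec = int(stamp.timestamp())
    week = str(int(tsec / (86400 * 7)))  # same 7-day bucket used in the index name
    return "hypernews-" + week, {"@timestamp": tsec * 1000,
                                 "ip": m.group(2), "message": line}

index, payload = to_payload(
    "[Mon Oct 30 12:00:00 2023] [cgi:error] [client 10.0.0.1] "
    "Timeout waiting for output from CGI script /hn/fake.cgi")
print(index, payload)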
- for line in out.split("\n"): - m = ReTime.match(line) - if m: - tsec = int(datetime.strptime(m.group(1), "%b %d %H:%M:%S %Y").strftime('%s')) - week = str(int(tsec/(86400*7))) - timestamp = tsec*1000 - payload = {} - payload['@timestamp'] = timestamp - payload['ip'] = m.group(2) - payload['message'] = line - id = sha1((str(timestamp) + m.group(2)).encode()).hexdigest() - send_payload("hypernews-"+week,"hn-timeouts",id, dumps(payload)) + find_cmd = "grep '%s' %s %s" % (search_for, log, filter_search) + err, out = run_cmd(find_cmd) + for line in out.split("\n"): + m = ReTime.match(line) + if m: + tsec = int(datetime.strptime(m.group(1), "%b %d %H:%M:%S %Y").strftime("%s")) + week = str(int(tsec / (86400 * 7))) + timestamp = tsec * 1000 + payload = {} + payload["@timestamp"] = timestamp + payload["ip"] = m.group(2) + payload["message"] = line + id = sha1((str(timestamp) + m.group(2)).encode()).hexdigest() + send_payload("hypernews-" + week, "hn-timeouts", id, dumps(payload)) payload = {} -payload['@timestamp'] = int(time()*1000) -send_payload("hypernews","hn-heartbeat",str(payload['@timestamp']), dumps(payload)) +payload["@timestamp"] = int(time() * 1000) +send_payload("hypernews", "hn-heartbeat", str(payload["@timestamp"]), dumps(payload)) diff --git a/es_hypernews_log.py b/es_hypernews_log.py index 8bbf06b1625f..87fba8aae15d 100755 --- a/es_hypernews_log.py +++ b/es_hypernews_log.py @@ -6,61 +6,73 @@ def cust_strip(str_in): - return str_in.lstrip('/').rstrip(':"') + return str_in.lstrip("/").rstrip(':"') def prs_tprl(str_in): - str_out = list(map(str.strip,str_in.split(':'))) - val = int(str_out[0].split('/')[0]) - key = str_out[1].split(' ')[0] - return key,val + str_out = list(map(str.strip, str_in.split(":"))) + val = int(str_out[0].split("/")[0]) + key = str_out[1].split(" ")[0] + return key, val def rm_extra(str_in): - str_out = str_in.split(' ') - return str_out[-4].lstrip('/').rstrip('"'),int(str_out[-2]) + str_out = str_in.split(" ") + return str_out[-4].lstrip("/").rstrip('"'), int(str_out[-2]) def map_int_val(pair): - key , val = pair - return key , int(val) + key, val = pair + return key, int(val) -#get relevant info -match_hn = re.compile('.*\|\/.*emails') -match_tmp = re.compile('.*\|\/.*Time\(s\)') +# get relevant info +match_hn = re.compile(".*\|\/.*emails") +match_tmp = re.compile(".*\|\/.*Time\(s\)") temp_fails = [] egrps = [] payload = {} -err , cmd_out = run_cmd('logwatch --range yesterday --detail 10 --service sendmail') +err, cmd_out = run_cmd("logwatch --range yesterday --detail 10 --service sendmail") if err: - sys.exit(1) -for line in cmd_out.split('\n'): - if re.match(match_tmp,line): temp_fails.append(line) - elif 'Messages To Recipients:' in line: msgs_num = line - elif 'Addressed Recipients:' in line : adrpts = line - elif 'Bytes Transferred:' in line: byttr = line - elif re.match(match_hn,line): egrps.append(line) + sys.exit(1) +for line in cmd_out.split("\n"): + if re.match(match_tmp, line): + temp_fails.append(line) + elif "Messages To Recipients:" in line: + msgs_num = line + elif "Addressed Recipients:" in line: + adrpts = line + elif "Bytes Transferred:" in line: + byttr = line + elif re.match(match_hn, line): + egrps.append(line) -#process info +# process info yesterday = datetime.date.today() - datetime.timedelta(1) timestp = int(yesterday.strftime("%s")) * 1000 -temp_fails = dict(list(map(map_int_val,list((dict([list(map(cust_strip,x.split(' ')[-3:-1])) for x in temp_fails])).items())))) -msgs_num= list(map(str.strip,msgs_num.split(':'))) -adrpts = 
list(map(str.strip, adrpts.split(':'))) -byttr = list(map(str.strip, byttr.split(':'))) -egrp_emails = dict(list(map(rm_extra,[x.strip() for x in egrps]))) -#send info -payload['@timestamp'] = timestp -payload['msgs_to_rcpts'] = msgs_num[1] -payload['addressed_rcpts'] = adrpts[1] -payload['byts_transfd'] = byttr[1] -total=0 +temp_fails = dict( + list( + map( + map_int_val, + list((dict([list(map(cust_strip, x.split(" ")[-3:-1])) for x in temp_fails])).items()), + ) + ) +) +msgs_num = list(map(str.strip, msgs_num.split(":"))) +adrpts = list(map(str.strip, adrpts.split(":"))) +byttr = list(map(str.strip, byttr.split(":"))) +egrp_emails = dict(list(map(rm_extra, [x.strip() for x in egrps]))) +# send info +payload["@timestamp"] = timestp +payload["msgs_to_rcpts"] = msgs_num[1] +payload["addressed_rcpts"] = adrpts[1] +payload["byts_transfd"] = byttr[1] +total = 0 for k in egrp_emails: - payload['hn-'+k] = egrp_emails[k] - total += egrp_emails[k] -payload['posts'] = total -payload['forums'] = len(egrp_emails) + payload["hn-" + k] = egrp_emails[k] + total += egrp_emails[k] +payload["posts"] = total +payload["forums"] = len(egrp_emails) for k in temp_fails: - payload['fail-'+k] = temp_fails[k] -send_payload("hypernews","mailinfo",timestp, dumps(payload)) + payload["fail-" + k] = temp_fails[k] +send_payload("hypernews", "mailinfo", timestp, dumps(payload)) diff --git a/es_ib_build_stats.py b/es_ib_build_stats.py index ed4f14bfc58b..6cc0dc67f1b7 100755 --- a/es_ib_build_stats.py +++ b/es_ib_build_stats.py @@ -8,97 +8,110 @@ from hashlib import sha1 from cmsutils import cmsswIB2Week -ReDate = re.compile("^.* DATE=[A-Z][a-z]{2}\s+([A-Z][a-z]{2}\s+[0-9]{1,2}\s+\d\d:\d\d:\d\d\s+)[A-Z]{3,4}\s+(\d\d\d\d)") +ReDate = re.compile( + "^.* DATE=[A-Z][a-z]{2}\s+([A-Z][a-z]{2}\s+[0-9]{1,2}\s+\d\d:\d\d:\d\d\s+)[A-Z]{3,4}\s+(\d\d\d\d)" +) ReUpload = re.compile("^.*sync-back\s+upload\s+(.*\s|)cmssw(-patch|)(\s.*|)$") ReRel = re.compile("^[+]\s+RELEASE_FORMAT=(CMSSW_.+)") ReArch = re.compile("^[+]\s+ARCHITECTURE=(.+)") ReType = re.compile(".+specs-only\s+build\s+(cmssw-patch).*") ReFinish = re.compile("Finished:\s+[A-Z]+") -ReReleaseQueue = re.compile('(.*_X)') +ReReleaseQueue = re.compile("(.*_X)") + def process_build_any_ib(logFile): - rel = "" - arch = "" - uploadTime=0 - stime=0 - upload=False - jstart = 0 - jend=0 - patch = 0 - finished = False - with open(logFile) as f: - for line in f: - line = line.strip() - if not jstart: - m=ReDate.match(line) - if m: - jstart = datetime.strptime(m.group(1)+m.group(2), "%b %d %H:%M:%S %Y") - continue - if not arch: - m=ReArch.match(line) - if m: arch=m.group(1) - continue - if not rel: - m=ReRel.match(line) - if m: rel=m.group(1) - continue - if ReFinish.match(line): - finished = True - if "ABORTED" in line: return True - break - if ReUpload.match(line): - upload=True - print("Upload: ",stime,line) - continue - if ReType.match(line): patch=1 - m=ReDate.match(line) - if not m: continue - xtime = datetime.strptime(m.group(1)+m.group(2), "%b %d %H:%M:%S %Y") - jend = xtime - if not upload: - stime = xtime - else: - upload=False - dtime = xtime - stime - uploadTime += dtime.seconds - print("FINISHED: ",finished,rel, arch,uploadTime,jstart,upload,patch) - if not rel or not arch or not finished: return finished - urlx = logFile.split("/") - url = "https://cmssdt.cern.ch/jenkins/job/build-any-ib/"+logFile.split("/")[-2]+"/console" - timestp = getmtime(logFile) - ttime=0 - if jend and jstart: - dtime = jend - jstart - ttime = dtime.seconds - print(ttime, uploadTime, rel, arch, 
patch, url) - payload = {} - payload["release"] = rel - payload["architecture"] = arch - payload["total_time"] = ttime - payload["upload_time"] = uploadTime - payload["patch"] = patch - payload["release_queue"] = ReReleaseQueue.match(rel).group(1) - payload["@timestamp"] = int(timestp*1000) - payload["url"]=url - week, rel_sec = cmsswIB2Week(rel) - print(payload) - id = sha1((rel + arch).encode()).hexdigest() - send_payload("jenkins-ibs-"+week,"timings",id,json.dumps(payload)) - return finished - -force=False + rel = "" + arch = "" + uploadTime = 0 + stime = 0 + upload = False + jstart = 0 + jend = 0 + patch = 0 + finished = False + with open(logFile) as f: + for line in f: + line = line.strip() + if not jstart: + m = ReDate.match(line) + if m: + jstart = datetime.strptime(m.group(1) + m.group(2), "%b %d %H:%M:%S %Y") + continue + if not arch: + m = ReArch.match(line) + if m: + arch = m.group(1) + continue + if not rel: + m = ReRel.match(line) + if m: + rel = m.group(1) + continue + if ReFinish.match(line): + finished = True + if "ABORTED" in line: + return True + break + if ReUpload.match(line): + upload = True + print("Upload: ", stime, line) + continue + if ReType.match(line): + patch = 1 + m = ReDate.match(line) + if not m: + continue + xtime = datetime.strptime(m.group(1) + m.group(2), "%b %d %H:%M:%S %Y") + jend = xtime + if not upload: + stime = xtime + else: + upload = False + dtime = xtime - stime + uploadTime += dtime.seconds + print("FINISHED: ", finished, rel, arch, uploadTime, jstart, upload, patch) + if not rel or not arch or not finished: + return finished + urlx = logFile.split("/") + url = "https://cmssdt.cern.ch/jenkins/job/build-any-ib/" + logFile.split("/")[-2] + "/console" + timestp = getmtime(logFile) + ttime = 0 + if jend and jstart: + dtime = jend - jstart + ttime = dtime.seconds + print(ttime, uploadTime, rel, arch, patch, url) + payload = {} + payload["release"] = rel + payload["architecture"] = arch + payload["total_time"] = ttime + payload["upload_time"] = uploadTime + payload["patch"] = patch + payload["release_queue"] = ReReleaseQueue.match(rel).group(1) + payload["@timestamp"] = int(timestp * 1000) + payload["url"] = url + week, rel_sec = cmsswIB2Week(rel) + print(payload) + id = sha1((rel + arch).encode()).hexdigest() + send_payload("jenkins-ibs-" + week, "timings", id, json.dumps(payload)) + return finished + + +force = False try: - x=sys.argv[1] - force=True + x = sys.argv[1] + force = True except: - pass -err, logs = run_cmd("find /build/jobs/build-any-ib/builds -follow -maxdepth 2 -mindepth 2 -name log -type f") -logs = logs.split('\n') + pass +err, logs = run_cmd( + "find /build/jobs/build-any-ib/builds -follow -maxdepth 2 -mindepth 2 -name log -type f" +) +logs = logs.split("\n") for logFile in logs: - flagFile = logFile + '.ib-build' - if force or (not exists(flagFile)): - print("Processing ",logFile) - done = True - if re.match("^.+/builds/\d+/log$",logFile): - done = process_build_any_ib(logFile) - if done: run_cmd('touch "' + flagFile + '"') + flagFile = logFile + ".ib-build" + if force or (not exists(flagFile)): + print("Processing ", logFile) + done = True + if re.match("^.+/builds/\d+/log$", logFile): + done = process_build_any_ib(logFile) + if done: + run_cmd('touch "' + flagFile + '"') diff --git a/es_ibs_log.py b/es_ibs_log.py index ac8620594631..df8cf7b97bb5 100755 --- a/es_ibs_log.py +++ b/es_ibs_log.py @@ -2,7 +2,7 @@ from __future__ import print_function from hashlib import sha1 -import os, json, datetime, sys +import os, json, datetime, 
sys from glob import glob from os.path import exists, dirname, getmtime from es_utils import send_payload @@ -13,227 +13,267 @@ def sha1hexdigest(data): - return sha1(data.encode()).hexdigest() + return sha1(data.encode()).hexdigest() + def send_unittest_dataset(datasets, payload, id, index, doc): - for ds in datasets: - print("Processing ",ds) - if not 'root://' in ds: continue - ds_items = ds.split("?",1) - ds_items.append("") - ibeos = "/store/user/cmsbuild" - if ibeos in ds_items[0]: ds_items[0] = ds_items[0].replace(ibeos,"") - else: ibeos="" - payload["protocol"]=ds_items[0].split("/store/",1)[0]+ibeos - payload["protocol_opts"]=ds_items[1] - payload["lfn"]="/store/"+ds_items[0].split("/store/",1)[1].strip() - print ("Sending",index, doc, sha1hexdigest(id + ds), json.dumps(payload)) - send_payload(index, doc, sha1hexdigest(id + ds), json.dumps(payload)) + for ds in datasets: + print("Processing ", ds) + if not "root://" in ds: + continue + ds_items = ds.split("?", 1) + ds_items.append("") + ibeos = "/store/user/cmsbuild" + if ibeos in ds_items[0]: + ds_items[0] = ds_items[0].replace(ibeos, "") + else: + ibeos = "" + payload["protocol"] = ds_items[0].split("/store/", 1)[0] + ibeos + payload["protocol_opts"] = ds_items[1] + payload["lfn"] = "/store/" + ds_items[0].split("/store/", 1)[1].strip() + print("Sending", index, doc, sha1hexdigest(id + ds), json.dumps(payload)) + send_payload(index, doc, sha1hexdigest(id + ds), json.dumps(payload)) + def process_unittest_log(logFile): - t = getmtime(logFile) - timestp = int(t*1000) - pathInfo = logFile.split('/') - architecture = pathInfo[4] - release = pathInfo[8] - week, rel_sec = cmsswIB2Week (release) - package = pathInfo[-3]+"/"+ pathInfo[-2] - payload = {"type" : "unittest"} - payload["release"]=release - payload["architecture"]=architecture - payload["@timestamp"]=timestp - config_list = [] - custom_rule_set = [ - {"str_to_match": "test (.*) had ERRORS", "name": "{0} failed", 'control_type': ResultTypeEnum.ISSUE }, - {"str_to_match": '===== Test "([^\s]+)" ====', "name": "{0}", 'control_type': ResultTypeEnum.TEST } - ] - with open(logFile, encoding="ascii", errors="ignore") as f: - utname = None - datasets = [] - xid = None - for index, l in enumerate(f): - l = l.strip() - config_list = add_exception_to_config(l,index,config_list,custom_rule_set) - if l.startswith('===== Test "') and l.endswith('" ===='): - if utname: send_unittest_dataset(datasets, payload, xid, "ib-dataset-"+week, "unittest-dataset") + t = getmtime(logFile) + timestp = int(t * 1000) + pathInfo = logFile.split("/") + architecture = pathInfo[4] + release = pathInfo[8] + week, rel_sec = cmsswIB2Week(release) + package = pathInfo[-3] + "/" + pathInfo[-2] + payload = {"type": "unittest"} + payload["release"] = release + payload["architecture"] = architecture + payload["@timestamp"] = timestp + config_list = [] + custom_rule_set = [ + { + "str_to_match": "test (.*) had ERRORS", + "name": "{0} failed", + "control_type": ResultTypeEnum.ISSUE, + }, + { + "str_to_match": '===== Test "([^\s]+)" ====', + "name": "{0}", + "control_type": ResultTypeEnum.TEST, + }, + ] + with open(logFile, encoding="ascii", errors="ignore") as f: + utname = None datasets = [] - utname = l.split('"')[1] - payload["name"] = "%s/%s" % (package, utname) - xid = sha1hexdigest(release + architecture + package + str(utname)) - elif " Initiating request to open file " in l: - try: - rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] - if (not "file:" in rootfile) and (not rootfile in 
datasets): datasets.append(rootfile) - except Exception as e: - print("ERROR: ",logFile,e) - traceback.print_exc(file=sys.stdout) - if datasets and xid: - send_unittest_dataset(datasets, payload, xid, "ib-dataset-"+week,"unittest-dataset") - transform_and_write_config_file(logFile + "-read_config", config_list) - return + xid = None + for index, l in enumerate(f): + l = l.strip() + config_list = add_exception_to_config(l, index, config_list, custom_rule_set) + if l.startswith('===== Test "') and l.endswith('" ===='): + if utname: + send_unittest_dataset( + datasets, payload, xid, "ib-dataset-" + week, "unittest-dataset" + ) + datasets = [] + utname = l.split('"')[1] + payload["name"] = "%s/%s" % (package, utname) + xid = sha1hexdigest(release + architecture + package + str(utname)) + elif " Initiating request to open file " in l: + try: + rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] + if (not "file:" in rootfile) and (not rootfile in datasets): + datasets.append(rootfile) + except Exception as e: + print("ERROR: ", logFile, e) + traceback.print_exc(file=sys.stdout) + if datasets and xid: + send_unittest_dataset(datasets, payload, xid, "ib-dataset-" + week, "unittest-dataset") + transform_and_write_config_file(logFile + "-read_config", config_list) + return + def process_addon_log(logFile): - t = getmtime(logFile) - timestp = int(t*1000) - pathInfo = logFile.split('/') - architecture = pathInfo[4] - release = pathInfo[8] - week, rel_sec = cmsswIB2Week (release) - datasets = [] - payload = {"type" : "addon"} - payload["release"]=release - payload["architecture"]=architecture - payload["@timestamp"]=timestp - payload["name"] = pathInfo[-1].split("-")[1].split("_cmsRun_")[0].split("_cmsDriver.py_")[0] - id = sha1hexdigest(release + architecture + "addon" + payload["name"]) - config_list = [] - with open(logFile, encoding="ascii", errors="ignore") as f: - for index, l in enumerate(f): - l = l.strip() - config_list = add_exception_to_config(l,index, config_list) - if " Initiating request to open file " in l: - try: - rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] - if (not "file:" in rootfile) and (not rootfile in datasets): datasets.append(rootfile) - except: pass - send_unittest_dataset(datasets, payload, id, "ib-dataset-"+week,"addon-dataset") - transform_and_write_config_file(logFile + "-read_config", config_list) - return + t = getmtime(logFile) + timestp = int(t * 1000) + pathInfo = logFile.split("/") + architecture = pathInfo[4] + release = pathInfo[8] + week, rel_sec = cmsswIB2Week(release) + datasets = [] + payload = {"type": "addon"} + payload["release"] = release + payload["architecture"] = architecture + payload["@timestamp"] = timestp + payload["name"] = pathInfo[-1].split("-")[1].split("_cmsRun_")[0].split("_cmsDriver.py_")[0] + id = sha1hexdigest(release + architecture + "addon" + payload["name"]) + config_list = [] + with open(logFile, encoding="ascii", errors="ignore") as f: + for index, l in enumerate(f): + l = l.strip() + config_list = add_exception_to_config(l, index, config_list) + if " Initiating request to open file " in l: + try: + rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] + if (not "file:" in rootfile) and (not rootfile in datasets): + datasets.append(rootfile) + except: + pass + send_unittest_dataset(datasets, payload, id, "ib-dataset-" + week, "addon-dataset") + transform_and_write_config_file(logFile + "-read_config", config_list) + return + def process_hlt_log(logFile): - t = 
getmtime(logFile) - timestp = int(t*1000) - pathInfo = logFile.split('/') - architecture = pathInfo[-2] - release = pathInfo[-3] - week, rel_sec = cmsswIB2Week (release) - datasets = [] - payload = {"type" : "hlt"} - payload["release"]=release - payload["architecture"]=architecture - payload["@timestamp"]=timestp - payload["name"] = pathInfo[-1][:-4] - id = sha1hexdigest(release + architecture + "hlt" + payload["name"]) - with open(logFile, encoding="ascii", errors="ignore") as f: - for index, l in enumerate(f): - l = l.strip() - if " Initiating request to open file " in l: - try: - rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] - if (not "file:" in rootfile) and (not rootfile in datasets): datasets.append(rootfile) - except: pass - send_unittest_dataset(datasets, payload, id, "ib-dataset-"+week,"hlt-dataset") - return + t = getmtime(logFile) + timestp = int(t * 1000) + pathInfo = logFile.split("/") + architecture = pathInfo[-2] + release = pathInfo[-3] + week, rel_sec = cmsswIB2Week(release) + datasets = [] + payload = {"type": "hlt"} + payload["release"] = release + payload["architecture"] = architecture + payload["@timestamp"] = timestp + payload["name"] = pathInfo[-1][:-4] + id = sha1hexdigest(release + architecture + "hlt" + payload["name"]) + with open(logFile, encoding="ascii", errors="ignore") as f: + for index, l in enumerate(f): + l = l.strip() + if " Initiating request to open file " in l: + try: + rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] + if (not "file:" in rootfile) and (not rootfile in datasets): + datasets.append(rootfile) + except: + pass + send_unittest_dataset(datasets, payload, id, "ib-dataset-" + week, "hlt-dataset") + return + def process_ib_utests(logFile): - t = getmtime(logFile) - timestp = int(t*1000) - payload = {} - pathInfo = logFile.split('/') - architecture = pathInfo[4] - release = pathInfo[8] - week, rel_sec = cmsswIB2Week (release) - index = "ibs-"+week - document = "unittests" - payload["release"] = release - release_queue = "_".join(release.split("_", -1)[:-1]).split("_", 3) - payload["release_queue"] = "_".join(release_queue[0:3]) - flavor = release_queue[-1] - if flavor == 'X': flavor = 'DEFAULT' - payload["flavor"] = flavor - payload["architecture"] = architecture - payload["@timestamp"] = timestp - - if exists(logFile): - with open(logFile, encoding="ascii", errors="ignore") as f: - try: - it = iter(f) - line = next(it) - while '--------' not in line: - line = next(it) - while True: - line=next(it).strip() - if ":" in line: - pkg = line.split(':')[0].strip() - payload["url"] = 'https://cmssdt.cern.ch/SDT/cgi-bin/buildlogs/'+ architecture +'/'+ release +'/unitTestLogs/' + pkg - line = next(it).strip() - while ':' not in line: - if "had ERRORS" in line: - payload["status"] = 1 - else: - payload["status"] = 0 - utest= line.split(' ')[0] - payload["package"] = pkg - payload["name"] = utest - id = sha1hexdigest(release + architecture + utest) - print("==> ", json.dumps(payload) + '\n') - send_payload(index,document,id,json.dumps(payload)) - line = next(it).strip() - except Exception as e: - print("ERROR: File processed: %s" % e) - else: - print("Invalid File Path") - -#get log files + t = getmtime(logFile) + timestp = int(t * 1000) + payload = {} + pathInfo = logFile.split("/") + architecture = pathInfo[4] + release = pathInfo[8] + week, rel_sec = cmsswIB2Week(release) + index = "ibs-" + week + document = "unittests" + payload["release"] = release + release_queue = "_".join(release.split("_", 
-1)[:-1]).split("_", 3) + payload["release_queue"] = "_".join(release_queue[0:3]) + flavor = release_queue[-1] + if flavor == "X": + flavor = "DEFAULT" + payload["flavor"] = flavor + payload["architecture"] = architecture + payload["@timestamp"] = timestp + + if exists(logFile): + with open(logFile, encoding="ascii", errors="ignore") as f: + try: + it = iter(f) + line = next(it) + while "--------" not in line: + line = next(it) + while True: + line = next(it).strip() + if ":" in line: + pkg = line.split(":")[0].strip() + payload["url"] = ( + "https://cmssdt.cern.ch/SDT/cgi-bin/buildlogs/" + + architecture + + "/" + + release + + "/unitTestLogs/" + + pkg + ) + line = next(it).strip() + while ":" not in line: + if "had ERRORS" in line: + payload["status"] = 1 + else: + payload["status"] = 0 + utest = line.split(" ")[0] + payload["package"] = pkg + payload["name"] = utest + id = sha1hexdigest(release + architecture + utest) + print("==> ", json.dumps(payload) + "\n") + send_payload(index, document, id, json.dumps(payload)) + line = next(it).strip() + except Exception as e: + print("ERROR: File processed: %s" % e) + else: + print("Invalid File Path") + + +# get log files logs = run_cmd("find /data/sdt/buildlogs -mindepth 6 -maxdepth 6 -name 'unitTests-summary.log'") -logs = logs[1].split('\n') -#process log files +logs = logs[1].split("\n") +# process log files for logFile in logs: - flagFile = logFile + '.checked' - if not exists(flagFile): - print("Working on ",logFile) - process_ib_utests(logFile) - os.system('touch "' + flagFile + '"') + flagFile = logFile + ".checked" + if not exists(flagFile): + print("Working on ", logFile) + process_ib_utests(logFile) + os.system('touch "' + flagFile + '"') logs = run_cmd("find /data/sdt/buildlogs -mindepth 6 -maxdepth 6 -name 'unitTestLogs.zip'") -logs = logs[1].split('\n') -#process zip log files +logs = logs[1].split("\n") +# process zip log files for logFile in logs: - flagFile = logFile + '.checked' - if not exists(flagFile): - utdir = dirname(logFile) - print("Working on ",logFile) - try: - err, utlogs = run_cmd("cd %s; rm -rf UT; mkdir UT; cd UT; unzip ../unitTestLogs.zip" % utdir) - err, utlogs = run_cmd("find %s/UT -name 'unitTest.log' -type f" % utdir) - if not err: - for utlog in utlogs.split("\n"): - process_unittest_log(utlog) - run_cmd("touch %s" % flagFile) - except Exception as e: - print("ERROR: ",logFile,e) - traceback.print_exc(file=sys.stdout) - run_cmd("cd %s/UT ; zip -r ../unitTestLogs.zip ." % utdir) - run_cmd("rm -rf %s/UT" % utdir) + flagFile = logFile + ".checked" + if not exists(flagFile): + utdir = dirname(logFile) + print("Working on ", logFile) + try: + err, utlogs = run_cmd( + "cd %s; rm -rf UT; mkdir UT; cd UT; unzip ../unitTestLogs.zip" % utdir + ) + err, utlogs = run_cmd("find %s/UT -name 'unitTest.log' -type f" % utdir) + if not err: + for utlog in utlogs.split("\n"): + process_unittest_log(utlog) + run_cmd("touch %s" % flagFile) + except Exception as e: + print("ERROR: ", logFile, e) + traceback.print_exc(file=sys.stdout) + run_cmd("cd %s/UT ; zip -r ../unitTestLogs.zip ." 
% utdir) + run_cmd("rm -rf %s/UT" % utdir) logs = run_cmd("find /data/sdt/buildlogs -mindepth 6 -maxdepth 6 -name 'addOnTests.zip'") -logs = logs[1].split('\n') -#process zip log files +logs = logs[1].split("\n") +# process zip log files for logFile in logs: - flagFile = logFile + '.checked' - if not exists(flagFile): - utdir = dirname(logFile) - print("Working on ",logFile) - try: - err, utlogs = run_cmd("cd %s; rm -rf AO; mkdir AO; cd AO; unzip ../addOnTests.zip" % utdir) - err, utlogs = run_cmd("find %s/AO -name '*.log' -type f" % utdir) - if not err: - for utlog in utlogs.split("\n"): - process_addon_log(utlog) - run_cmd("touch %s" % flagFile) - except Exception as e: - print("ERROR:",e) - run_cmd("cd %s/AO ; zip -r ../addOnTests.zip ." % utdir) - run_cmd("rm -rf %s/AO" % utdir) - -dirs = run_cmd("find /data/sdt/SDT/jenkins-artifacts/HLT-Validation -maxdepth 2 -mindepth 2 -type d")[1].split('\n') + flagFile = logFile + ".checked" + if not exists(flagFile): + utdir = dirname(logFile) + print("Working on ", logFile) + try: + err, utlogs = run_cmd( + "cd %s; rm -rf AO; mkdir AO; cd AO; unzip ../addOnTests.zip" % utdir + ) + err, utlogs = run_cmd("find %s/AO -name '*.log' -type f" % utdir) + if not err: + for utlog in utlogs.split("\n"): + process_addon_log(utlog) + run_cmd("touch %s" % flagFile) + except Exception as e: + print("ERROR:", e) + run_cmd("cd %s/AO ; zip -r ../addOnTests.zip ." % utdir) + run_cmd("rm -rf %s/AO" % utdir) + +dirs = run_cmd( + "find /data/sdt/SDT/jenkins-artifacts/HLT-Validation -maxdepth 2 -mindepth 2 -type d" +)[1].split("\n") for d in dirs: - flagFile = d + '.checked' - if exists(flagFile): continue - for logFile in glob(d+"/*.log"): - print("Working on ",logFile) - try: - process_hlt_log(logFile) - except Exception as e: - print("ERROR:",e) - run_cmd("touch %s" % flagFile) + flagFile = d + ".checked" + if exists(flagFile): + continue + for logFile in glob(d + "/*.log"): + print("Working on ", logFile) + try: + process_hlt_log(logFile) + except Exception as e: + print("ERROR:", e) + run_cmd("touch %s" % flagFile) diff --git a/es_iwyu_logs.py b/es_iwyu_logs.py index 8fa9e05a1d30..e785f7253c5a 100755 --- a/es_iwyu_logs.py +++ b/es_iwyu_logs.py @@ -1,27 +1,36 @@ #!/bin/env python from __future__ import print_function -import sys , json , os +import sys, json, os from es_utils import send_payload + timestp = os.path.getmtime(sys.argv[1]) -items = sys.argv[1].split('/')[:-1] +items = sys.argv[1].split("/")[:-1] arch = items[-1] rel = items[-2] try: - data = json.loads(open(sys.argv[1]).read().strip()) + data = json.loads(open(sys.argv[1]).read().strip()) except: - print('json file not found/processed') + print("json file not found/processed") payload = {} -payload['architecture'] = arch -payload['release'] = rel -payload['@timestamp'] = int(timestp*1000) -index = 'iwyu' -document = 'iwyu-stats' +payload["architecture"] = arch +payload["release"] = rel +payload["@timestamp"] = int(timestp * 1000) +index = "iwyu" +document = "iwyu-stats" id = False for item in data: - payload['package'] = item - files , includes , excludes = data[item] - payload['files'] = files - payload['includes'] = includes - payload['excludes'] = excludes - payload['url'] = 'https://cmssdt.cern.ch/SDT/cgi-bin/buildlogs/iwyu/'+ arch + '/'+ rel + '/' + item + '/index.html' - send_payload(index,document,id,json.dumps(payload)) + payload["package"] = item + files, includes, excludes = data[item] + payload["files"] = files + payload["includes"] = includes + payload["excludes"] = excludes + 
payload["url"] = ( + "https://cmssdt.cern.ch/SDT/cgi-bin/buildlogs/iwyu/" + + arch + + "/" + + rel + + "/" + + item + + "/index.html" + ) + send_payload(index, document, id, json.dumps(payload)) diff --git a/es_reindex_indexes_with_pattern.py b/es_reindex_indexes_with_pattern.py index 919df68e8b9f..65ca78c9f67d 100644 --- a/es_reindex_indexes_with_pattern.py +++ b/es_reindex_indexes_with_pattern.py @@ -6,60 +6,67 @@ from time import sleep if __name__ == "__main__": - pattern = sys.argv[1] - indexes = get_indexes('cmssdt-'+pattern+'*').splitlines() + indexes = get_indexes("cmssdt-" + pattern + "*").splitlines() indexes_name_only = [] opened_idxs_list = [] closed_idxs_list = [] for i in indexes: idx_list = opened_idxs_list - if 'open' not in i and 'green' not in i: + if "open" not in i and "green" not in i: idx_list = closed_idxs_list list_of_recs = i.split() print(list_of_recs) for j in list_of_recs: - if 'cmssdt-' in j: + if "cmssdt-" in j: indexes_name_only.append(j) idx_list.append(j) - print('list with open idxs', opened_idxs_list) - print('list with closed idx', closed_idxs_list) + print("list with open idxs", opened_idxs_list) + print("list with closed idx", closed_idxs_list) - print('indexes names only') + print("indexes names only") print(indexes_name_only) for i in indexes_name_only: print(i) current_idx = i - tmp_idx = i+'_temppp' - request_data = json.dumps({"source":{"index": current_idx }, "dest":{"index": tmp_idx} }) - print('request for reindex body: ', request_data) + tmp_idx = i + "_temppp" + request_data = json.dumps({"source": {"index": current_idx}, "dest": {"index": tmp_idx}}) + print("request for reindex body: ", request_data) - #open the index if its closed + # open the index if its closed if current_idx in closed_idxs_list: open_index(current_idx) # wait 5 seconds sleep(5) - request_finished_properly = send_request('_reindex/', request_data, method='POST') + request_finished_properly = send_request("_reindex/", request_data, method="POST") if request_finished_properly: - print('forward reindexing complete, delete') + print("forward reindexing complete, delete") delete_index(current_idx) else: - print('reindexing failed for ', current_idx, ' to ',tmp_idx, ', crash the jenkins job') + print( + "reindexing failed for ", current_idx, " to ", tmp_idx, ", crash the jenkins job" + ) exit(-1) - #wait 5 seconds + # wait 5 seconds sleep(5) - request_data = json.dumps({"source":{"index": tmp_idx }, "dest":{"index": current_idx}}) - request_finished_properly = send_request('_reindex/', request_data, method='POST') + request_data = json.dumps({"source": {"index": tmp_idx}, "dest": {"index": current_idx}}) + request_finished_properly = send_request("_reindex/", request_data, method="POST") if request_finished_properly: - print('reverse reindexing complete, delete the temp idx') + print("reverse reindexing complete, delete the temp idx") delete_index(tmp_idx) else: - print('reindexing failed for ', tmp_idx, ' to ', current_idx, ', crash the jenkins job, try manually') + print( + "reindexing failed for ", + tmp_idx, + " to ", + current_idx, + ", crash the jenkins job, try manually", + ) exit(-1) - #close the index if it was in the list of closed + # close the index if it was in the list of closed if current_idx in closed_idxs_list: close_index(current_idx) diff --git a/es_relval_log.py b/es_relval_log.py index 54af8069ed70..6f3fe77ad3f6 100755 --- a/es_relval_log.py +++ b/es_relval_log.py @@ -1,184 +1,216 @@ #!/usr/bin/env python3 from hashlib import sha1 -import os, sys,json , re 
+import os, sys, json, re from os.path import exists from es_utils import send_payload import xml.etree.ElementTree as ET from cmsutils import cmsswIB2Week -def find_step_cmd(cmdfile,step): - try: - cmd = '' - data=open(cmdfile,'r') - get = iter(data) - line = next(get) - while line: - if step=='step1' and 'das_client' in line: - while 'step1_dasquery.log' not in line: - cmd = cmd + line - line = next(get) - return (cmd + line).strip() - elif 'file:'+step in line: - return line.strip() - line=next(get) - except: - return None +def find_step_cmd(cmdfile, step): + try: + cmd = "" + data = open(cmdfile, "r") + get = iter(data) + line = next(get) + while line: + if step == "step1" and "das_client" in line: + while "step1_dasquery.log" not in line: + cmd = cmd + line + line = next(get) + return (cmd + line).strip() + elif "file:" + step in line: + return line.strip() + line = next(get) + except: + return None -def get_exit_code(workflow_log,step): - try: - d=open(workflow_log,'r') - for line in d: - if 'exit:' in line: - codes = list(map(int,line.split('exit:')[-1].strip().split())) - return int(codes[step-1]) - except: - pass - return -1 +def get_exit_code(workflow_log, step): + try: + d = open(workflow_log, "r") + for line in d: + if "exit:" in line: + codes = list(map(int, line.split("exit:")[-1].strip().split())) + return int(codes[step - 1]) + except: + pass + return -1 -def es_parse_jobreport(payload,logFile): - xmlFile = "/".join(logFile.split('/')[:-1]) + "/JobReport"+logFile.split('/')[-1].split("_")[0][-1]+".xml" - if not os.path.exists(xmlFile): - if not '/JobReport1.xml' in xmlFile: print("No JR File:",xmlFile) + +def es_parse_jobreport(payload, logFile): + xmlFile = ( + "/".join(logFile.split("/")[:-1]) + + "/JobReport" + + logFile.split("/")[-1].split("_")[0][-1] + + ".xml" + ) + if not os.path.exists(xmlFile): + if not "/JobReport1.xml" in xmlFile: + print("No JR File:", xmlFile) + return payload + payload["jobreport"] = "/".join(payload["url"].split("/")[:-1]) + "/" + xmlFile.split("/")[-1] + tree = ET.parse(xmlFile) + root = tree.getroot() + events_read = [] + total_events = [] + for i in root.getiterator("EventsRead"): + events_read.append(i.text) + for i in root.getiterator("TotalEvents"): + total_events.append(i.text) + if events_read: + payload["events_read"] = max(events_read) + if total_events: + payload["total_events"] = max(total_events) + reports_p = root.getiterator("PerformanceReport") + for i in reports_p: + summaries = i.getiterator("PerformanceSummary") + for j in summaries: + if j.get("Metric") == "SystemMemory" or j.get("Metric") == "StorageStatistics": + continue + if j.get("Metric") == "ApplicationMemory": + metrics_list = j.getchildren() + for i in metrics_list: + name = i.get("Name") + val = i.get("Value") + if "nan" in val: + val = "" + payload[name] = val + elif j.get("Metric") == "Timing": + metrics_list = j.getchildren() + for i in metrics_list: + val = i.get("Value") + if "nan" in val: + val = "" + elif "e" in val: + val = float(val) + payload[i.get("Name")] = val return payload - payload['jobreport'] = '/'.join(payload["url"].split('/')[:-1])+'/'+xmlFile.split('/')[-1] - tree = ET.parse(xmlFile) - root = tree.getroot() - events_read = [] - total_events = [] - for i in root.getiterator("EventsRead") : events_read.append(i.text) - for i in root.getiterator("TotalEvents") : total_events.append(i.text) - if events_read: payload["events_read"] = max(events_read) - if total_events: payload["total_events"] = max(total_events) - reports_p = 
root.getiterator('PerformanceReport') - for i in reports_p: - summaries = i.getiterator("PerformanceSummary") - for j in summaries: - if j.get("Metric") == "SystemMemory" or j.get("Metric") == "StorageStatistics": - continue - if j.get("Metric") == "ApplicationMemory": - metrics_list = j.getchildren() - for i in metrics_list: - name=i.get("Name") - val = i.get("Value") - if 'nan' in val: val='' - payload[name] = val - elif j.get("Metric") == "Timing": - metrics_list = j.getchildren() - for i in metrics_list: - val = i.get("Value") - if 'nan' in val: - val='' - elif 'e' in val: - val=float(val) - payload[i.get("Name")] = val - return payload + def es_parse_log(logFile): - t = os.path.getmtime(logFile) - timestp = int(t*1000) - payload = {} - pathInfo = logFile.split('/') - architecture = pathInfo[4] - release = pathInfo[8] - workflow = pathInfo[10].split('_')[0] - step = pathInfo[11].split('_')[0] - week, rel_sec = cmsswIB2Week(release) - index = "ib-matrix-" + week - document = "runTheMatrix-data" - id = sha1((release + architecture + workflow + str(step)).encode()).hexdigest() - logdir = '/'.join(logFile.split('/')[:-1]) - cmdfile = logdir + '/cmdLog' - cmd_step = find_step_cmd(cmdfile,step) - if cmd_step: payload["command"] = cmd_step - wf_log = logdir + '/workflow.log' - exitcode = get_exit_code(wf_log,int(step[-1])) - if exitcode != -1 : payload["exitcode"] = exitcode - payload["workflow"] = workflow - payload["release"] = release - payload["architecture"] = architecture - payload["step"] = step - payload["@timestamp"] = timestp - hostFile = "/".join(logFile.split('/')[:-1]) + "/hostname" - if os.path.exists (hostFile): - with open(hostFile,'r') as hname: - payload["hostname"] = hname.readlines()[0].strip() - exception = "" - error = "" - errors = [] - inException = False - inError = False - datasets = [] - error_count = 0 - if exists(logFile): - with open(logFile) as f: - lines = f.readlines() - payload["url"] = 'https://cmssdt.cern.ch/SDT/cgi-bin/buildlogs/'+pathInfo[4]+'/'+pathInfo[8]+'/pyRelValMatrixLogs/run/'+pathInfo[-2]+'/'+pathInfo[-1] - total_lines = len(lines) - for i in range(total_lines): - l = lines[i].strip() - if " Initiating request to open file " in l: - try: - rootfile = l.split(" Initiating request to open file ")[1].split(" ")[0] - if (not "file:" in rootfile) and (not rootfile in datasets): - #if (i+2) %s/threads.txt" % (o, partial_log_dirpath)) + e, o = run_cmd( + "grep ' --nThreads ' %s/*/cmdLog | tail -1 | sed 's|.* *--nThreads *||;s| .*||'" + % partial_log_dirpath + ) + if e: + print(o) + exit(1) + if not o: + o = "1" + run_cmd("echo %s > %s/threads.txt" % (o, partial_log_dirpath)) e, o = run_cmd("head -1 %s/threads.txt" % partial_log_dirpath) if e: - print(o) - exit(1) -cmsThreads = o.strip('\n') + print(o) + exit(1) +cmsThreads = o.strip("\n") e, o = run_cmd("ls -d %s/*" % partial_log_dirpath) threads = [] for wf in o.split("\n"): - if not isdir(wf): continue - if exists(join(wf,"wf_stats.done")): continue - wfnum = basename(wf).split("_",1)[0] - hostname="" - if exists(join(wf,"hostname")): - hostname=open(join(wf,"hostname")).read().split("\n")[0] - exit_codes={} - if exists(join(wf,"workflow.log")): - e, o = run_cmd("grep '^%s_' %s/workflow.log | head -1 | sed 's|.* exit: *||'" % (wfnum, wf)) - if not o: o="256" - istep=0 - for e in [ int(x) for x in o.strip().split(" ") if x ]: - istep+=1 - exit_codes["step%s" % istep ] = e - e, o = run_cmd("ls %s/step*.log | sed 's|^.*/||'" % wf) - steps = {} - for log in o.split("\n"): steps[log.split("_")[0]]="" - e, o 
= run_cmd("ls %s/wf_stats-step*.json" % wf) - for s in o.split("\n"): - step = s.split("/wf_stats-")[1][:-5] - if step in steps: steps[step]=s - for s in steps: - sfile =steps[s] - if sfile=="": continue - exit_code=-1 - if s in exit_codes: exit_code = exit_codes[s] - while True: - threads = [t for t in threads if t.is_alive()] - if(len(threads) >= jobs):sleep(0.1) - else: break - params={"cmsthreads":cmsThreads} - t = threading.Thread(target=es_send_resource_stats, args=(release, arch, wfnum, s, sfile, hostname, exit_code, params)) - t.start() - threads.append(t) - run_cmd("touch %s" % join(wf,"wf_stats.done")) -print("Active Threads:",len(threads)) -for t in threads: t.join() - + if not isdir(wf): + continue + if exists(join(wf, "wf_stats.done")): + continue + wfnum = basename(wf).split("_", 1)[0] + hostname = "" + if exists(join(wf, "hostname")): + hostname = open(join(wf, "hostname")).read().split("\n")[0] + exit_codes = {} + if exists(join(wf, "workflow.log")): + e, o = run_cmd( + "grep '^%s_' %s/workflow.log | head -1 | sed 's|.* exit: *||'" % (wfnum, wf) + ) + if not o: + o = "256" + istep = 0 + for e in [int(x) for x in o.strip().split(" ") if x]: + istep += 1 + exit_codes["step%s" % istep] = e + e, o = run_cmd("ls %s/step*.log | sed 's|^.*/||'" % wf) + steps = {} + for log in o.split("\n"): + steps[log.split("_")[0]] = "" + e, o = run_cmd("ls %s/wf_stats-step*.json" % wf) + for s in o.split("\n"): + step = s.split("/wf_stats-")[1][:-5] + if step in steps: + steps[step] = s + for s in steps: + sfile = steps[s] + if sfile == "": + continue + exit_code = -1 + if s in exit_codes: + exit_code = exit_codes[s] + while True: + threads = [t for t in threads if t.is_alive()] + if len(threads) >= jobs: + sleep(0.1) + else: + break + params = {"cmsthreads": cmsThreads} + t = threading.Thread( + target=es_send_resource_stats, + args=(release, arch, wfnum, s, sfile, hostname, exit_code, params), + ) + t.start() + threads.append(t) + run_cmd("touch %s" % join(wf, "wf_stats.done")) +print("Active Threads:", len(threads)) +for t in threads: + t.join() diff --git a/es_utils.py b/es_utils.py index 7f1fad094251..7348e7f1a6d9 100755 --- a/es_utils.py +++ b/es_utils.py @@ -9,15 +9,28 @@ from os import stat as tstat from time import time -CMSSDT_ES_QUERY="https://cmssdt.cern.ch/SDT/cgi-bin/es_query" -#ES_SERVER = 'https://es-cmssdt7.cern.ch:9203' -ES_SERVER = 'https://es-cmssdt1.cern.ch/es' +CMSSDT_ES_QUERY = "https://cmssdt.cern.ch/SDT/cgi-bin/es_query" +# ES_SERVER = 'https://es-cmssdt7.cern.ch:9203' +ES_SERVER = "https://es-cmssdt1.cern.ch/es" ES_NEW_SERVER = ES_SERVER ES_PASSWD = None -def format(s, **kwds): return s % kwds -def get_es_query(query="", start_time=0, end_time=0, page_start=0, page_size=10000, timestamp_field='@timestamp', lowercase_expanded_terms='false', fields=None): - es5_query_tmpl=""" + +def format(s, **kwds): + return s % kwds + + +def get_es_query( + query="", + start_time=0, + end_time=0, + page_start=0, + page_size=10000, + timestamp_field="@timestamp", + lowercase_expanded_terms="false", + fields=None, +): + es5_query_tmpl = """ { "_source": [%(fields_list)s], "query": { @@ -33,273 +46,403 @@ def get_es_query(query="", start_time=0, end_time=0, page_start=0, page_size=100 "size" : %(page_size)s } """ - if not fields: fields = ["*"] - fields_list = ",".join([ '"%s"' % f for f in fields]) - return format(es5_query_tmpl, **locals ()) + if not fields: + fields = ["*"] + fields_list = ",".join(['"%s"' % f for f in fields]) + return format(es5_query_tmpl, **locals()) + def 
get_ssl_context(): - sslcon = None - try: sslcon = ssl._create_unverified_context() - except Exception as e: sslcon = None - return sslcon + sslcon = None + try: + sslcon = ssl._create_unverified_context() + except Exception as e: + sslcon = None + return sslcon + def ssl_urlopen(url, data): - res = urlopen(url, data, context=get_ssl_context()).read() - if isinstance(res, bytes): - res = res.decode() + res = urlopen(url, data, context=get_ssl_context()).read() + if isinstance(res, bytes): + res = res.decode() + + return res - return res def resend_payload(hit): - print("Resending ",hit) - return send_payload(hit["_index"], "_doc", hit["_id"],json.dumps(hit["_source"])) + print("Resending ", hit) + return send_payload(hit["_index"], "_doc", hit["_id"], json.dumps(hit["_source"])) + def es_get_passwd(passwd_file=None): - global ES_PASSWD - if ES_PASSWD: return ES_PASSWD - for psfile in [passwd_file, getenv("CMS_ES_SECRET_FILE",None), "/data/secrets/cmssdt-es-secret", "/build/secrets/cmssdt-es-secret", "/var/lib/jenkins/secrets/cmssdt-es-secret", "/data/secrets/github_hook_secret_cmsbot"]: - if not psfile: continue - if exists(psfile): - passwd_file=psfile - break - ES_PASSWD = open(passwd_file,'r').read().strip() - return ES_PASSWD - -def send_request(uri, payload=None, passwd_file=None, method=None, es_ser=ES_SERVER, ignore_doc=False, ignore_new=False): - if (not ignore_new) and (ES_SERVER!=ES_NEW_SERVER) and (es_ser==ES_SERVER): - if not send_request(uri, payload, passwd_file, method, es_ser=ES_NEW_SERVER, ignore_doc=ignore_doc): - return False - header = {"Content-Type": "application/json"} - xuri = uri.split("/") - if (not ignore_doc) and (xuri[1] != "_doc"): - xuri[1] = "_doc" - uri = "/".join(xuri) - passwd=es_get_passwd(passwd_file) - if not passwd: return False - url = "%s/%s" % (es_ser,uri) - header['Authorization'] = 'Basic %s' % base64.b64encode(("cmssdt:%s" % passwd).encode()).decode() - try: - if payload is None: - request = Request(url, payload, header) - else: - request = Request(url, payload.encode(), header) - if method: request.get_method = lambda: method - content = urlopen(request, context=get_ssl_context()) - except Exception as e: - print("ERROR:",url,str(e)) - print(payload) - return False - print("OK:",url) - return True + global ES_PASSWD + if ES_PASSWD: + return ES_PASSWD + for psfile in [ + passwd_file, + getenv("CMS_ES_SECRET_FILE", None), + "/data/secrets/cmssdt-es-secret", + "/build/secrets/cmssdt-es-secret", + "/var/lib/jenkins/secrets/cmssdt-es-secret", + "/data/secrets/github_hook_secret_cmsbot", + ]: + if not psfile: + continue + if exists(psfile): + passwd_file = psfile + break + ES_PASSWD = open(passwd_file, "r").read().strip() + return ES_PASSWD + + +def send_request( + uri, + payload=None, + passwd_file=None, + method=None, + es_ser=ES_SERVER, + ignore_doc=False, + ignore_new=False, +): + if (not ignore_new) and (ES_SERVER != ES_NEW_SERVER) and (es_ser == ES_SERVER): + if not send_request( + uri, payload, passwd_file, method, es_ser=ES_NEW_SERVER, ignore_doc=ignore_doc + ): + return False + header = {"Content-Type": "application/json"} + xuri = uri.split("/") + if (not ignore_doc) and (xuri[1] != "_doc"): + xuri[1] = "_doc" + uri = "/".join(xuri) + passwd = es_get_passwd(passwd_file) + if not passwd: + return False + url = "%s/%s" % (es_ser, uri) + header["Authorization"] = ( + "Basic %s" % base64.b64encode(("cmssdt:%s" % passwd).encode()).decode() + ) + try: + if payload is None: + request = Request(url, payload, header) + else: + request = Request(url, 
payload.encode(), header) + if method: + request.get_method = lambda: method + content = urlopen(request, context=get_ssl_context()) + except Exception as e: + print("ERROR:", url, str(e)) + print(payload) + return False + print("OK:", url) + return True + def send_payload(index, document, id, payload, passwd_file=None): - if not index.startswith('cmssdt-'): index = 'cmssdt-' + index - uri = "%s/%s/" % (index,document) - if id: uri = uri+id - return send_request(uri, payload=payload, method="POST", passwd_file=passwd_file) + if not index.startswith("cmssdt-"): + index = "cmssdt-" + index + uri = "%s/%s/" % (index, document) + if id: + uri = uri + id + return send_request(uri, payload=payload, method="POST", passwd_file=passwd_file) + def send_template(name, payload, passwd_file=None): - if not name.startswith('cmssdt-'): name = 'cmssdt-' + name - uri = "_template/%s" % name - return send_request(uri, payload=payload, passwd_file=passwd_file, method='PUT', ignore_doc=True) + if not name.startswith("cmssdt-"): + name = "cmssdt-" + name + uri = "_template/%s" % name + return send_request( + uri, payload=payload, passwd_file=passwd_file, method="PUT", ignore_doc=True + ) + + +def delete_hit(hit, passwd_file=None): + uri = "%s/%s/%s" % (hit["_index"], "_doc", hit["_id"]) + if not send_request(uri, passwd_file=passwd_file, method="DELETE"): + return False + print("DELETE:", hit["_id"]) + return True -def delete_hit(hit,passwd_file=None): - uri = "%s/%s/%s" % (hit["_index"], "_doc", hit["_id"]) - if not send_request(uri, passwd_file=passwd_file, method='DELETE'): return False - print("DELETE:",hit["_id"]) - return True def get_payload(index, query, scroll=0): - data = {'index':index, 'query':query, 'scroll':scroll} - if scroll<=1: data['params'] = 'ignore_unavailable=true' - data["es_server"]=ES_SERVER - return ssl_urlopen(CMSSDT_ES_QUERY,json.dumps(data).encode("ascii","ignore")) + data = {"index": index, "query": query, "scroll": scroll} + if scroll <= 1: + data["params"] = "ignore_unavailable=true" + data["es_server"] = ES_SERVER + return ssl_urlopen(CMSSDT_ES_QUERY, json.dumps(data).encode("ascii", "ignore")) + def get_payload_wscroll(index, query, max_count=-1): - es_data = json.loads(get_payload(index, query,scroll=1)) - if 'proxy-error' in es_data: return es_data - es_data.pop("_shards", None) - if type(es_data['hits']['total']) == int: - scroll_size = es_data['hits']['total'] - else: - scroll_size = es_data['hits']['total']['value'] - scroll_id = es_data.pop('_scroll_id') - tcount = 0 - while ((scroll_size > 0) and ((max_count<0) or (tcount 0): es_data['hits']['hits']+=es_xdata['hits']['hits'] - return es_data - -def get_template(index=''): - data = {'index':index, 'api': '/_template', 'prefix': True} - return ssl_urlopen(CMSSDT_ES_QUERY,json.dumps(data).encode()) + es_data = json.loads(get_payload(index, query, scroll=1)) + if "proxy-error" in es_data: + return es_data + es_data.pop("_shards", None) + if type(es_data["hits"]["total"]) == int: + scroll_size = es_data["hits"]["total"] + else: + scroll_size = es_data["hits"]["total"]["value"] + scroll_id = es_data.pop("_scroll_id") + tcount = 0 + while (scroll_size > 0) and ((max_count < 0) or (tcount < max_count)): + query = '{"scroll_id": "%s","scroll":"1m"}' % scroll_id + es_xdata = json.loads(get_payload(index, query, scroll=2)) + if "proxy-error" in es_xdata: + return es_xdata + scroll_id = es_xdata.pop("_scroll_id") + scroll_size = len(es_xdata["hits"]["hits"]) + tcount += scroll_size + if scroll_size > 0: + es_data["hits"]["hits"] += 
es_xdata["hits"]["hits"] + return es_data + + +def get_template(index=""): + data = {"index": index, "api": "/_template", "prefix": True} + return ssl_urlopen(CMSSDT_ES_QUERY, json.dumps(data).encode()) + def find_indexes(index): - idxs = {} - for line in get_indexes(index).split("\n"): - line=re.sub("\s\s+"," ",line.strip()) - if not line: continue - data =line.split(" ") - idx = "" - st = data[0] - if st == "close": - idx = data[1] - else: - st = data[1] - idx = data[2] - if not st in idxs: idxs[st]=[] - idxs[st].append(idx) - return idxs + idxs = {} + for line in get_indexes(index).split("\n"): + line = re.sub("\s\s+", " ", line.strip()) + if not line: + continue + data = line.split(" ") + idx = "" + st = data[0] + if st == "close": + idx = data[1] + else: + st = data[1] + idx = data[2] + if not st in idxs: + idxs[st] = [] + idxs[st].append(idx) + return idxs + + +def get_indexes(index="cmssdt-*"): + data = {"index": index, "api": "/_cat", "prefix": True, "method": "GET"} + return ssl_urlopen(CMSSDT_ES_QUERY, json.dumps(data).encode()) -def get_indexes(index='cmssdt-*'): - data = {'index':index, 'api': '/_cat', 'prefix': True, 'method': 'GET'} - return ssl_urlopen(CMSSDT_ES_QUERY,json.dumps(data).encode()) def close_index(index): - if not index.startswith('cmssdt-'): index = 'cmssdt-' + index - send_request(index+'/_close',method='POST', ignore_doc=True, ignore_new=True) + if not index.startswith("cmssdt-"): + index = "cmssdt-" + index + send_request(index + "/_close", method="POST", ignore_doc=True, ignore_new=True) + def open_index(index): - if not index.startswith('cmssdt-'): index = 'cmssdt-' + index - send_request(index+'/_open',method='POST', ignore_doc=True, ignore_new=True) + if not index.startswith("cmssdt-"): + index = "cmssdt-" + index + send_request(index + "/_open", method="POST", ignore_doc=True, ignore_new=True) + def delete_index(index): - if not index.startswith('cmssdt-'): index = 'cmssdt-' + index - send_request(index+'/',method='DELETE', ignore_doc=True, ignore_new=True) - -def es_query(index,query,start_time,end_time,page_start=0,page_size=10000,timestamp_field="@timestamp", scroll=False, max_count=-1, fields=None): - query_str = get_es_query(query=query, start_time=start_time,end_time=end_time,page_start=page_start,page_size=page_size,timestamp_field=timestamp_field, fields=fields) - if scroll: return get_payload_wscroll(index, query_str, max_count) - return json.loads(get_payload(index, query_str)) - -def es_workflow_stats(es_hits,rss='rss_75', cpu='cpu_75'): - wf_stats = {} - for h in es_hits['hits']['hits']: - hit = h["_source"] - if 'time' not in hit: continue - wf = hit["workflow"] - step = hit["step"] - if not wf in wf_stats: wf_stats[wf]={} - if not step in wf_stats[wf]:wf_stats[wf][step]=[] - wf_stats[wf][step].append([hit['time'], hit[rss], hit[cpu], hit["rss_max"], hit["cpu_max"]]) - - for wf in wf_stats: - for step in wf_stats[wf]: - hits = wf_stats[wf][step] - thits = len(hits) - time_v = int(sum([h[0] for h in hits])/thits) - rss_v = int(sum([h[1] for h in hits])/thits) - cpu_v = int(sum([h[2] for h in hits])/thits) - rss_m = int(sum([h[3] for h in hits])/thits) - cpu_m = int(sum([h[4] for h in hits])/thits) - if rss_v<1024: rss_v = rss_m - if cpu_v<10: cpu_v = cpu_m - wf_stats[wf][step] = { "time" : time_v, - "rss" : rss_v, - "cpu" : cpu_v, - "rss_max" : rss_m, - "cpu_max" : cpu_m, - "rss_avg" : int((rss_v+rss_m)/2), - "cpu_avg" : int((cpu_v+cpu_m)/2) - } - return wf_stats + if not index.startswith("cmssdt-"): + index = "cmssdt-" + index + 
send_request(index + "/", method="DELETE", ignore_doc=True, ignore_new=True) + + +def es_query( + index, + query, + start_time, + end_time, + page_start=0, + page_size=10000, + timestamp_field="@timestamp", + scroll=False, + max_count=-1, + fields=None, +): + query_str = get_es_query( + query=query, + start_time=start_time, + end_time=end_time, + page_start=page_start, + page_size=page_size, + timestamp_field=timestamp_field, + fields=fields, + ) + if scroll: + return get_payload_wscroll(index, query_str, max_count) + return json.loads(get_payload(index, query_str)) + + +def es_workflow_stats(es_hits, rss="rss_75", cpu="cpu_75"): + wf_stats = {} + for h in es_hits["hits"]["hits"]: + hit = h["_source"] + if "time" not in hit: + continue + wf = hit["workflow"] + step = hit["step"] + if not wf in wf_stats: + wf_stats[wf] = {} + if not step in wf_stats[wf]: + wf_stats[wf][step] = [] + wf_stats[wf][step].append( + [hit["time"], hit[rss], hit[cpu], hit["rss_max"], hit["cpu_max"]] + ) + + for wf in wf_stats: + for step in wf_stats[wf]: + hits = wf_stats[wf][step] + thits = len(hits) + time_v = int(sum([h[0] for h in hits]) / thits) + rss_v = int(sum([h[1] for h in hits]) / thits) + cpu_v = int(sum([h[2] for h in hits]) / thits) + rss_m = int(sum([h[3] for h in hits]) / thits) + cpu_m = int(sum([h[4] for h in hits]) / thits) + if rss_v < 1024: + rss_v = rss_m + if cpu_v < 10: + cpu_v = cpu_m + wf_stats[wf][step] = { + "time": time_v, + "rss": rss_v, + "cpu": cpu_v, + "rss_max": rss_m, + "cpu_max": cpu_m, + "rss_avg": int((rss_v + rss_m) / 2), + "cpu_avg": int((cpu_v + cpu_m) / 2), + } + return wf_stats + def get_summary_stats_from_json_file(stats_dict_file_path, cpu_normalize): - with open(stats_dict_file_path, 'r') as stas_d_f: stats_dict = json.load(stas_d_f) - sdata = None - try: - xdata = {} - for stat in stats_dict: - for item in stat: - try: - xdata[item].append(stat[item]) - except: - xdata[item] = [] - xdata[item].append(stat[item]) - sdata = {} - for x in xdata: - data = sorted(xdata[x]) - if x in ["time", "num_threads", "processes", "num_fds"]: - sdata[x] = data[-1] - continue - if not x in ["rss", "vms", "pss", "uss", "shared", "data", "cpu"]: continue - dlen = len(data) - if (x == "cpu") and (cpu_normalize > 1) and (data[-1] > 100): - data = [d / cpu_normalize for d in data] - for t in ["min", "max", "avg", "median", "25", "75", "90"]: sdata[x + "_" + t] = 0 - if dlen > 0: - sdata[x + "_min"] = data[0] - sdata[x + "_max"] = data[-1] - if dlen > 1: - dlen2 = int(dlen / 2) - if (dlen % 2) == 0: - sdata[x + "_median"] = int((data[dlen2 - 1] + data[dlen2]) / 2) - else: - sdata[x + "_median"] = data[dlen2] - sdata[x + "_avg"] = int(sum(data) / dlen) - for t in [25, 75, 90]: - sdata[x + "_" + str(t)] = int(percentile(t, data, dlen)) - else: - for t in ["25", "75", "90", "avg", "median"]: - sdata[x + "_" + t] = data[0] - except Exception as e: - print(str(e)) - return sdata - -def es_send_resource_stats(release, arch, name, version, sfile, - hostname, exit_code, params=None, - cpu_normalize=1, index="relvals_stats_summary", doc="runtime-stats-summary"): - week, rel_sec = cmsswIB2Week(release) - rel_msec = rel_sec*1000 - if "_X_" in release: - release_queue = release.split("_X_",1)[0]+"_X" - else: - release_queue = "_".join(release.split("_")[:3])+"_X" - sdata = {"release": release, "release_queue": release_queue, "architecture": arch, - "step": version, "@timestamp": rel_msec, "workflow": name, - "hostname": hostname, "exit_code": exit_code} - average_stats = 
get_summary_stats_from_json_file(sfile, cpu_normalize) - sdata.update(average_stats) - if params: sdata.update(params) - idx = sha1((release + arch + name + version + str(rel_sec)).encode()).hexdigest() - try:send_payload(index+"-"+week,doc,idx,json.dumps(sdata)) - except Exception as e: print(str(e)) - -def es_send_external_stats(stats_dict_file_path, opts_dict_file_path, cpu_normalize=1, - es_index_name='externals_stats_summary', - es_doc_name='externals-stats-summary'): - file_stamp = int(tstat(stats_dict_file_path).st_mtime) # get the file stamp from the file - week = str((file_stamp / 86400 + 4) / 7) - with open(opts_dict_file_path, 'r') as opts_dict_f: opts_dict = json.load(opts_dict_f) - index_sha = sha1( (''.join([str(x) for x in opts_dict.values()])+stats_dict_file_path).encode() ).hexdigest() - sdata = get_summary_stats_from_json_file(stats_dict_file_path, cpu_normalize) - sdata.update(opts_dict) - sdata["@timestamp"]=file_stamp*1000 - try:send_payload(es_index_name+"-"+week, es_doc_name, index_sha, json.dumps(sdata)) - except Exception as e: print(str(e)) - -def getExternalsESstats(arch='*', externalsList='', lastNdays=30, page_size=0, fields=None): - externals_names='' - if externalsList == '': - externals_names="*" - else: - externals_names= (" OR ").join(externalsList.split(',')) - stats = es_query(index='externals_stats_summary-*', - query=format('architecture:%(architecture)s AND name.keyword:(%(names)s)', - architecture=arch, - names=externals_names), - start_time=1000*int(time()-(86400*lastNdays)), - end_time=1000*int(time()),scroll=True, fields=fields) - return stats['hits']['hits'] + with open(stats_dict_file_path, "r") as stas_d_f: + stats_dict = json.load(stas_d_f) + sdata = None + try: + xdata = {} + for stat in stats_dict: + for item in stat: + try: + xdata[item].append(stat[item]) + except: + xdata[item] = [] + xdata[item].append(stat[item]) + sdata = {} + for x in xdata: + data = sorted(xdata[x]) + if x in ["time", "num_threads", "processes", "num_fds"]: + sdata[x] = data[-1] + continue + if not x in ["rss", "vms", "pss", "uss", "shared", "data", "cpu"]: + continue + dlen = len(data) + if (x == "cpu") and (cpu_normalize > 1) and (data[-1] > 100): + data = [d / cpu_normalize for d in data] + for t in ["min", "max", "avg", "median", "25", "75", "90"]: + sdata[x + "_" + t] = 0 + if dlen > 0: + sdata[x + "_min"] = data[0] + sdata[x + "_max"] = data[-1] + if dlen > 1: + dlen2 = int(dlen / 2) + if (dlen % 2) == 0: + sdata[x + "_median"] = int((data[dlen2 - 1] + data[dlen2]) / 2) + else: + sdata[x + "_median"] = data[dlen2] + sdata[x + "_avg"] = int(sum(data) / dlen) + for t in [25, 75, 90]: + sdata[x + "_" + str(t)] = int(percentile(t, data, dlen)) + else: + for t in ["25", "75", "90", "avg", "median"]: + sdata[x + "_" + t] = data[0] + except Exception as e: + print(str(e)) + return sdata + + +def es_send_resource_stats( + release, + arch, + name, + version, + sfile, + hostname, + exit_code, + params=None, + cpu_normalize=1, + index="relvals_stats_summary", + doc="runtime-stats-summary", +): + week, rel_sec = cmsswIB2Week(release) + rel_msec = rel_sec * 1000 + if "_X_" in release: + release_queue = release.split("_X_", 1)[0] + "_X" + else: + release_queue = "_".join(release.split("_")[:3]) + "_X" + sdata = { + "release": release, + "release_queue": release_queue, + "architecture": arch, + "step": version, + "@timestamp": rel_msec, + "workflow": name, + "hostname": hostname, + "exit_code": exit_code, + } + average_stats = get_summary_stats_from_json_file(sfile, 
cpu_normalize) + sdata.update(average_stats) + if params: + sdata.update(params) + idx = sha1((release + arch + name + version + str(rel_sec)).encode()).hexdigest() + try: + send_payload(index + "-" + week, doc, idx, json.dumps(sdata)) + except Exception as e: + print(str(e)) + + +def es_send_external_stats( + stats_dict_file_path, + opts_dict_file_path, + cpu_normalize=1, + es_index_name="externals_stats_summary", + es_doc_name="externals-stats-summary", +): + file_stamp = int(tstat(stats_dict_file_path).st_mtime) # get the file stamp from the file + week = str((file_stamp / 86400 + 4) / 7) + with open(opts_dict_file_path, "r") as opts_dict_f: + opts_dict = json.load(opts_dict_f) + index_sha = sha1( + ("".join([str(x) for x in opts_dict.values()]) + stats_dict_file_path).encode() + ).hexdigest() + sdata = get_summary_stats_from_json_file(stats_dict_file_path, cpu_normalize) + sdata.update(opts_dict) + sdata["@timestamp"] = file_stamp * 1000 + try: + send_payload(es_index_name + "-" + week, es_doc_name, index_sha, json.dumps(sdata)) + except Exception as e: + print(str(e)) + + +def getExternalsESstats(arch="*", externalsList="", lastNdays=30, page_size=0, fields=None): + externals_names = "" + if externalsList == "": + externals_names = "*" + else: + externals_names = (" OR ").join(externalsList.split(",")) + stats = es_query( + index="externals_stats_summary-*", + query=format( + "architecture:%(architecture)s AND name.keyword:(%(names)s)", + architecture=arch, + names=externals_names, + ), + start_time=1000 * int(time() - (86400 * lastNdays)), + end_time=1000 * int(time()), + scroll=True, + fields=fields, + ) + return stats["hits"]["hits"] + # get a dict of stats with externals name as keys # create a default github file with stats so if elastic search fails, -def orderStatsByName(externalsStats=None): +def orderStatsByName(externalsStats=None): namedStats = {} for element in externalsStats: ext_name = element["_source"]["name"] @@ -311,50 +454,56 @@ def orderStatsByName(externalsStats=None): namedStats[ext] = sorted(namedStats[ext], key=lambda x: x["@timestamp"], reverse=True) return namedStats -def get_externals_build_stats(arch='*', lastNdays=30, cpus=0, memoryGB=0, default_key="90"): - default_keys= {"cpu": "cpu_" + default_key, "rss": "rss_" + default_key, "time": "time"} + +def get_externals_build_stats(arch="*", lastNdays=30, cpus=0, memoryGB=0, default_key="90"): + default_keys = {"cpu": "cpu_" + default_key, "rss": "rss_" + default_key, "time": "time"} all_data = {} - if cpus<=0: + if cpus <= 0: from cmsutils import MachineCPUCount - cpus=MachineCPUCount - if memoryGB<=0: + + cpus = MachineCPUCount + if memoryGB <= 0: from cmsutils import MachineMemoryGB, MachineCPUCount + memoryGB = MachineMemoryGB - mem=memoryGB*1024*1024*1024 + mem = memoryGB * 1024 * 1024 * 1024 fields = list(default_keys.values()) + ["name", "build_jobs"] items = getExternalsESstats(arch=arch, lastNdays=lastNdays, fields=fields) for item in items: - name=item['_source']['name'] - jobs=item['_source']['build_jobs'] + name = item["_source"]["name"] + jobs = item["_source"]["build_jobs"] if name not in all_data: all_data[name] = {} - for k in default_keys: all_data[name][k] = [] + for k in default_keys: + all_data[name][k] = [] for k in default_keys: xk = default_keys[k] - all_data[name][k].append(int(item['_source'][xk]*cpus/jobs)) + all_data[name][k].append(int(item["_source"][xk] * cpus / jobs)) default_res = 4 - if cpus<4: default_res=1 - elif cpus<8: default_res=2 - total_cpus = cpus*100 - 
data={"defaults": {"cpu": (50, total_cpus/default_res), - "rss": (int(mem/cpus), int(mem/default_res)), - "time": (1, 300) - }, - "resources":{"cpu": total_cpus, "rss": mem}, - "packages": {}, - "known": [("^.+-toolfile$", 0), - ("^data-.+$", 0), - ("^.+$", 1)] - } + if cpus < 4: + default_res = 1 + elif cpus < 8: + default_res = 2 + total_cpus = cpus * 100 + data = { + "defaults": { + "cpu": (50, total_cpus / default_res), + "rss": (int(mem / cpus), int(mem / default_res)), + "time": (1, 300), + }, + "resources": {"cpu": total_cpus, "rss": mem}, + "packages": {}, + "known": [("^.+-toolfile$", 0), ("^data-.+$", 0), ("^.+$", 1)], + } for name in all_data: - data["packages"][name] = {'cpu': 0, 'rss': 0, 'time': -1, "name": name} + data["packages"][name] = {"cpu": 0, "rss": 0, "time": -1, "name": name} for k in default_keys: if all_data[name][k]: - data["packages"][name][k] = int(sum(all_data[name][k])/len(all_data[name][k])) - #Default resources if no data found for a package - if data["packages"][name]['time']==-1: + data["packages"][name][k] = int(sum(all_data[name][k]) / len(all_data[name][k])) + # Default resources if no data found for a package + if data["packages"][name]["time"] == -1: idx = 1 for exp in data["known"]: if re.match(exp[0], name): @@ -362,18 +511,19 @@ def get_externals_build_stats(arch='*', lastNdays=30, cpus=0, memoryGB=0, defaul break for k in data["defaults"]: data["packages"][name][k] = data["defaults"][k][idx] - #for small package with build time 1 or less use min resources - elif data["packages"][name]['time']==0: + # for small package with build time 1 or less use min resources + elif data["packages"][name]["time"] == 0: for k in data["defaults"]: data["packages"][name][k] = data["defaults"][k][0] else: - #Make sure resources are not more than the total + # Make sure resources are not more than the total for k in data["defaults"]: - if k == "time": continue + if k == "time": + continue v = data["packages"][name][k] - if v>data["resources"][k]: + if v > data["resources"][k]: v = data["resources"][k] - elif v==0: + elif v == 0: v = data["defaults"][k][0] data["packages"][name][k] = v return data diff --git a/fix-backport-labels.py b/fix-backport-labels.py index 8a72fa14a241..8118d42b13e2 100755 --- a/fix-backport-labels.py +++ b/fix-backport-labels.py @@ -7,54 +7,79 @@ from socket import setdefaulttimeout from github_utils import api_rate_limits from cms_static import ISSUE_SEEN_MSG, CMSBUILD_GH_USER + setdefaulttimeout(120) import sys + SCRIPT_DIR = dirname(abspath(sys.argv[0])) parser = OptionParser(usage="%prog") -parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) -parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") +parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, +) +parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", +) opts, args = parser.parse_args() -if len(args) != 0: parser.error("Too many/few arguments") +if len(args) != 0: + parser.error("Too many/few arguments") -repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) -if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) +repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) +if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) import repo_config from process_pr import get_backported_pr - + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) repo = gh.get_repo(opts.repository) -label = [ repo.get_label("backport") ] +label = [repo.get_label("backport")] issues = repo.get_issues(state="open", sort="updated", labels=label) - + for issue in issues: - if not issue.pull_request: continue - api_rate_limits(gh) - backport_pr=None - issue_body = issue.body.encode("ascii", "ignore").decode() if issue.body else "" - if (issue.user.login == CMSBUILD_GH_USER) and re.match(ISSUE_SEEN_MSG,issue_body.split("\n",1)[0].strip()): - backport_pr=get_backported_pr(issue_body) - else: - for comment in issue.get_comments(): - commenter = comment.user.login - comment_msg = comment.body.encode("ascii", "ignore").decode() - # The first line is an invariant. - comment_lines = [ l.strip() for l in comment_msg.split("\n") if l.strip() ] - first_line = comment_lines[0:1] - if not first_line: continue - first_line = first_line[0] - if (commenter == CMSBUILD_GH_USER) and re.match(ISSUE_SEEN_MSG, first_line): - backport_pr=get_backported_pr(comment_msg) - break - if backport_pr and re.match("^[1-9][0-9]+$",backport_pr): - print(issue.number, backport_pr) - try: - pr = repo.get_pull(int(backport_pr)) - print(" Backported PR merged:",pr.merged) - if pr.merged: - labels = list(set([x.name for x in issue.labels if x.name!="backport"]+["backport-ok"])) - if not opts.dryRun: issue.edit(labels=labels) - print(issue.number,"New Labels:",labels) - except Exception as e: - print(e) + if not issue.pull_request: + continue + api_rate_limits(gh) + backport_pr = None + issue_body = issue.body.encode("ascii", "ignore").decode() if issue.body else "" + if (issue.user.login == CMSBUILD_GH_USER) and re.match( + ISSUE_SEEN_MSG, issue_body.split("\n", 1)[0].strip() + ): + backport_pr = get_backported_pr(issue_body) + else: + for comment in issue.get_comments(): + commenter = comment.user.login + comment_msg = comment.body.encode("ascii", "ignore").decode() + # The first line is an invariant. 
+ comment_lines = [l.strip() for l in comment_msg.split("\n") if l.strip()] + first_line = comment_lines[0:1] + if not first_line: + continue + first_line = first_line[0] + if (commenter == CMSBUILD_GH_USER) and re.match(ISSUE_SEEN_MSG, first_line): + backport_pr = get_backported_pr(comment_msg) + break + if backport_pr and re.match("^[1-9][0-9]+$", backport_pr): + print(issue.number, backport_pr) + try: + pr = repo.get_pull(int(backport_pr)) + print(" Backported PR merged:", pr.merged) + if pr.merged: + labels = list( + set([x.name for x in issue.labels if x.name != "backport"] + ["backport-ok"]) + ) + if not opts.dryRun: + issue.edit(labels=labels) + print(issue.number, "New Labels:", labels) + except Exception as e: + print(e) diff --git a/fix-igprof-sql.py b/fix-igprof-sql.py index 52f45662bf71..69bec804c759 100755 --- a/fix-igprof-sql.py +++ b/fix-igprof-sql.py @@ -3,32 +3,38 @@ import sys import re -unknown=0 +unknown = 0 + + def fix_file(line): - global unknown - m = re.match('^(\s*INSERT\s+INTO\s+files\s+VALUES\s+\((\d+),\s*["])([^"]*)(["].*$)',line) - if m: - xf = m.group(3) - if xf: - if xf[0]!="/": xf="unknown-"+m.group(2) - else: - unknown+=1 - xf="unknownfile-"+str(unknown) - line = m.group(1)+xf+m.group(4) - return line + global unknown + m = re.match('^(\s*INSERT\s+INTO\s+files\s+VALUES\s+\((\d+),\s*["])([^"]*)(["].*$)', line) + if m: + xf = m.group(3) + if xf: + if xf[0] != "/": + xf = "unknown-" + m.group(2) + else: + unknown += 1 + xf = "unknownfile-" + str(unknown) + line = m.group(1) + xf + m.group(4) + return line + -xline="" +xline = "" for line in open(sys.argv[1]).readlines(): - line=line.strip("\n") - if xline: - xline=xline+line - if line.endswith(");"): - line=fix_file(xline) - xline="" - else: continue - elif line.startswith('INSERT INTO files'): - if not line.endswith(");"): - xline=line - continue - else: line=fix_file(line) - print(line) + line = line.strip("\n") + if xline: + xline = xline + line + if line.endswith(");"): + line = fix_file(xline) + xline = "" + else: + continue + elif line.startswith("INSERT INTO files"): + if not line.endswith(");"): + xline = line + continue + else: + line = fix_file(line) + print(line) diff --git a/forward-pull-requests b/forward-pull-requests deleted file mode 100755 index 998dc314ff21..000000000000 --- a/forward-pull-requests +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 - -# Given a branch, gets all of its pull requests -# and recreates them in a different branch. - -from __future__ import print_function -from github import Github, GithubException -from sys import exit -from os.path import expanduser -from argparse import ArgumentParser -from datetime import datetime -from _py2with3compatibility import Request, urlopen -from time import sleep -from cms_static import GH_CMSSW_ORGANIZATION as gh_user -from cms_static import GH_CMSSW_REPO as gh_cmssw -from github_utils import get_ported_PRs -from socket import setdefaulttimeout -setdefaulttimeout(120) -import json - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("-s", "--source") - parser.add_argument("-d", "--dest") - parser.add_argument("-r", "--repository", dest="repository", help="Github Repositoy name e.g. 
cms-sw/cmssw.", type=str, default=gh_user+"/"+gh_cmssw) - parser.add_argument("--since", dest="since", default=None) - parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") - parser.add_argument("pulls", nargs="*", type=int) - args = parser.parse_args() - - print(args) - if args.source == args.dest: - print("Source and destination branches are same") - exit(1) - elif (not args.source) or (not args.dest): - print("Missing source or destination branch") - exit(1) - - since = datetime (2000,1,1) - if args.since: - since = datetime.strptime(args.since, '%Y-%m-%dT%H:%M') - GH_TOKEN = open(expanduser("~/.github-token")).read().strip() - gh = Github(login_or_token=GH_TOKEN) - - try: - gh_repo = gh.get_repo(args.repository) - except: - print("Could not find repository.") - exit(1) - - gh_repo.get_branch(args.source) - gh_repo.get_branch(args.dest) - - pulls = args.pulls or gh_repo.get_pulls(base=args.source, state="open") - - done_prs_id = get_ported_PRs(gh_repo, args.source, args.dest) - for pr in pulls: - # If we just have the numeric Id, let's get the associated issue. - if type(pr) == int: - pr = gh_repo.get_pull(pr) - print("Checking ",pr.number) - if pr.number in done_prs_id: - print("Already ported as #",done_prs_id[pr.number]) - continue - if pr.created_at < since: - print("Older than ",args.since) - continue - print(pr.number, pr.head.user.login, pr.head.ref, pr.created_at) - newBody = pr.body + "\nAutomatically ported from " + args.source + " #%s (original by @%s).\nPlease wait for a new IB (12 to 24H) before requesting to test this PR." % (pr.number, str(pr.head.user.login)) - try: - newHead = "%s:%s" % (pr.head.user.login, pr.head.ref) - print('-----') - print("Porting %s" % pr.number) - print(pr.title) - print(newBody) - print(args.dest) - print(newHead) - print('---') - if args.dryRun: - print('ATTENTION: Not creating new PR on Github (dry-run)') - continue - params = { - "title": pr.title, - "body": newBody, - "head": newHead, - "base": args.dest, - "maintainer_can_modify": False - } - request = Request("https://api.github.com/repos/cms-sw/cmssw/pulls", - headers={"Authorization" : "token " + GH_TOKEN }) - request.get_method = lambda: 'POST' - newPR = json.loads(urlopen( request, json.dumps(params).encode()).read()) - print("New PR number", newPR['number']) - sleep(15) - except GithubException as e: - print("Error while processing: ", pr.number) - print(e) - continue diff --git a/forward-pull-requests b/forward-pull-requests new file mode 120000 index 000000000000..62a15a3aafc8 --- /dev/null +++ b/forward-pull-requests @@ -0,0 +1 @@ +forward-pull-requests.py \ No newline at end of file diff --git a/forward-pull-requests.py b/forward-pull-requests.py new file mode 100755 index 000000000000..55b14d3ad636 --- /dev/null +++ b/forward-pull-requests.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 + +# Given a branch, gets all of its pull requests +# and recreates them in a different branch. 
+
+from __future__ import print_function
+from github import Github, GithubException
+from sys import exit
+from os.path import expanduser
+from argparse import ArgumentParser
+from datetime import datetime
+from _py2with3compatibility import Request, urlopen
+from time import sleep
+from cms_static import GH_CMSSW_ORGANIZATION as gh_user
+from cms_static import GH_CMSSW_REPO as gh_cmssw
+from github_utils import get_ported_PRs
+from socket import setdefaulttimeout
+
+setdefaulttimeout(120)
+import json
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("-s", "--source")
+    parser.add_argument("-d", "--dest")
+    parser.add_argument(
+        "-r",
+        "--repository",
+        dest="repository",
+        help="Github Repositoy name e.g. cms-sw/cmssw.",
+        type=str,
+        default=gh_user + "/" + gh_cmssw,
+    )
+    parser.add_argument("--since", dest="since", default=None)
+    parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true")
+    parser.add_argument("pulls", nargs="*", type=int)
+    args = parser.parse_args()
+
+    print(args)
+    if args.source == args.dest:
+        print("Source and destination branches are same")
+        exit(1)
+    elif (not args.source) or (not args.dest):
+        print("Missing source or destination branch")
+        exit(1)
+
+    since = datetime(2000, 1, 1)
+    if args.since:
+        since = datetime.strptime(args.since, "%Y-%m-%dT%H:%M")
+    GH_TOKEN = open(expanduser("~/.github-token")).read().strip()
+    gh = Github(login_or_token=GH_TOKEN)
+
+    try:
+        gh_repo = gh.get_repo(args.repository)
+    except:
+        print("Could not find repository.")
+        exit(1)
+
+    gh_repo.get_branch(args.source)
+    gh_repo.get_branch(args.dest)
+
+    pulls = args.pulls or gh_repo.get_pulls(base=args.source, state="open")
+
+    done_prs_id = get_ported_PRs(gh_repo, args.source, args.dest)
+    for pr in pulls:
+        # If we just have the numeric Id, let's get the associated issue.
+        if type(pr) == int:
+            pr = gh_repo.get_pull(pr)
+        print("Checking ", pr.number)
+        if pr.number in done_prs_id:
+            print("Already ported as #", done_prs_id[pr.number])
+            continue
+        if pr.created_at < since:
+            print("Older than ", args.since)
+            continue
+        print(pr.number, pr.head.user.login, pr.head.ref, pr.created_at)
+        newBody = (
+            pr.body
+            + "\nAutomatically ported from "
+            + args.source
+            + " #%s (original by @%s).\nPlease wait for a new IB (12 to 24H) before requesting to test this PR."
+ % (pr.number, str(pr.head.user.login)) + ) + try: + newHead = "%s:%s" % (pr.head.user.login, pr.head.ref) + print("-----") + print("Porting %s" % pr.number) + print(pr.title) + print(newBody) + print(args.dest) + print(newHead) + print("---") + if args.dryRun: + print("ATTENTION: Not creating new PR on Github (dry-run)") + continue + params = { + "title": pr.title, + "body": newBody, + "head": newHead, + "base": args.dest, + "maintainer_can_modify": False, + } + request = Request( + "https://api.github.com/repos/cms-sw/cmssw/pulls", + headers={"Authorization": "token " + GH_TOKEN}, + ) + request.get_method = lambda: "POST" + newPR = json.loads(urlopen(request, json.dumps(params).encode()).read()) + print("New PR number", newPR["number"]) + sleep(15) + except GithubException as e: + print("Error while processing: ", pr.number) + print(e) + continue diff --git a/forward_ports_map.py b/forward_ports_map.py index d59367553b9a..1ce2a7d07927 100644 --- a/forward_ports_map.py +++ b/forward_ports_map.py @@ -1,93 +1,91 @@ -#CMS GIT Repositories automatic forward port map -#FORMAT: -#GIT_REPO_FWPORTS[repo][source-branch]=[destination-branch[:strategy]] -#e.g -#GIT_REPO_FWPORTS["cmssw"]["CMSSW_7_6_X"]=["CMSSW_7_6_ROOT64_X", "CMSSW_8_0_X:ours"] +# CMS GIT Repositories automatic forward port map +# FORMAT: +# GIT_REPO_FWPORTS[repo][source-branch]=[destination-branch[:strategy]] +# e.g +# GIT_REPO_FWPORTS["cmssw"]["CMSSW_7_6_X"]=["CMSSW_7_6_ROOT64_X", "CMSSW_8_0_X:ours"] from releases import CMSSW_DEVEL_BRANCH -GIT_REPO_FWPORTS = {"cmsdist" : {},"cmssw" : {}} +GIT_REPO_FWPORTS = {"cmsdist": {}, "cmssw": {}} -#Added explicitly by Zygimantas Matonis -GIT_REPO_FWPORTS["cms-sw.github.io"] = { - "code": ["master"] -} +# Added explicitly by Zygimantas Matonis +GIT_REPO_FWPORTS["cms-sw.github.io"] = {"code": ["master"]} -#Forward port master branch to latest dev branch -#Master branch is always forward ported to one branch. -GIT_REPO_FWPORTS["cmssw"]["master"]=[CMSSW_DEVEL_BRANCH] +# Forward port master branch to latest dev branch +# Master branch is always forward ported to one branch. 
+GIT_REPO_FWPORTS["cmssw"]["master"] = [CMSSW_DEVEL_BRANCH] -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_7_1_X/stable"]=[] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_7_1_X/stable"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_7_1_X/stable"].append("IB/CMSSW_7_1_X/pythia240") -GIT_REPO_FWPORTS["cmssw"]["CMSSW_7_1_X"]=[] +GIT_REPO_FWPORTS["cmssw"]["CMSSW_7_1_X"] = [] GIT_REPO_FWPORTS["cmssw"]["CMSSW_7_1_X"].append("CMSSW_7_1_PYTHIA240_X") -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_1_X/gcc530"]=[] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_1_X/gcc530"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_1_X/gcc530"].append("IB/CMSSW_9_1_X/gcc630") -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_2_X/gcc530"]=[] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_2_X/gcc530"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_9_2_X/gcc530"].append("IB/CMSSW_9_2_X/gcc630") -#Added explicitly by Shahzad MUZAFFAR -GIT_REPO_FWPORTS["cmssw"]["CMSSW_9_4_X"]=[] +# Added explicitly by Shahzad MUZAFFAR +GIT_REPO_FWPORTS["cmssw"]["CMSSW_9_4_X"] = [] GIT_REPO_FWPORTS["cmssw"]["CMSSW_9_4_X"].append("CMSSW_9_4_MAOD_X") GIT_REPO_FWPORTS["cmssw"]["CMSSW_9_4_X"].append("CMSSW_9_4_AN_X") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_10_1_X/gcc630"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_10_1_X/gcc630"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_10_1_X/gcc630"].append("IB/CMSSW_10_1_X/gcc700") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_0_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_0_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_0_X/master"].append("IB/CMSSW_11_0_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_1_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_1_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_1_X/master"].append("IB/CMSSW_11_1_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_2_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_2_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_2_X/master"].append("IB/CMSSW_11_2_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_3_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_3_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_11_3_X/master"].append("IB/CMSSW_11_3_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_0_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_0_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_0_X/master"].append("IB/CMSSW_12_0_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_1_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_1_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_1_X/master"].append("IB/CMSSW_12_1_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_2_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_2_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_2_X/master"].append("IB/CMSSW_12_2_X/cc8") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_5_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_5_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_5_X/master"].append("IB/CMSSW_12_5_X/g11") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_6_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_6_X/master"] = [] 
GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_12_6_X/master"].append("IB/CMSSW_12_6_X/g11") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_0_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_0_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_0_X/master"].append("IB/CMSSW_13_0_X/cs9") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_1_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_1_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_1_X/master"].append("IB/CMSSW_13_1_X/cs9") -#Automatically added -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_2_X/master"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_2_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_2_X/master"].append("IB/CMSSW_13_2_X/cs9") -#Automatically added -GIT_REPO_FWPORTS["cmssw"]["CMSSW_13_3_X"]=[] +# Automatically added +GIT_REPO_FWPORTS["cmssw"]["CMSSW_13_3_X"] = [] GIT_REPO_FWPORTS["cmssw"]["CMSSW_13_3_X"].append("CMSSW_13_3_DEVEL_X") -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"]=[] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"] = [] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/devel") GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/g11") GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/root628") @@ -100,7 +98,7 @@ GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/cudart") GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/g13") GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/master"].append("IB/CMSSW_13_3_X/g12_cpp20") -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/rootmaster"]=["IB/CMSSW_13_3_X/rootmodule"] -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/g4"]=["IB/CMSSW_13_3_X/g4_vecgeom"] -GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/root630"]=["IB/CMSSW_13_3_X/g13"] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/rootmaster"] = ["IB/CMSSW_13_3_X/rootmodule"] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/g4"] = ["IB/CMSSW_13_3_X/g4_vecgeom"] +GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/root630"] = ["IB/CMSSW_13_3_X/g13"] GIT_REPO_FWPORTS["cmsdist"]["IB/CMSSW_13_3_X/root630"].append("IB/CMSSW_13_3_X/g12_cpp20") diff --git a/gen-relval-jobs.py b/gen-relval-jobs.py index 3ad2ca47a494..fcbabb9f209a 100755 --- a/gen-relval-jobs.py +++ b/gen-relval-jobs.py @@ -14,52 +14,58 @@ from RelValArgs import GetMatrixOptions from runPyRelValThread import PyRelValsThread, splitWorkflows from cmsutils import doCmd + print("Diving workflows") workdir = sys.argv[1] RelValtimes = sys.argv[2] print(RelValtimes) try: - max_wf=int(sys.argv[3]) + max_wf = int(sys.argv[3]) except: - max_wf=100 + max_wf = 100 relval_args = GetMatrixOptions(environ["CMSSW_VERSION"], environ["SCRAM_ARCH"]) -if 'RELVAL_WORKFLOWS' in environ: relval_args=relval_args+' '+environ["RELVAL_WORKFLOWS"] -matrix = PyRelValsThread(1,environ["CMSSW_BASE"]) +if "RELVAL_WORKFLOWS" in environ: + relval_args = relval_args + " " + environ["RELVAL_WORKFLOWS"] +matrix = PyRelValsThread(1, environ["CMSSW_BASE"]) workflows = matrix.getWorkFlows(relval_args) if exists(RelValtimes): - owf = [] - max_tm=0 - with open(RelValtimes) as json_file: - try: json_data = json.load(json_file) - except: - print("Error reading RelVal Times") - json_data={"avg": []} - for tm_str in sorted(json_data["avg"],key=int, reverse=True): - tm=int(tm_str) - if tm > max_tm : max_tm=tm - for wf in json_data["avg"][tm_str]: - if wf in 
workflows: owf.append([wf,tm]) - uwf = [] - owfs = [ x[0] for x in owf ] - for wf in workflows: - if not wf in owfs: uwf.append([wf,max_tm]) - workflows = uwf + owf + owf = [] + max_tm = 0 + with open(RelValtimes) as json_file: + try: + json_data = json.load(json_file) + except: + print("Error reading RelVal Times") + json_data = {"avg": []} + for tm_str in sorted(json_data["avg"], key=int, reverse=True): + tm = int(tm_str) + if tm > max_tm: + max_tm = tm + for wf in json_data["avg"][tm_str]: + if wf in workflows: + owf.append([wf, tm]) + uwf = [] + owfs = [x[0] for x in owf] + for wf in workflows: + if not wf in owfs: + uwf.append([wf, max_tm]) + workflows = uwf + owf if workflows: - workflows = splitWorkflows(workflows, max_wf) - print(workflows) - on_grid = 0 - #if '_DEVEL_X' in environ['CMSSW_VERSION']: - # on_grid = 2 - total = len(workflows) - try: - for i in range(1, total+1): - wf=",".join(workflows[i-1]) - jobid = str(i)+"of"+str(total) - jobfile = workdir+"/ib-run-relval-"+jobid - doCmd("echo WORKFLOWS="+wf+" >"+jobfile) - doCmd("echo JOBID="+jobid+" >>"+jobfile) - if on_grid>0: - doCmd("echo 'SLAVE_LABELS=(condor&&cpu-8)' >>"+jobfile) - on_grid=on_grid-1 - except Exception as e: - print("Error " , e) + workflows = splitWorkflows(workflows, max_wf) + print(workflows) + on_grid = 0 + # if '_DEVEL_X' in environ['CMSSW_VERSION']: + # on_grid = 2 + total = len(workflows) + try: + for i in range(1, total + 1): + wf = ",".join(workflows[i - 1]) + jobid = str(i) + "of" + str(total) + jobfile = workdir + "/ib-run-relval-" + jobid + doCmd("echo WORKFLOWS=" + wf + " >" + jobfile) + doCmd("echo JOBID=" + jobid + " >>" + jobfile) + if on_grid > 0: + doCmd("echo 'SLAVE_LABELS=(condor&&cpu-8)' >>" + jobfile) + on_grid = on_grid - 1 + except Exception as e: + print("Error ", e) diff --git a/generate-categories-json b/generate-categories-json deleted file mode 100755 index 7ed7fe47e2d3..000000000000 --- a/generate-categories-json +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -from categories import CMSSW_CATEGORIES, CMSSW_L2, CMSSW_L1 -import json -# Generates a json file sumarizing the categories, their packages, and conveners -# it asumes that categories.py from https://raw.githubusercontent.com/cms-sw/cms-bot/HEAD/categories.py -# is already downloaded - -# ------------------------------------------------------------------------------ -# Global Variables -# ----------------------------------------------------------------------------- -OUTPUT_FILE = 'categories.json' - -# ------------------------------------------------------------------------------ -# Start of execution -# ----------------------------------------------------------------------------- - -all_categories = list(CMSSW_CATEGORIES.keys()) -# schema of categories_to_people: -# { -# "" : [ "" , "person2" , ... 
, "personN" ] -# } -categories_to_people = {} - -for person in list(CMSSW_L2.keys()): - categories = CMSSW_L2[ person ] - for cat in categories: - if not categories_to_people.get( cat ): - categories_to_people[ cat ] = [] - categories_to_people[ cat ].append( person ) - -print('----------------') -print(categories_to_people) - -output = {} -output[ 'people_to_categories' ] = CMSSW_L2 -output[ 'categories_to_people' ] = categories_to_people -output[ 'categories_to_packages' ] = CMSSW_CATEGORIES -output[ 'L1' ] = CMSSW_L1 - -out_json = open( OUTPUT_FILE , "w" ) -json.dump( output , out_json , indent=4 ) -out_json.close() diff --git a/generate-categories-json b/generate-categories-json new file mode 120000 index 000000000000..016e91702ad7 --- /dev/null +++ b/generate-categories-json @@ -0,0 +1 @@ +generate-categories-json.py \ No newline at end of file diff --git a/generate-categories-json.py b/generate-categories-json.py new file mode 100755 index 000000000000..aef9c50ee58d --- /dev/null +++ b/generate-categories-json.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +from __future__ import print_function +from categories import CMSSW_CATEGORIES, CMSSW_L2, CMSSW_L1 +import json + +# Generates a json file sumarizing the categories, their packages, and conveners +# it asumes that categories.py from https://raw.githubusercontent.com/cms-sw/cms-bot/HEAD/categories.py +# is already downloaded + +# ------------------------------------------------------------------------------ +# Global Variables +# ----------------------------------------------------------------------------- +OUTPUT_FILE = "categories.json" + +# ------------------------------------------------------------------------------ +# Start of execution +# ----------------------------------------------------------------------------- + +all_categories = list(CMSSW_CATEGORIES.keys()) +# schema of categories_to_people: +# { +# "" : [ "" , "person2" , ... , "personN" ] +# } +categories_to_people = {} + +for person in list(CMSSW_L2.keys()): + categories = CMSSW_L2[person] + for cat in categories: + if not categories_to_people.get(cat): + categories_to_people[cat] = [] + categories_to_people[cat].append(person) + +print("----------------") +print(categories_to_people) + +output = {} +output["people_to_categories"] = CMSSW_L2 +output["categories_to_people"] = categories_to_people +output["categories_to_packages"] = CMSSW_CATEGORIES +output["L1"] = CMSSW_L1 + +out_json = open(OUTPUT_FILE, "w") +json.dump(output, out_json, indent=4) +out_json.close() diff --git a/generate-json-performance-charts b/generate-json-performance-charts deleted file mode 100755 index 4707f69b7398..000000000000 --- a/generate-json-performance-charts +++ /dev/null @@ -1,160 +0,0 @@ -#! 
/usr/bin/env python -from __future__ import print_function -from optparse import OptionParser -from os import listdir -from os import path -import re -import json - -#------------------------------------------------------------------------------------------------------------ -# This script reads a list of the workflows, steps and parameters for which you want to see the graphs -# It generates a json file with the correct structure and links to each graph, this json file is used -# to create the visualization -#------------------------------------------------------------------------------------------------------------ - -def get_wfs_ordered(base_dir): - workflows={} - check=re.compile("^[0-9]+.") - for wf in listdir(base_dir): - if check.match(wf): - wf_number = float(re.sub('_.*$', '', wf)) - workflows[wf_number]=wf - return [ workflows[wf_number] for wf_number in sorted(workflows.keys()) ] - - -def add_images_to_step(wf,step): - imgs = [] - for img_name in listdir('%s/%s/%s' % (BASE_DIR,wf['wf_name'],step['step_name'])): - if (img_name in RESULT_FILE_NAMES ): - img = {} - img['name'] = img_name - img['url'] = '%s/%s/%s/%s' % (BASE_URL,wf['wf_name'],step['step_name'],img['name']) - imgs.append(img) - print(img['name']) - step['imgs'] = imgs - -def add_steps_to_wf(wf): - steps = [] - for step_name in sorted(listdir('%s/%s' % (BASE_DIR,wf['wf_name']))): - if path.isdir('%s/%s/%s'% (BASE_DIR,wf['wf_name'],step_name) ): - step = {} - step['step_name'] = step_name - add_images_to_step(wf,step) - steps.append(step) - print(step_name) - wf['steps'] = steps - - -def get_workflows(): - workflows = [] - for wf_name in get_wfs_ordered(BASE_DIR): - if path.isdir('%s/%s/'% (BASE_DIR,wf_name) ) and not 'bootstrap' in wf_name: - print('Adding %s' % wf_name) - wf = {} - wf['wf_name'] = wf_name - add_steps_to_wf(wf) - workflows.append(wf) - print() - return workflows - -def print_workflows(wfs): - for wf in wfs: - print(wf['wf_name']) - for step in wf['steps']: - print('\t %s' % step['step_name']) - for img in step['imgs']: - print(img) - -def add_workflow(results,wf_name): - for wf in results['wfs']: - if wf['wf_name'] == wf_name: - return wf - - new_wf = {} - new_wf['wf_name'] = wf_name - results['wfs'].append(new_wf) - return new_wf - -def add_step(workflow,step_name): - - if not workflow.get('steps'): - workflow['steps'] = [] - - for step in workflow['steps']: - if step['step_name'] == step_name: - return step - - new_step = {} - new_step['step_name'] = step_name - workflow['steps'].append(new_step) - return new_step - -def add_param(step,param_name): - - if not step.get('imgs'): - step['imgs'] = [] - - for p in step['imgs']: - if p['name'] == param_name: - return p - - new_param = {} - new_param['name'] = param_name - step['imgs'].append(new_param) - return new_param - -def add_url_to_param(workflow,step,param): - step_number = step['step_name'].split('_')[0] - url = BASE_URL.replace('WORKFLOW',workflow['wf_name']).replace('STEP',step_number).replace('PARAM',param['name']) - url = url.replace('+','%2B') - print(url) - param['url'] = url - -#----------------------------------------------------------------------------------- -#---- Parser Options -#----------------------------------------------------------------------------------- -parser = OptionParser(usage="usage: %prog PLOTS_LIST \n PLOTS_LIST list of plots that you want to visualize") - -(options, args) = parser.parse_args() - -#----------------------------------------------------------------------------------- -#---- Start 
-#----------------------------------------------------------------------------------- - -if (len(args)<1): - print('you need to specify a list of plots') - parser.print_help() - exit() - -WF_LIST = args[0] - -GRAPH_PARAMS = '&from=-15days&fontBold=true&fontSize=12&lineWidth=5&title=PARAM&yMin=0' - -BASE_URL = 'https://cmsgraph.cern.ch/render?target=IBRelVals.slc6_amd64_gcc481.CMSSW_7_1_X.WORKFLOW.STEP.PARAM&height=800&width=800%s'%GRAPH_PARAMS - -result = {} - -lines = open(WF_LIST, "r").readlines() - -result['wfs'] = [] - -for l in lines: - if l.startswith("#"): - continue - else: - l = l.replace('\n','') - parts = l.split(' ') - wf_name = parts[0] - step_name = parts[1] - param_name = parts[2] - - workflow = add_workflow(result,wf_name) - step = add_step(workflow,step_name) - param = add_param(step,param_name) - add_url_to_param(workflow,step,param) - -print(result) - -out_json = open("plots_summary.json", "w") -json.dump(result,out_json,indent=4) -out_json.close() diff --git a/generate-json-performance-charts b/generate-json-performance-charts new file mode 120000 index 000000000000..70834609419a --- /dev/null +++ b/generate-json-performance-charts @@ -0,0 +1 @@ +generate-json-performance-charts.py \ No newline at end of file diff --git a/generate-json-performance-charts.py b/generate-json-performance-charts.py new file mode 100755 index 000000000000..69ee6642a99f --- /dev/null +++ b/generate-json-performance-charts.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python +from __future__ import print_function +from optparse import OptionParser +from os import listdir +from os import path +import re +import json + +# ------------------------------------------------------------------------------------------------------------ +# This script reads a list of the workflows, steps and parameters for which you want to see the graphs +# It generates a json file with the correct structure and links to each graph, this json file is used +# to create the visualization +# ------------------------------------------------------------------------------------------------------------ + + +def get_wfs_ordered(base_dir): + workflows = {} + check = re.compile("^[0-9]+.") + for wf in listdir(base_dir): + if check.match(wf): + wf_number = float(re.sub("_.*$", "", wf)) + workflows[wf_number] = wf + return [workflows[wf_number] for wf_number in sorted(workflows.keys())] + + +def add_images_to_step(wf, step): + imgs = [] + for img_name in listdir("%s/%s/%s" % (BASE_DIR, wf["wf_name"], step["step_name"])): + if img_name in RESULT_FILE_NAMES: + img = {} + img["name"] = img_name + img["url"] = "%s/%s/%s/%s" % (BASE_URL, wf["wf_name"], step["step_name"], img["name"]) + imgs.append(img) + print(img["name"]) + step["imgs"] = imgs + + +def add_steps_to_wf(wf): + steps = [] + for step_name in sorted(listdir("%s/%s" % (BASE_DIR, wf["wf_name"]))): + if path.isdir("%s/%s/%s" % (BASE_DIR, wf["wf_name"], step_name)): + step = {} + step["step_name"] = step_name + add_images_to_step(wf, step) + steps.append(step) + print(step_name) + wf["steps"] = steps + + +def get_workflows(): + workflows = [] + for wf_name in get_wfs_ordered(BASE_DIR): + if path.isdir("%s/%s/" % (BASE_DIR, wf_name)) and not "bootstrap" in wf_name: + print("Adding %s" % wf_name) + wf = {} + wf["wf_name"] = wf_name + add_steps_to_wf(wf) + workflows.append(wf) + print() + return workflows + + +def print_workflows(wfs): + for wf in wfs: + print(wf["wf_name"]) + for step in wf["steps"]: + print("\t %s" % step["step_name"]) + for img in step["imgs"]: + print(img) + + +def 
add_workflow(results, wf_name): + for wf in results["wfs"]: + if wf["wf_name"] == wf_name: + return wf + + new_wf = {} + new_wf["wf_name"] = wf_name + results["wfs"].append(new_wf) + return new_wf + + +def add_step(workflow, step_name): + if not workflow.get("steps"): + workflow["steps"] = [] + + for step in workflow["steps"]: + if step["step_name"] == step_name: + return step + + new_step = {} + new_step["step_name"] = step_name + workflow["steps"].append(new_step) + return new_step + + +def add_param(step, param_name): + if not step.get("imgs"): + step["imgs"] = [] + + for p in step["imgs"]: + if p["name"] == param_name: + return p + + new_param = {} + new_param["name"] = param_name + step["imgs"].append(new_param) + return new_param + + +def add_url_to_param(workflow, step, param): + step_number = step["step_name"].split("_")[0] + url = ( + BASE_URL.replace("WORKFLOW", workflow["wf_name"]) + .replace("STEP", step_number) + .replace("PARAM", param["name"]) + ) + url = url.replace("+", "%2B") + print(url) + param["url"] = url + + +# ----------------------------------------------------------------------------------- +# ---- Parser Options +# ----------------------------------------------------------------------------------- +parser = OptionParser( + usage="usage: %prog PLOTS_LIST \n PLOTS_LIST list of plots that you want to visualize" +) + +(options, args) = parser.parse_args() + +# ----------------------------------------------------------------------------------- +# ---- Start +# ----------------------------------------------------------------------------------- + +if len(args) < 1: + print("you need to specify a list of plots") + parser.print_help() + exit() + +WF_LIST = args[0] + +GRAPH_PARAMS = "&from=-15days&fontBold=true&fontSize=12&lineWidth=5&title=PARAM&yMin=0" + +BASE_URL = ( + "https://cmsgraph.cern.ch/render?target=IBRelVals.slc6_amd64_gcc481.CMSSW_7_1_X.WORKFLOW.STEP.PARAM&height=800&width=800%s" + % GRAPH_PARAMS +) + +result = {} + +lines = open(WF_LIST, "r").readlines() + +result["wfs"] = [] + +for l in lines: + if l.startswith("#"): + continue + else: + l = l.replace("\n", "") + parts = l.split(" ") + wf_name = parts[0] + step_name = parts[1] + param_name = parts[2] + + workflow = add_workflow(result, wf_name) + step = add_step(workflow, step_name) + param = add_param(step, param_name) + add_url_to_param(workflow, step, param) + +print(result) + +out_json = open("plots_summary.json", "w") +json.dump(result, out_json, indent=4) +out_json.close() diff --git a/generate-performance-summary b/generate-performance-summary deleted file mode 100755 index b131dced52c9..000000000000 --- a/generate-performance-summary +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from _py2with3compatibility import run_cmd -from os.path import dirname -from xml.sax import parseString, ContentHandler -import argparse -import re -from sys import exit -import sys -import time -import pickle -import struct -import socket -from datetime import date, timedelta - -CARBON_SERVER = '0.0.0.0' -CARBON_PORT = 2004 - -class JobReportHandler(ContentHandler): - def __init__(self, what, step, architecture, release, workflow, timestamp): - ContentHandler.__init__(self) - self.counters = dict((k, "") for k in what) - self.step = step - self.architecture = architecture - self.release = release - self.workflow = workflow - self.timestamp = timestamp - self.metrics = [] - - def startElement(self, name, attrs): - if name != "Metric": - return - - if not attrs["Name"] in 
self.counters: - return - if "nan" in attrs["Value"]: - return - - path = ".".join(["IBRelVals", self.architecture, self.release, self.workflow, self.step, attrs["Name"]]) - value = attrs["Value"] - timestamp = time.mktime(self.timestamp) - self.metrics.append((path, (timestamp, value))) - self.counters[attrs["Name"]] = attrs["Value"] - -class SchemaDumper(ContentHandler): - def __init__(self, schema): - ContentHandler.__init__(self) - self.schema = schema - - def startElement(self, name, attrs): - if name != "Metric": - return - self.schema.add(attrs["Name"]) - -IB_BASE_DIR="/afs/cern.ch/cms/sw/ReleaseCandidates" - -def chunks(l, n): - for i in range(0, len(l), n): - yield l[i:i+n] - -def format(s, **kwds): - return s % kwds - -# 100 metrics at the time -def sendMetrics(metrics, server, port): - for l in chunks(metrics, 100): - payload = pickle.dumps(l) - print(len(payload)) - header = struct.pack("!L", len(payload)) - message = header + payload - sock = socket.socket() - sock.connect((server, port)) - sock.sendall(message) - sock.close() - time.sleep(0.5) - -def calculateFileSizeMetrics(release, architecture, timestamp, fullRelease, args): - timestamp = time.mktime(timestamp) - cmd = format("find %(base)s/vol*/%(architecture)s/cms/cmssw*/%(fullRelease)s/lib/%(architecture)s -name '*.so' -exec wc -c {} \; | sed -e 's|/.*/||;s|[.]so||'", - base=IB_BASE_DIR, - releasePath=releasePath, - fullRelease=fullRelease, - architecture=architecture) - error, out = run_cmd(format(cmd)) - if error: - return - if not out.strip(): - return - metrics = [] - for line in out.split("\n"): - size, library = line.split(" ", 1) - metric = ".".join(["IBStats", architecture, release, library, "FileSize"]) - metrics.append((metric, (timestamp, size))) - sendMetrics(metrics, args.server, args.port) - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Extract plot data from IB job reports') - parser.add_argument('--base-dir', dest='baseDir', default=IB_BASE_DIR, - help='Where the logs are located.') - parser.add_argument('--server', dest='server', default=CARBON_SERVER, - help='Where the logs are located.') - parser.add_argument('--port', dest='port', default=CARBON_PORT, - help='Where the logs are located.') - parser.add_argument('--filter-release', dest='filterRelease', default=".*", - help='regexp to filter releases') - parser.add_argument('--filter-workflows', dest='filterWorkflows', default=".*", - help='regexp to filter releases') - parser.add_argument('--days', dest="days", default=7, type=int, help="days to go into the past.") - parser.add_argument('what', metavar='KEYS', type=str, nargs='*', - help='What to dump from the logs') - args = parser.parse_args() - - print("Parsing files", file=sys.stderr) - cmd = "find %s/slc* -name 'pyRelValMatrixLogs.zip' | sort -r" % IB_BASE_DIR - print(cmd) - error, files = run_cmd(cmd) - files = [x for x in files.split("\n") if x] - schema = set() - beginning = (date.today() - timedelta(args.days)).timetuple() - for f in files: - print(f, file=sys.stderr) - releasePath = dirname(f) - architecture = re.sub(".*/((slc|osx|fc)[^/]*)/.*", "\\1", f) - fullRelease = re.sub(".*/(CMSSW_[^/]*)/.*", "\\1", f) - release = re.sub(".*/(CMSSW_[^/]*)/.*", "\\1", f) - # Note for a future maintainer, remember to fix it by year 2100. 
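[Editor's note, not part of the patch] The sendMetrics() helper shown earlier in this script frames data for the Carbon pickle listener (CARBON_SERVER/CARBON_PORT above): a 4-byte big-endian length header built with struct.pack("!L", ...) followed by a pickled list of (path, (timestamp, value)) tuples. A self-contained sketch of that framing follows; the send_one_metric() name and the server address are placeholders, only the default port 2004 comes from the script itself.

# Illustrative sketch only: length-prefixed pickle framing as used by sendMetrics().
import pickle
import socket
import struct
import time

def send_one_metric(path, value, server="127.0.0.1", port=2004):
    metrics = [(path, (time.time(), value))]   # list of (path, (timestamp, value))
    payload = pickle.dumps(metrics)
    header = struct.pack("!L", len(payload))   # 4-byte big-endian payload length
    sock = socket.socket()
    sock.connect((server, port))
    sock.sendall(header + payload)
    sock.close()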
- date = re.sub(".*/CMSSW_[^/]*(20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9][0-9][0-9]).*", "\\1", f) - release = release.replace(date,"").strip("_") - timestamp = time.strptime(date, "%Y-%m-%d-%H%M") - if timestamp < beginning: - continue - if not re.match(args.filterRelease, release): - continue - error, reports = run_cmd("unzip -l %s | grep JobReport | awk '{print $4}'" % f) - calculateFileSizeMetrics(release, architecture, timestamp, fullRelease, args) - - metrics = [] - for r in [x for x in reports.split("\n") if x]: - cmd = "unzip -p %s %s" % (f, r) - error, report = run_cmd(cmd) - workflow = re.sub("^([^/]*).*", "\\1", r).replace(".","_") - if not re.match(args.filterWorkflows, workflow): - continue - step = re.sub(".*JobReport([0-9]).*", "step\\1", r) - if not args.what: - handler = SchemaDumper(schema) - else: - handler = JobReportHandler(args.what, step, architecture, release, workflow, timestamp) - try: - parseString(report, handler) - except: - continue - metrics += handler.metrics - if schema: - print("\n".join(sorted(schema))) - exit(0) - if not len(metrics): - continue - print("Sending %s metrics." % len(metrics)) - sendMetrics(metrics, args.server, args.port) diff --git a/generate-performance-summary b/generate-performance-summary new file mode 120000 index 000000000000..459930bb261e --- /dev/null +++ b/generate-performance-summary @@ -0,0 +1 @@ +generate-performance-summary.py \ No newline at end of file diff --git a/generate-performance-summary.py b/generate-performance-summary.py new file mode 100755 index 000000000000..224bd6f56df0 --- /dev/null +++ b/generate-performance-summary.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python +from __future__ import print_function +from _py2with3compatibility import run_cmd +from os.path import dirname +from xml.sax import parseString, ContentHandler +import argparse +import re +from sys import exit +import sys +import time +import pickle +import struct +import socket +from datetime import date, timedelta + +CARBON_SERVER = "0.0.0.0" +CARBON_PORT = 2004 + + +class JobReportHandler(ContentHandler): + def __init__(self, what, step, architecture, release, workflow, timestamp): + ContentHandler.__init__(self) + self.counters = dict((k, "") for k in what) + self.step = step + self.architecture = architecture + self.release = release + self.workflow = workflow + self.timestamp = timestamp + self.metrics = [] + + def startElement(self, name, attrs): + if name != "Metric": + return + + if not attrs["Name"] in self.counters: + return + if "nan" in attrs["Value"]: + return + + path = ".".join( + ["IBRelVals", self.architecture, self.release, self.workflow, self.step, attrs["Name"]] + ) + value = attrs["Value"] + timestamp = time.mktime(self.timestamp) + self.metrics.append((path, (timestamp, value))) + self.counters[attrs["Name"]] = attrs["Value"] + + +class SchemaDumper(ContentHandler): + def __init__(self, schema): + ContentHandler.__init__(self) + self.schema = schema + + def startElement(self, name, attrs): + if name != "Metric": + return + self.schema.add(attrs["Name"]) + + +IB_BASE_DIR = "/afs/cern.ch/cms/sw/ReleaseCandidates" + + +def chunks(l, n): + for i in range(0, len(l), n): + yield l[i : i + n] + + +def format(s, **kwds): + return s % kwds + + +# 100 metrics at the time +def sendMetrics(metrics, server, port): + for l in chunks(metrics, 100): + payload = pickle.dumps(l) + print(len(payload)) + header = struct.pack("!L", len(payload)) + message = header + payload + sock = socket.socket() + sock.connect((server, port)) + 
sock.sendall(message) + sock.close() + time.sleep(0.5) + + +def calculateFileSizeMetrics(release, architecture, timestamp, fullRelease, args): + timestamp = time.mktime(timestamp) + cmd = format( + "find %(base)s/vol*/%(architecture)s/cms/cmssw*/%(fullRelease)s/lib/%(architecture)s -name '*.so' -exec wc -c {} \; | sed -e 's|/.*/||;s|[.]so||'", + base=IB_BASE_DIR, + releasePath=releasePath, + fullRelease=fullRelease, + architecture=architecture, + ) + error, out = run_cmd(format(cmd)) + if error: + return + if not out.strip(): + return + metrics = [] + for line in out.split("\n"): + size, library = line.split(" ", 1) + metric = ".".join(["IBStats", architecture, release, library, "FileSize"]) + metrics.append((metric, (timestamp, size))) + sendMetrics(metrics, args.server, args.port) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Extract plot data from IB job reports") + parser.add_argument( + "--base-dir", dest="baseDir", default=IB_BASE_DIR, help="Where the logs are located." + ) + parser.add_argument( + "--server", dest="server", default=CARBON_SERVER, help="Where the logs are located." + ) + parser.add_argument( + "--port", dest="port", default=CARBON_PORT, help="Where the logs are located." + ) + parser.add_argument( + "--filter-release", dest="filterRelease", default=".*", help="regexp to filter releases" + ) + parser.add_argument( + "--filter-workflows", + dest="filterWorkflows", + default=".*", + help="regexp to filter releases", + ) + parser.add_argument( + "--days", dest="days", default=7, type=int, help="days to go into the past." + ) + parser.add_argument( + "what", metavar="KEYS", type=str, nargs="*", help="What to dump from the logs" + ) + args = parser.parse_args() + + print("Parsing files", file=sys.stderr) + cmd = "find %s/slc* -name 'pyRelValMatrixLogs.zip' | sort -r" % IB_BASE_DIR + print(cmd) + error, files = run_cmd(cmd) + files = [x for x in files.split("\n") if x] + schema = set() + beginning = (date.today() - timedelta(args.days)).timetuple() + for f in files: + print(f, file=sys.stderr) + releasePath = dirname(f) + architecture = re.sub(".*/((slc|osx|fc)[^/]*)/.*", "\\1", f) + fullRelease = re.sub(".*/(CMSSW_[^/]*)/.*", "\\1", f) + release = re.sub(".*/(CMSSW_[^/]*)/.*", "\\1", f) + # Note for a future maintainer, remember to fix it by year 2100. + date = re.sub( + ".*/CMSSW_[^/]*(20[0-9][0-9]-[0-1][0-9]-[0-3][0-9]-[0-2][0-9][0-9][0-9]).*", "\\1", f + ) + release = release.replace(date, "").strip("_") + timestamp = time.strptime(date, "%Y-%m-%d-%H%M") + if timestamp < beginning: + continue + if not re.match(args.filterRelease, release): + continue + error, reports = run_cmd("unzip -l %s | grep JobReport | awk '{print $4}'" % f) + calculateFileSizeMetrics(release, architecture, timestamp, fullRelease, args) + + metrics = [] + for r in [x for x in reports.split("\n") if x]: + cmd = "unzip -p %s %s" % (f, r) + error, report = run_cmd(cmd) + workflow = re.sub("^([^/]*).*", "\\1", r).replace(".", "_") + if not re.match(args.filterWorkflows, workflow): + continue + step = re.sub(".*JobReport([0-9]).*", "step\\1", r) + if not args.what: + handler = SchemaDumper(schema) + else: + handler = JobReportHandler( + args.what, step, architecture, release, workflow, timestamp + ) + try: + parseString(report, handler) + except: + continue + metrics += handler.metrics + if schema: + print("\n".join(sorted(schema))) + exit(0) + if not len(metrics): + continue + print("Sending %s metrics." 
% len(metrics)) + sendMetrics(metrics, args.server, args.port) diff --git a/get-builds-stats.py b/get-builds-stats.py index 1889e76c945a..f9753fe2a431 100755 --- a/get-builds-stats.py +++ b/get-builds-stats.py @@ -2,65 +2,71 @@ from __future__ import print_function from cmsutils import MachineMemoryGB, MachineCPUCount import json, re, sys -r=open(sys.argv[1]) -items=json.load(r) + +r = open(sys.argv[1]) +items = json.load(r) r.close() -cpus=MachineCPUCount -mem=MachineMemoryGB*1024*1024*1024 -default_keys= {"cpu": "cpu_90", "rss": "rss_90", "time": "time"} +cpus = MachineCPUCount +mem = MachineMemoryGB * 1024 * 1024 * 1024 +default_keys = {"cpu": "cpu_90", "rss": "rss_90", "time": "time"} all_data = {} for item in items: - name=item['_source']['name'] - jobs=item['_source']['build_jobs'] - if name not in all_data: - all_data[name] = {} - for k in default_keys: all_data[name][k] = [] - for k in default_keys: - xk = default_keys[k] - all_data[name][k].append(int(item['_source'][xk]*cpus/jobs)) + name = item["_source"]["name"] + jobs = item["_source"]["build_jobs"] + if name not in all_data: + all_data[name] = {} + for k in default_keys: + all_data[name][k] = [] + for k in default_keys: + xk = default_keys[k] + all_data[name][k].append(int(item["_source"][xk] * cpus / jobs)) default_res = 4 -if cpus<4: default_res=1 -elif cpus<8: default_res=2 -else: default_res = 4 -total_cpus = cpus*100 -data={"defaults": {"cpu": (50, total_cpus/default_res), - "rss": (int(mem/cpus), int(mem/default_res)), - "time": (1, 300) - }, - "resources":{"cpu": total_cpus, "rss": mem}, - "packages": {}, - "known": [("^.+-toolfile$", 0), - ("^data-.+$", 0), - ("^.+$", 1)] - } +if cpus < 4: + default_res = 1 +elif cpus < 8: + default_res = 2 +else: + default_res = 4 +total_cpus = cpus * 100 +data = { + "defaults": { + "cpu": (50, total_cpus / default_res), + "rss": (int(mem / cpus), int(mem / default_res)), + "time": (1, 300), + }, + "resources": {"cpu": total_cpus, "rss": mem}, + "packages": {}, + "known": [("^.+-toolfile$", 0), ("^data-.+$", 0), ("^.+$", 1)], +} for name in all_data: - data["packages"][name] = {'cpu': 0, 'rss': 0, 'time': -1, "name": name} - for k in default_keys: - if all_data[name][k]: - data["packages"][name][k] = int(sum(all_data[name][k])/len(all_data[name][k])) - #Default resources if no data found for a package - if data["packages"][name]['time']==-1: - idx = 1 - for exp in data["known"]: - if re.match(exp[0], name): - idx = exp[1] - break - for k in data["defaults"]: - data["packages"][name][k] = data["defaults"][k][idx] - #for small package with build time 1 or less use min resources - elif data["packages"][name]['time']==0: - for k in data["defaults"]: - data["packages"][name][k] = data["defaults"][k][0] - else: - #Make sure resources are not more than the total - for k in data["defaults"]: - if k == "time": continue - v = data["packages"][name][k] - if v>data["resources"][k]: - v = data["resources"][k] - elif v==0: - v = data["defaults"][k][0] - data["packages"][name][k] = v + data["packages"][name] = {"cpu": 0, "rss": 0, "time": -1, "name": name} + for k in default_keys: + if all_data[name][k]: + data["packages"][name][k] = int(sum(all_data[name][k]) / len(all_data[name][k])) + # Default resources if no data found for a package + if data["packages"][name]["time"] == -1: + idx = 1 + for exp in data["known"]: + if re.match(exp[0], name): + idx = exp[1] + break + for k in data["defaults"]: + data["packages"][name][k] = data["defaults"][k][idx] + # for small package with build time 1 or less 
use min resources + elif data["packages"][name]["time"] == 0: + for k in data["defaults"]: + data["packages"][name][k] = data["defaults"][k][0] + else: + # Make sure resources are not more than the total + for k in data["defaults"]: + if k == "time": + continue + v = data["packages"][name][k] + if v > data["resources"][k]: + v = data["resources"][k] + elif v == 0: + v = data["defaults"][k][0] + data["packages"][name][k] = v print(json.dumps(data, sort_keys=True, indent=2)) diff --git a/get-git-tags b/get-git-tags deleted file mode 100644 index b4c3ec90c8be..000000000000 --- a/get-git-tags +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from github import Github, GithubException -from os.path import expanduser -from optparse import OptionParser -from datetime import datetime -from sys import exit -import re -from socket import setdefaulttimeout -setdefaulttimeout(120) - -if __name__ == "__main__": - parser = OptionParser(usage="%prog -b|--branch -d|--date -t|--tag [-n|--dry-run]") - parser.add_option("-r", "--repository", dest="repo", help="Github repository e.g. cms-sw/cmssw", type=str, default='cms-sw/cmssw') - parser.add_option("-m", "--match", dest="match", help="Regexp to match tags e.g. CMSSW_8_0_X", type=str, default='CMSSW_.+') - opts, args = parser.parse_args() - - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - repo = gh.get_repo(opts.repo) - print('API Rate Limit') - print('Limit, Remaining: ', gh.rate_limiting) - print('Reset time (GMT): ', datetime.fromtimestamp(gh.rate_limiting_resettime)) - - tags = repo.get_releases() - tagRe = re.compile ('^'+opts.match+'.*$') - for t in tags: - if tagRe.match(t.name): - print(t.name) - diff --git a/get-git-tags b/get-git-tags new file mode 120000 index 000000000000..a25e8e44ed36 --- /dev/null +++ b/get-git-tags @@ -0,0 +1 @@ +get-git-tags.py \ No newline at end of file diff --git a/get-git-tags.py b/get-git-tags.py new file mode 100644 index 000000000000..c726d845785d --- /dev/null +++ b/get-git-tags.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +from __future__ import print_function +from github import Github, GithubException +from os.path import expanduser +from optparse import OptionParser +from datetime import datetime +from sys import exit +import re +from socket import setdefaulttimeout + +setdefaulttimeout(120) + +if __name__ == "__main__": + parser = OptionParser( + usage="%prog -b|--branch -d|--date -t|--tag [-n|--dry-run]" + ) + parser.add_option( + "-r", + "--repository", + dest="repo", + help="Github repository e.g. cms-sw/cmssw", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-m", + "--match", + dest="match", + help="Regexp to match tags e.g. 
CMSSW_8_0_X", + type=str, + default="CMSSW_.+", + ) + opts, args = parser.parse_args() + + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + repo = gh.get_repo(opts.repo) + print("API Rate Limit") + print("Limit, Remaining: ", gh.rate_limiting) + print("Reset time (GMT): ", datetime.fromtimestamp(gh.rate_limiting_resettime)) + + tags = repo.get_releases() + tagRe = re.compile("^" + opts.match + ".*$") + for t in tags: + if tagRe.match(t.name): + print(t.name) diff --git a/get-local-build-stats.py b/get-local-build-stats.py index 900abd173034..97025aedae54 100755 --- a/get-local-build-stats.py +++ b/get-local-build-stats.py @@ -7,20 +7,24 @@ from _py2with3compatibility import run_cmd data = [] -e, o = run_cmd("find %s -maxdepth 6 -mindepth 6 -name opts.json -type f | sed 's|/opts.json$||'" % sys.argv[1]) +e, o = run_cmd( + "find %s -maxdepth 6 -mindepth 6 -name opts.json -type f | sed 's|/opts.json$||'" % sys.argv[1] +) for d in o.split("\n"): - tool = d.split("/")[-2] - jf = join(d,"opts.json") - lf = join(d,"log") - sf = join(d,"%s.json" % tool) - if not exists(lf) or not exists(sf): continue - e, c = run_cmd("tail -1 %s | grep 'exit 0' | wc -l" % lf) - if c == "0": continue - jopts = {} - with open(jf) as opts_dict_f: jopts = json.load(opts_dict_f) - item = get_summary_stats_from_json_file(sf, 1) - item.update(jopts) - data.append({'_source': item}) + tool = d.split("/")[-2] + jf = join(d, "opts.json") + lf = join(d, "log") + sf = join(d, "%s.json" % tool) + if not exists(lf) or not exists(sf): + continue + e, c = run_cmd("tail -1 %s | grep 'exit 0' | wc -l" % lf) + if c == "0": + continue + jopts = {} + with open(jf) as opts_dict_f: + jopts = json.load(opts_dict_f) + item = get_summary_stats_from_json_file(sf, 1) + item.update(jopts) + data.append({"_source": item}) print(json.dumps(data, sort_keys=True, indent=2)) - diff --git a/get-pr-branch b/get-pr-branch deleted file mode 100755 index e8f04d691db3..000000000000 --- a/get-pr-branch +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 -""" -Get the branch of pull request. It was made in mind that it will be used to get branch-name for CMSSW repo. - -Arguments: -argv[1] - Pull request ID -argv[2] - Repository (optional) -""" - -from sys import exit, argv, path -from os.path import expanduser,dirname,abspath,join, exists -from socket import setdefaulttimeout -from github_utils import get_pr -setdefaulttimeout(120) - -if __name__ == "__main__": - prId = int(argv[1]) - repo = "cms-sw/cmssw" - try: repo = argv[2] - except IndexError: pass - - try: - pr = get_pr(repo, prId) - except Exception as ex: - print("Could not find pull request %s. 
Maybe this is an issue" % prId) - print(ex) - exit(1) - - if pr["base"]["ref"] == "master": - from releases import CMSSW_DEVEL_BRANCH - from _py2with3compatibility import run_cmd - e, o = run_cmd("curl -k -s -L https://cmssdt.cern.ch/SDT/BaselineDevRelease | grep '^CMSSW_'") - if not o: o = CMSSW_DEVEL_BRANCH - print(o) - else: - pr_branch = pr["base"]["ref"] - try: - SCRIPT_DIR = dirname(abspath(argv[0])) - repo_dir = join(SCRIPT_DIR,'repos',repo.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): - path.insert(0,repo_dir) - import repo_config - pr_branch = repo_config.CMS_BRANCH_MAP[pr["base"]["ref"]] - except KeyError: pass - print(pr_branch) diff --git a/get-pr-branch b/get-pr-branch new file mode 120000 index 000000000000..b5bea55188fe --- /dev/null +++ b/get-pr-branch @@ -0,0 +1 @@ +get-pr-branch.py \ No newline at end of file diff --git a/get-pr-branch.py b/get-pr-branch.py new file mode 100755 index 000000000000..a449d28c1ebb --- /dev/null +++ b/get-pr-branch.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +""" +Get the branch of pull request. It was made in mind that it will be used to get branch-name for CMSSW repo. + +Arguments: +argv[1] - Pull request ID +argv[2] - Repository (optional) +""" + +from sys import exit, argv, path +from os.path import expanduser, dirname, abspath, join, exists +from socket import setdefaulttimeout +from github_utils import get_pr + +setdefaulttimeout(120) + +if __name__ == "__main__": + prId = int(argv[1]) + repo = "cms-sw/cmssw" + try: + repo = argv[2] + except IndexError: + pass + + try: + pr = get_pr(repo, prId) + except Exception as ex: + print("Could not find pull request %s. Maybe this is an issue" % prId) + print(ex) + exit(1) + + if pr["base"]["ref"] == "master": + from releases import CMSSW_DEVEL_BRANCH + from _py2with3compatibility import run_cmd + + e, o = run_cmd( + "curl -k -s -L https://cmssdt.cern.ch/SDT/BaselineDevRelease | grep '^CMSSW_'" + ) + if not o: + o = CMSSW_DEVEL_BRANCH + print(o) + else: + pr_branch = pr["base"]["ref"] + try: + SCRIPT_DIR = dirname(abspath(argv[0])) + repo_dir = join(SCRIPT_DIR, "repos", repo.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + path.insert(0, repo_dir) + import repo_config + + pr_branch = repo_config.CMS_BRANCH_MAP[pr["base"]["ref"]] + except KeyError: + pass + print(pr_branch) diff --git a/get-relval-failures.py b/get-relval-failures.py index 9a6f2c8fe997..c1450b991d08 100755 --- a/get-relval-failures.py +++ b/get-relval-failures.py @@ -3,8 +3,8 @@ from es_utils import get_payload_wscroll parser = argparse.ArgumentParser() -parser.add_argument('release', type=str, help='CMSSW Release') -parser.add_argument('arch', type=str, help='Architecture ') +parser.add_argument("release", type=str, help="CMSSW Release") +parser.add_argument("arch", type=str, help="Architecture ") args = parser.parse_args() print("Searching relval failures for %s/%s" % (args.release, args.arch)) @@ -13,17 +13,20 @@ "query": {"bool": {"must": {"query_string": {"query": "release:%s AND architecture:%s AND NOT exitcode:0", "default_operator": "AND"}}}}, "from": 0, "size": 10000 -}""" % (args.release, args.arch) +}""" % ( + args.release, + args.arch, +) -content_hash = get_payload_wscroll('cmssdt-ib-matrix-*', query_relval_failures) +content_hash = get_payload_wscroll("cmssdt-ib-matrix-*", query_relval_failures) if content_hash: - if (not 'hits' in content_hash) or (not 'hits' in content_hash['hits']): - print("ERROR: ", content_hash) - sys.exit(1) + if (not "hits" in content_hash) or (not "hits" 
in content_hash["hits"]): + print("ERROR: ", content_hash) + sys.exit(1) - for hit in content_hash['hits']['hits']: - relval = hit["_source"]["workflow"] - step = hit["_source"]["step"] - exitcode = hit["_source"]["exitcode"] - print(f"WF:{relval}:{step}:{exitcode}") + for hit in content_hash["hits"]["hits"]: + relval = hit["_source"]["workflow"] + step = hit["_source"]["step"] + exitcode = hit["_source"]["exitcode"] + print(f"WF:{relval}:{step}:{exitcode}") diff --git a/getWorkflowStatsFromES.py b/getWorkflowStatsFromES.py index df837027df61..741820ed14ea 100644 --- a/getWorkflowStatsFromES.py +++ b/getWorkflowStatsFromES.py @@ -6,43 +6,48 @@ from ROOT import * -''' +""" this program uses pyROOT, no brainer would be to set cmsenv before running it -''' +""" + def _format(s, **kwds): return s % kwds -def getWorkflowStatsFromES(release='*', arch='*', lastNdays=7, page_size=0): - stats = es_query(index='relvals_stats_*', - query=format('(NOT cpu_max:0) AND (exit_code:0) AND release:%(release_cycle)s AND architecture:%(architecture)s', - release_cycle=release+"_*", - architecture=arch - ), - start_time=1000*int(time()-(86400*lastNdays)), - end_time=1000*int(time()),scroll=True) - return stats['hits']['hits'] +def getWorkflowStatsFromES(release="*", arch="*", lastNdays=7, page_size=0): + stats = es_query( + index="relvals_stats_*", + query=format( + "(NOT cpu_max:0) AND (exit_code:0) AND release:%(release_cycle)s AND architecture:%(architecture)s", + release_cycle=release + "_*", + architecture=arch, + ), + start_time=1000 * int(time() - (86400 * lastNdays)), + end_time=1000 * int(time()), + scroll=True, + ) + return stats["hits"]["hits"] + -''' +""" have a function that narrows the result to fields of interest, described in a list and in the given order -''' +""" -def filterElasticSearchResult(ES_result=None, list_of_fields=None): - #arch = ES_result[0]['_source']['architecture'] - #print arch +def filterElasticSearchResult(ES_result=None, list_of_fields=None): + # arch = ES_result[0]['_source']['architecture'] + # print arch final_struct = {} for element in ES_result: + source_object = element["_source"] + if source_object["exit_code"] is not 0: + continue - source_object = element['_source'] - if source_object['exit_code'] is not 0: continue - - stamp = source_object['@timestamp'] - flow = source_object['workflow'] - step = source_object['step'] - + stamp = source_object["@timestamp"] + flow = source_object["workflow"] + step = source_object["step"] if not stamp in final_struct: final_struct.update({stamp: {}}) @@ -57,21 +62,22 @@ def filterElasticSearchResult(ES_result=None, list_of_fields=None): return final_struct -''' + +""" deeper in this context :) ,this function 1. gets two objects filtered after the ES query 2. 
for each sub-step key found tries to find the same in both objects and to make the difference between their values -''' +""" -def compareMetrics(firstObject=None, secondObject=None,workflow=None,stepnum=None): +def compareMetrics(firstObject=None, secondObject=None, workflow=None, stepnum=None): fields = [] comparison_results = {} for stamp in firstObject: for wf in firstObject[stamp]: for step in firstObject[stamp][wf]: - fields = firstObject[stamp][wf][step].keys() + fields = firstObject[stamp][wf][step].keys() break break break @@ -82,22 +88,28 @@ def compareMetrics(firstObject=None, secondObject=None,workflow=None,stepnum=Non for stamp in firstObject: for wf in firstObject[stamp]: if workflow: - if (float(wf) != float(workflow)): continue + if float(wf) != float(workflow): + continue for step in firstObject[stamp][wf]: if stepnum: - #print stepnum, step - if str(stepnum) != str(step): continue + # print stepnum, step + if str(stepnum) != str(step): + continue for field in firstObject[stamp][wf][step]: - #print field - if stamp in secondObject and wf in secondObject[stamp] \ - and step in secondObject[stamp][wf] \ - and field in secondObject[stamp][wf][step]: + # print field + if ( + stamp in secondObject + and wf in secondObject[stamp] + and step in secondObject[stamp][wf] + and field in secondObject[stamp][wf][step] + ): first_metric = firstObject[stamp][wf][step][field] second_metric = secondObject[stamp][wf][step][field] - if field.startswith('rss'): - if second_metric is 0: continue #sometimes the result is zero even when the exit_code is non 0 - #difference = 100 - ( float( float(first_metric) / float(second_metric) ) * 100 ) + if field.startswith("rss"): + if second_metric is 0: + continue # sometimes the result is zero even when the exit_code is non 0 + # difference = 100 - ( float( float(first_metric) / float(second_metric) ) * 100 ) difference = int((first_metric - second_metric) / 1048576) else: difference = first_metric - second_metric @@ -106,13 +118,13 @@ def compareMetrics(firstObject=None, secondObject=None,workflow=None,stepnum=Non return comparison_results -if __name__ == "__main__": +if __name__ == "__main__": opts = None release = None - fields = ['time', 'rss_max', 'cpu_avg', 'rss_75' , 'rss_25' , 'rss_avg' ] + fields = ["time", "rss_max", "cpu_avg", "rss_75", "rss_25", "rss_avg"] - arch = 'slc7_amd64_gcc700' + arch = "slc7_amd64_gcc700" days = int(sys.argv[5]) page_size = 0 limit = 20 @@ -123,10 +135,12 @@ def compareMetrics(firstObject=None, secondObject=None,workflow=None,stepnum=Non archtwo = sys.argv[4] wf_n = None step_n = None - if len(sys.argv) > 6: wf_n = sys.argv[6] - if len(sys.argv) > 7: step_n = sys.argv[7] + if len(sys.argv) > 6: + wf_n = sys.argv[6] + if len(sys.argv) > 7: + step_n = sys.argv[7] print(wf_n, step_n) - + json_out_first = getWorkflowStatsFromES(release_one, archone, days, page_size) json_out_second = getWorkflowStatsFromES(release_two, archtwo, days, page_size) @@ -134,23 +148,25 @@ def compareMetrics(firstObject=None, secondObject=None,workflow=None,stepnum=Non filtered_second = filterElasticSearchResult(json_out_second, fields) comp_results = compareMetrics(filtered_first, filtered_second, wf_n, step_n) - #print json.dumps(comp_results, indent=2, sort_keys=True, separators=(',', ': ')) + # print json.dumps(comp_results, indent=2, sort_keys=True, separators=(',', ': ')) for hist in comp_results: print(hist) - histo = TH1F(hist, release_one + ' - ' + release_two + '['+ hist +']', 100000, -5000, 5000) - - if hist.startswith('rss'): - 
histo.GetXaxis().SetTitle('Difference in MB') - #print 'title set for', hist - if hist is 'time': - histo.GetXaxis().SetTitle('Difference in seconds') - if hist.startswith('cpu'): - histo.GetXaxis().SetTitle('Difference in cpu time') + histo = TH1F( + hist, release_one + " - " + release_two + "[" + hist + "]", 100000, -5000, 5000 + ) + + if hist.startswith("rss"): + histo.GetXaxis().SetTitle("Difference in MB") + # print 'title set for', hist + if hist is "time": + histo.GetXaxis().SetTitle("Difference in seconds") + if hist.startswith("cpu"): + histo.GetXaxis().SetTitle("Difference in cpu time") for i in comp_results[hist]: histo.Fill(i) - histo.SaveAs(hist+".root") + histo.SaveAs(hist + ".root") # setup any CMSSW first (to get pyROOT in path) # example usage: diff --git a/get_repo_authors.py b/get_repo_authors.py index 5d1ae254830c..6648e98a7ec5 100755 --- a/get_repo_authors.py +++ b/get_repo_authors.py @@ -4,21 +4,25 @@ from sys import argv, exit from _py2with3compatibility import run_cmd from json import loads, dumps + try: - authors_info = {} - repo = argv[1] - err, output = run_cmd("curl -s https://api.github.com/repos/" + repo + "/stats/contributors") - if err: - print(output) - exit(1) - data = loads(output) - for item in data: - authors_info[item['author']['login']] = item['total'] - if not authors_info: - print(output) - exit(1) - print(basename(repo).upper().replace('-','_') + "_AUTHORS="+dumps(authors_info,sort_keys=True, indent=2)) + authors_info = {} + repo = argv[1] + err, output = run_cmd("curl -s https://api.github.com/repos/" + repo + "/stats/contributors") + if err: + print(output) + exit(1) + data = loads(output) + for item in data: + authors_info[item["author"]["login"]] = item["total"] + if not authors_info: + print(output) + exit(1) + print( + basename(repo).upper().replace("-", "_") + + "_AUTHORS=" + + dumps(authors_info, sort_keys=True, indent=2) + ) except IndexError: - print("Repo Name Required ... Arugement missing !!!!") - exit (1) - + print("Repo Name Required ... 
Arugement missing !!!!") + exit(1) diff --git a/gh-teams.py b/gh-teams.py index da0a2cad5c62..2ee8490466b2 100755 --- a/gh-teams.py +++ b/gh-teams.py @@ -6,273 +6,308 @@ from argparse import ArgumentParser from sys import exit from socket import setdefaulttimeout -from github_utils import api_rate_limits, github_api,add_organization_member -from github_utils import create_team,get_pending_members, get_gh_token +from github_utils import api_rate_limits, github_api, add_organization_member +from github_utils import create_team, get_pending_members, get_gh_token from github_utils import get_delete_pending_members, get_failed_pending_members from categories import CMSSW_L1, CMSSW_L2, CMS_SDT + setdefaulttimeout(120) -CMS_OWNERS = [ "davidlange6", "smuzaffar", "cmsbuild" ] + CMSSW_L1[:] -CMS_ORGANIZATIONS = [ "cms-data", "cms-externals", "cms-sw" ] +CMS_OWNERS = ["davidlange6", "smuzaffar", "cmsbuild"] + CMSSW_L1[:] +CMS_ORGANIZATIONS = ["cms-data", "cms-externals", "cms-sw"] REPO_OWNERS = {} -REPO_TEAMS = {} +REPO_TEAMS = {} for org in CMS_ORGANIZATIONS: - REPO_OWNERS[org] = CMS_OWNERS[:] - REPO_TEAMS[org] = {} + REPO_OWNERS[org] = CMS_OWNERS[:] + REPO_TEAMS[org] = {} ################################# -#Set Extra owners for repos # +# Set Extra owners for repos # ################################# -REPO_OWNERS["cms-data"] += [] +REPO_OWNERS["cms-data"] += [] REPO_OWNERS["cms-externals"] += [] -REPO_OWNERS["cms-sw"] += [ "sextonkennedy" ] +REPO_OWNERS["cms-sw"] += ["sextonkennedy"] ################################# -#Set Teams for organizations # +# Set Teams for organizations # ################################# -#Teams for cms-data -REPO_TEAMS["cms-data"]["Developers"] = { - "members" : CMS_SDT[:], - "repositories" : { "*" : "push" } -} +# Teams for cms-data +REPO_TEAMS["cms-data"]["Developers"] = {"members": CMS_SDT[:], "repositories": {"*": "push"}} -#Teams for cms-externals +# Teams for cms-externals REPO_TEAMS["cms-externals"]["Developers"] = deepcopy(REPO_TEAMS["cms-data"]["Developers"]) -REPO_TEAMS["cms-externals"]["boost-developers"] = { "members": ["fwyzard"], "repositories" : { "boost" : "push" } } +REPO_TEAMS["cms-externals"]["boost-developers"] = { + "members": ["fwyzard"], + "repositories": {"boost": "push"}, +} REPO_TEAMS["cms-externals"]["Developers"]["members"].append("gartung") REPO_TEAMS["cms-externals"]["Developers"]["members"].append("fwyzard") -#Teams for cms-sw -REPO_TEAMS["cms-sw"]["RecoLuminosity-LumiDB-admins"] = { - "members" : [], - "repositories" : { "RecoLuminosity-LumiDB": "admin"} +# Teams for cms-sw +REPO_TEAMS["cms-sw"]["RecoLuminosity-LumiDB-admins"] = { + "members": [], + "repositories": {"RecoLuminosity-LumiDB": "admin"}, } -REPO_TEAMS["cms-sw"]["generators-l2"] = { - "members" : ["GurpreetSinghChahal","agrohsje"], - "repositories" : { "genproductions" : "admin", - "xsecdb" : "admin"} +REPO_TEAMS["cms-sw"]["generators-l2"] = { + "members": ["GurpreetSinghChahal", "agrohsje"], + "repositories": {"genproductions": "admin", "xsecdb": "admin"}, } REPO_TEAMS["cms-sw"]["Dqm-Integration-developers"] = { - "members" : ["rovere","deguio"], - "repositories" : { "DQM-Integration": "push"} + "members": ["rovere", "deguio"], + "repositories": {"DQM-Integration": "push"}, } REPO_TEAMS["cms-sw"]["configdb-owners"] = { - "members" : ["fwyzard", "Martin-Grunewald", "Sam-Harper", "silviodonato", "missirol", "mmusich"], - "repositories" : { "hlt-confdb":"admin", "web-confdb":"admin"} + "members": [ + "fwyzard", + "Martin-Grunewald", + "Sam-Harper", + "silviodonato", + "mmusich", 
+ ], + "repositories": {"hlt-confdb": "admin", "web-confdb": "admin"}, } REPO_TEAMS["cms-sw"]["cmsdist-writers"] = { - "members" : [ "h4d4", "muhammadimranfarooqi", "arooshap" ] + CMS_SDT[:], - "repositories" : { "cmsdist":"push" } -} -REPO_TEAMS["cms-sw"]["cmssw-l2"] = { - "members" : [ "*" ], - "repositories" : { "cmssw":"pull" } -} -REPO_TEAMS["cms-sw"]["cmssw-developers"] = { - "members" : [ "*" ], - "repositories" : { "cmssw":"pull" } + "members": ["h4d4", "muhammadimranfarooqi", "arooshap"] + CMS_SDT[:], + "repositories": {"cmsdist": "push"}, } +REPO_TEAMS["cms-sw"]["cmssw-l2"] = {"members": ["*"], "repositories": {"cmssw": "pull"}} +REPO_TEAMS["cms-sw"]["cmssw-developers"] = {"members": ["*"], "repositories": {"cmssw": "pull"}} REPO_TEAMS["cms-sw"]["cms-sw-writers"] = { - "members" : CMS_SDT[:], - "repositories" : { "*":"push", "!cmssw" : "pull", "!cmsdist" : "pull" } + "members": CMS_SDT[:], + "repositories": {"*": "push", "!cmssw": "pull", "!cmsdist": "pull"}, } REPO_TEAMS["cms-sw"]["cms-sw-admins"] = { - "members" : CMS_SDT[:], - "repositories" : { "cmssdt-wiki":"admin", "cms-sw.github.io":"admin" } + "members": CMS_SDT[:], + "repositories": {"cmssdt-wiki": "admin", "cms-sw.github.io": "admin"}, } REPO_TEAMS["cms-sw"]["all-l2"] = { - "members" : CMSSW_L1[:], + "members": CMSSW_L1[:], } for user in CMSSW_L2: - REPO_TEAMS["cms-sw"]['all-l2']["members"].append(user) - for cat in CMSSW_L2[user]: - cat = '%s-l2' % cat - if not cat in REPO_TEAMS["cms-sw"]: - REPO_TEAMS["cms-sw"][cat] = {"members": []} - REPO_TEAMS["cms-sw"][cat]["members"].append(user) + REPO_TEAMS["cms-sw"]["all-l2"]["members"].append(user) + for cat in CMSSW_L2[user]: + cat = "%s-l2" % cat + if not cat in REPO_TEAMS["cms-sw"]: + REPO_TEAMS["cms-sw"][cat] = {"members": []} + REPO_TEAMS["cms-sw"][cat]["members"].append(user) ################################# parser = ArgumentParser() -parser.add_argument("-o", "--organization", dest="organization", help="Github Organization name e.g. cms-sw. Default is * i.e. all cms origanizations", type=str, default="*") +parser.add_argument( + "-o", + "--organization", + dest="organization", + help="Github Organization name e.g. cms-sw. Default is * i.e. 
all cms origanizations", + type=str, + default="*", +) parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") args = parser.parse_args() gh = Github(login_or_token=get_gh_token(token_file=expanduser("~/.github-token"))) -cache = {"users" : {}} -total_changes=0 -err_code=0 +cache = {"users": {}} +total_changes = 0 +err_code = 0 for org_name in CMS_ORGANIZATIONS: - if args.organization!="*" and org_name!=args.organization: continue - print("Wroking on Organization ",org_name) - for inv in get_failed_pending_members(org_name): - if ('failed_reason' in inv) and ('Invitation expired' in inv['failed_reason']): - print(" =>Deleting pending invitation ",inv['id'],inv['login']) - if not args.dryRun: - get_delete_pending_members(org_name, inv['id']) - api_rate_limits(gh,msg=False) - pending_members = [] - for user in get_pending_members(org_name): - user = user['login'].encode("ascii", "ignore").decode() - pending_members.append(user) - print("Pending Invitations: %s" % ",".join(["@%s" % u for u in pending_members])) - api_rate_limits(gh) - org = gh.get_organization(org_name) - ok_mems = [] - print(" Looking for owners:",REPO_OWNERS[org_name]) - chg_flag=0 - for mem in org.get_members(role="admin"): - login = mem.login.encode("ascii", "ignore").decode() - if not login in cache["users"]: cache["users"][login] = mem - if not login in REPO_OWNERS[org_name]: - print(" =>Removing owner:",login) - if not args.dryRun: - try: - add_organization_member(org_name, login, role="member") - chg_flag+=1 - except Exception as ex: - print(" =>",ex) - err_code = 1 - else: - ok_mems.append(login) - for login in [ l for l in REPO_OWNERS[org_name] if not l in ok_mems ]: - print(" =>Adding owner:",login) - if not args.dryRun: add_organization_member(org_name, login, role="admin") - chg_flag+=1 - total_changes+=chg_flag - if not chg_flag: print(" OK Owners") - print(" Looking for teams:",list(REPO_TEAMS[org_name].keys())) - org_repos = [ repo for repo in org.get_repos() ] - teams = org.get_teams() - chg_flag=0 - for team in REPO_TEAMS[org_name]: - flag = False - for xteam in teams: - if xteam.name == team: - flag = True - break - if flag: continue - print(" => Creating team",team) - if not args.dryRun: - create_team(org_name, team, "cmssw team for "+team) - chg_flag+=1 - total_changes+=chg_flag - org_members = [ mem.login.encode("ascii", "ignore").decode() for mem in org.get_members() ] - print(" All members: ",org_members) - if chg_flag: teams = org.get_teams() - for team in teams: - xfile = "%s-%s.done" % (org_name, team.name) - if exists(xfile): continue - print(" Checking team:",team.name) - api_rate_limits(gh,msg=False) - team_info = {} - try: team_info = REPO_TEAMS[org_name][team.name] - except: - print(" WARNING: New team found on Github:",team.name) - err_code=1 - continue - members = team_info["members"] - tm_members = [ mem for mem in team.get_members()] - tm_members_login = [ mem.login.encode("ascii", "ignore").decode() for mem in tm_members ] - print(" Valid Members:",members) - print(" Existing Members:",tm_members_login) - ok_mems = ["*"] - chg_flag=0 - if not "*" in members: - for mem in tm_members: - api_rate_limits(gh,msg=False) + if args.organization != "*" and org_name != args.organization: + continue + print("Wroking on Organization ", org_name) + for inv in get_failed_pending_members(org_name): + if ("failed_reason" in inv) and ("Invitation expired" in inv["failed_reason"]): + print(" =>Deleting pending invitation ", inv["id"], inv["login"]) + if not args.dryRun: + 
get_delete_pending_members(org_name, inv["id"]) + api_rate_limits(gh, msg=False) + pending_members = [] + for user in get_pending_members(org_name): + user = user["login"].encode("ascii", "ignore").decode() + pending_members.append(user) + print("Pending Invitations: %s" % ",".join(["@%s" % u for u in pending_members])) + api_rate_limits(gh) + org = gh.get_organization(org_name) + ok_mems = [] + print(" Looking for owners:", REPO_OWNERS[org_name]) + chg_flag = 0 + for mem in org.get_members(role="admin"): login = mem.login.encode("ascii", "ignore").decode() - if not login in cache["users"]: cache["users"][login] = mem - if (login in members): - ok_mems.append(login) - else: - if not args.dryRun: team.remove_from_members(mem) - print(" =>Removed member:",login) - chg_flag+=1 - for login in [ l for l in members if not l in ok_mems ]: - api_rate_limits(gh,msg=False) - if login in pending_members: - print(" => Can not add member, pending invitation: %s" % login) - continue - if login not in org_members: - print(" =>Inviting member:",login) + if not login in cache["users"]: + cache["users"][login] = mem + if not login in REPO_OWNERS[org_name]: + print(" =>Removing owner:", login) if not args.dryRun: - try: - add_organization_member(org_name, login, role="member") - chg_flag+=1 - except Exception as ex: - print(" =>",ex) - err_code = 1 - continue - if not login in cache["users"]: cache["users"][login] = gh.get_user(login) + try: + add_organization_member(org_name, login, role="member") + chg_flag += 1 + except Exception as ex: + print(" =>", ex) + err_code = 1 + else: + ok_mems.append(login) + for login in [l for l in REPO_OWNERS[org_name] if not l in ok_mems]: + print(" =>Adding owner:", login) if not args.dryRun: - try: team.add_to_members(cache["users"][login]) - except Exception as e: - print(e) - err_code=1 - print(" =>Added member:",login) - chg_flag+=1 - total_changes+=chg_flag - if not chg_flag: print(" OK Team members") - if not "repositories" in team_info: - ref = open(xfile,"w") - ref.close() - continue - team_repos = [ repo for repo in team.get_repos() ] - team_repos_name = [ repo.name.encode("ascii", "ignore").decode() for repo in team_repos ] - print(" Checking team repositories") - print(" Valid Repos:",list(team_info["repositories"].keys())) - print(" Existing Repos:",team_repos_name) - repo_to_check = team_repos_name[:] - for repo in team_info["repositories"]: - if repo=="*": repo_to_check += [ r.name.encode("ascii", "ignore").decode() for r in org_repos ] - elif repo.startswith("!"): repo_to_check += [repo[1:]] - else: repo_to_check += [repo] - chg_flag=0 - for repo_name in set(repo_to_check): - api_rate_limits(gh, msg=False) - inv_repo = "!"+repo_name - repo = [ r for r in team_repos if r.name.encode("ascii", "ignore").decode() == repo_name ] - if (repo_name in team_info["repositories"]) or \ - ("*" in team_info["repositories"] and (not inv_repo in team_info["repositories"])): - prem = "pull" - if repo_name in team_info["repositories"]: prem = team_info["repositories"][repo_name] - elif "*" in team_info["repositories"]: prem = team_info["repositories"]["*"] - if not repo_name in team_repos_name: - if not args.dryRun: - if not repo: repo.append(gh.get_repo(org_name+"/"+repo_name)) - team.set_repo_permission(repo[0], prem) - print(" =>Added repo:",repo_name,prem) - chg_flag+=1 - else: - curperm = repo[0].permissions - set_perm=False - curperm_name="pull" - if curperm.admin: - curperm_name="admin" - if not prem=="admin": set_perm=True - elif curperm.push: - curperm_name="push" - 
if not prem=="push": set_perm=True - elif curperm.pull: - if not prem=="pull": set_perm=True - if set_perm: - if not args.dryRun: - if not repo: repo.append(gh.get_repo(org_name+"/"+repo_name)) - team.set_repo_permission(repo[0], prem) - print(" =>Set Permission:",repo_name,curperm_name,"=>",prem) - chg_flag+=1 - elif repo_name in team_repos_name: + add_organization_member(org_name, login, role="admin") + chg_flag += 1 + total_changes += chg_flag + if not chg_flag: + print(" OK Owners") + print(" Looking for teams:", list(REPO_TEAMS[org_name].keys())) + org_repos = [repo for repo in org.get_repos()] + teams = org.get_teams() + chg_flag = 0 + for team in REPO_TEAMS[org_name]: + flag = False + for xteam in teams: + if xteam.name == team: + flag = True + break + if flag: + continue + print(" => Creating team", team) if not args.dryRun: - if not repo: repo.append(gh.get_repo(org_name+"/"+repo_name)) - team.remove_from_repos(repo[0]) - print(" =>Removed repository:",repo_name) - chg_flag+=1 - if not chg_flag: print(" OK Team Repositories") - total_changes+=chg_flag - ref = open(xfile,"w") - ref.close() + create_team(org_name, team, "cmssw team for " + team) + chg_flag += 1 + total_changes += chg_flag + org_members = [mem.login.encode("ascii", "ignore").decode() for mem in org.get_members()] + print(" All members: ", org_members) + if chg_flag: + teams = org.get_teams() + for team in teams: + xfile = "%s-%s.done" % (org_name, team.name) + if exists(xfile): + continue + print(" Checking team:", team.name) + api_rate_limits(gh, msg=False) + team_info = {} + try: + team_info = REPO_TEAMS[org_name][team.name] + except: + print(" WARNING: New team found on Github:", team.name) + err_code = 1 + continue + members = team_info["members"] + tm_members = [mem for mem in team.get_members()] + tm_members_login = [mem.login.encode("ascii", "ignore").decode() for mem in tm_members] + print(" Valid Members:", members) + print(" Existing Members:", tm_members_login) + ok_mems = ["*"] + chg_flag = 0 + if not "*" in members: + for mem in tm_members: + api_rate_limits(gh, msg=False) + login = mem.login.encode("ascii", "ignore").decode() + if not login in cache["users"]: + cache["users"][login] = mem + if login in members: + ok_mems.append(login) + else: + if not args.dryRun: + team.remove_from_members(mem) + print(" =>Removed member:", login) + chg_flag += 1 + for login in [l for l in members if not l in ok_mems]: + api_rate_limits(gh, msg=False) + if login in pending_members: + print(" => Can not add member, pending invitation: %s" % login) + continue + if login not in org_members: + print(" =>Inviting member:", login) + if not args.dryRun: + try: + add_organization_member(org_name, login, role="member") + chg_flag += 1 + except Exception as ex: + print(" =>", ex) + err_code = 1 + continue + if not login in cache["users"]: + cache["users"][login] = gh.get_user(login) + if not args.dryRun: + try: + team.add_to_members(cache["users"][login]) + except Exception as e: + print(e) + err_code = 1 + print(" =>Added member:", login) + chg_flag += 1 + total_changes += chg_flag + if not chg_flag: + print(" OK Team members") + if not "repositories" in team_info: + ref = open(xfile, "w") + ref.close() + continue + team_repos = [repo for repo in team.get_repos()] + team_repos_name = [repo.name.encode("ascii", "ignore").decode() for repo in team_repos] + print(" Checking team repositories") + print(" Valid Repos:", list(team_info["repositories"].keys())) + print(" Existing Repos:", team_repos_name) + repo_to_check = 
team_repos_name[:] + for repo in team_info["repositories"]: + if repo == "*": + repo_to_check += [r.name.encode("ascii", "ignore").decode() for r in org_repos] + elif repo.startswith("!"): + repo_to_check += [repo[1:]] + else: + repo_to_check += [repo] + chg_flag = 0 + for repo_name in set(repo_to_check): + api_rate_limits(gh, msg=False) + inv_repo = "!" + repo_name + repo = [ + r for r in team_repos if r.name.encode("ascii", "ignore").decode() == repo_name + ] + if (repo_name in team_info["repositories"]) or ( + "*" in team_info["repositories"] and (not inv_repo in team_info["repositories"]) + ): + prem = "pull" + if repo_name in team_info["repositories"]: + prem = team_info["repositories"][repo_name] + elif "*" in team_info["repositories"]: + prem = team_info["repositories"]["*"] + if not repo_name in team_repos_name: + if not args.dryRun: + if not repo: + repo.append(gh.get_repo(org_name + "/" + repo_name)) + team.set_repo_permission(repo[0], prem) + print(" =>Added repo:", repo_name, prem) + chg_flag += 1 + else: + curperm = repo[0].permissions + set_perm = False + curperm_name = "pull" + if curperm.admin: + curperm_name = "admin" + if not prem == "admin": + set_perm = True + elif curperm.push: + curperm_name = "push" + if not prem == "push": + set_perm = True + elif curperm.pull: + if not prem == "pull": + set_perm = True + if set_perm: + if not args.dryRun: + if not repo: + repo.append(gh.get_repo(org_name + "/" + repo_name)) + team.set_repo_permission(repo[0], prem) + print(" =>Set Permission:", repo_name, curperm_name, "=>", prem) + chg_flag += 1 + elif repo_name in team_repos_name: + if not args.dryRun: + if not repo: + repo.append(gh.get_repo(org_name + "/" + repo_name)) + team.remove_from_repos(repo[0]) + print(" =>Removed repository:", repo_name) + chg_flag += 1 + if not chg_flag: + print(" OK Team Repositories") + total_changes += chg_flag + ref = open(xfile, "w") + ref.close() -print("Total Updates:",total_changes) +print("Total Updates:", total_changes) exit(err_code) diff --git a/gh_create_branches.py b/gh_create_branches.py index 422b08891c6b..55b4724feb1a 100755 --- a/gh_create_branches.py +++ b/gh_create_branches.py @@ -7,47 +7,62 @@ from cms_static import GH_CMSSW_REPO as gh_cmssw from cms_static import GH_CMSDIST_REPO as gh_cmsdist from socket import setdefaulttimeout + setdefaulttimeout(120) + def create_branch(repo, src_branch, des_branch, dryRun=False): - print("Creating new branch '%s' based on '%s'" % (des_branch, src_branch)) - base_ref = repo.get_branch(src_branch) - print(" Base branch %s has sha %s" % (src_branch, base_ref.commit.sha)) - try: - repo.get_branch(des_branch) - print(" Branch already exists: ",des_branch) + print("Creating new branch '%s' based on '%s'" % (des_branch, src_branch)) + base_ref = repo.get_branch(src_branch) + print(" Base branch %s has sha %s" % (src_branch, base_ref.commit.sha)) + try: + repo.get_branch(des_branch) + print(" Branch already exists: ", des_branch) + return + except GithubException as e: + if not "Branch not found" in e.data["message"]: + raise e + if not dryRun: + repo.create_git_ref("refs/heads/" + des_branch, base_ref.commit.sha) + print(" Created new branch ", des_branch, " based on ", base_ref.commit.sha) + else: + print(" DryRun: Creating new branch ", des_branch, " based on ", base_ref.commit.sha) return - except GithubException as e: - if not "Branch not found" in e.data['message']: raise e - if not dryRun: - repo.create_git_ref ("refs/heads/"+des_branch, base_ref.commit.sha) - print(" Created new branch 
",des_branch," based on ",base_ref.commit.sha) - else: - print(" DryRun: Creating new branch ",des_branch," based on ",base_ref.commit.sha) - return -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("-c", "--cmssw", dest="cmssw", action='append', type=lambda kv: kv.split("="), help="cmssw branch to be created. formate is key=value") - parser.add_argument("-d", "--cmsdist", dest="cmsdist", action='append', type=lambda kv: kv.split("="), help="cmsdist branch to be created. formate is key=value") - parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") - args = parser.parse_args() - print(args.cmssw) - print(args.cmsdist) +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument( + "-c", + "--cmssw", + dest="cmssw", + action="append", + type=lambda kv: kv.split("="), + help="cmssw branch to be created. formate is key=value", + ) + parser.add_argument( + "-d", + "--cmsdist", + dest="cmsdist", + action="append", + type=lambda kv: kv.split("="), + help="cmsdist branch to be created. formate is key=value", + ) + parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") + args = parser.parse_args() - GH_TOKEN = open(expanduser("~/.github-token")).read().strip() - gh = Github(login_or_token=GH_TOKEN) - if args.cmssw: - print("Creating CMSSW Branch(es)") - repo = gh.get_repo(gh_user+"/"+gh_cmssw) - for br_pair in args.cmssw: - create_branch(repo, br_pair[0], br_pair[1], args.dryRun) - if args.cmsdist: - print("\nCreating CMSDIST Branch(es)") - repo = gh.get_repo(gh_user+"/"+gh_cmsdist) - for br_pair in args.cmsdist: - create_branch(repo, br_pair[0], br_pair[1], args.dryRun) - - + print(args.cmssw) + print(args.cmsdist) + GH_TOKEN = open(expanduser("~/.github-token")).read().strip() + gh = Github(login_or_token=GH_TOKEN) + if args.cmssw: + print("Creating CMSSW Branch(es)") + repo = gh.get_repo(gh_user + "/" + gh_cmssw) + for br_pair in args.cmssw: + create_branch(repo, br_pair[0], br_pair[1], args.dryRun) + if args.cmsdist: + print("\nCreating CMSDIST Branch(es)") + repo = gh.get_repo(gh_user + "/" + gh_cmsdist) + for br_pair in args.cmsdist: + create_branch(repo, br_pair[0], br_pair[1], args.dryRun) diff --git a/gh_update_pr_milestone.py b/gh_update_pr_milestone.py index b870bdaac6d1..8c7e2e7de302 100755 --- a/gh_update_pr_milestone.py +++ b/gh_update_pr_milestone.py @@ -7,44 +7,60 @@ from cms_static import GH_CMSSW_ORGANIZATION as gh_user from cms_static import GH_CMSSW_REPO as gh_cmssw from socket import setdefaulttimeout + setdefaulttimeout(120) if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("-s", "--source", dest="source", help="Source repository, default is master", type=str, default="master") - parser.add_argument("-d", "--dest") - parser.add_argument("-r", "--repository", dest="repository", help="Github Repositoy name e.g. 
cms-sw/cmssw.", type=str, default=gh_user+"/"+gh_cmssw) - parser.add_argument("-f", "--force", dest="force", default=False, action="store_true") - parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") - args = parser.parse_args() - - if args.source == args.dest: - print("Source and destination branches are same") - exit(1) - elif (not args.source) or (not args.dest): - print("Missing source or destination branch") - exit(1) + parser = ArgumentParser() + parser.add_argument( + "-s", + "--source", + dest="source", + help="Source repository, default is master", + type=str, + default="master", + ) + parser.add_argument("-d", "--dest") + parser.add_argument( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. cms-sw/cmssw.", + type=str, + default=gh_user + "/" + gh_cmssw, + ) + parser.add_argument("-f", "--force", dest="force", default=False, action="store_true") + parser.add_argument("-n", "-dry-run", dest="dryRun", default=False, action="store_true") + args = parser.parse_args() - GH_TOKEN = open(expanduser("~/.github-token")).read().strip() - gh = Github(login_or_token=GH_TOKEN) + if args.source == args.dest: + print("Source and destination branches are same") + exit(1) + elif (not args.source) or (not args.dest): + print("Missing source or destination branch") + exit(1) - repo = gh.get_repo(args.repository) - desMilestone = None - milestones = repo.get_milestones() - for item in repo.get_milestones(): - if args.dest in item.title: - desMilestone = item - break - if not desMilestone: - print("ERROR: Unable to find milestone for with title %s" % args.dest) - print("Found milestone: %s" % desMilestone.number) - pulls = repo.get_pulls(base=args.source, state="open", sort="created", direction="asc") - for pr in pulls: - print("Wroking on PR ",pr.number,"with milestone",pr.milestone.number) - if (not args.force) and (pr.milestone.number == desMilestone.number): continue - if not args.dryRun: - issue = repo.get_issue(pr.number) - if args.force: issue.edit(milestone=None) - issue.edit(milestone=desMilestone) - print(" Updated milestone:",desMilestone.number) + GH_TOKEN = open(expanduser("~/.github-token")).read().strip() + gh = Github(login_or_token=GH_TOKEN) + repo = gh.get_repo(args.repository) + desMilestone = None + milestones = repo.get_milestones() + for item in repo.get_milestones(): + if args.dest in item.title: + desMilestone = item + break + if not desMilestone: + print("ERROR: Unable to find milestone for with title %s" % args.dest) + print("Found milestone: %s" % desMilestone.number) + pulls = repo.get_pulls(base=args.source, state="open", sort="created", direction="asc") + for pr in pulls: + print("Wroking on PR ", pr.number, "with milestone", pr.milestone.number) + if (not args.force) and (pr.milestone.number == desMilestone.number): + continue + if not args.dryRun: + issue = repo.get_issue(pr.number) + if args.force: + issue.edit(milestone=None) + issue.edit(milestone=desMilestone) + print(" Updated milestone:", desMilestone.number) diff --git a/github-rate-limits b/github-rate-limits deleted file mode 100755 index 8cffdb3cfee2..000000000000 --- a/github-rate-limits +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from github import Github -from os.path import expanduser -from datetime import datetime -from socket import setdefaulttimeout -setdefaulttimeout(120) - -if __name__ == "__main__": - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - 
print('API Rate Limit') - print('Limit, Remaining: ', gh.rate_limiting) - print('Reset time (GMT): ', datetime.fromtimestamp(gh.rate_limiting_resettime)) - diff --git a/github-rate-limits b/github-rate-limits new file mode 120000 index 000000000000..6e74bd46f427 --- /dev/null +++ b/github-rate-limits @@ -0,0 +1 @@ +github-rate-limits.py \ No newline at end of file diff --git a/github-rate-limits.py b/github-rate-limits.py new file mode 100755 index 000000000000..fec46b27ecb2 --- /dev/null +++ b/github-rate-limits.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +from __future__ import print_function +from github import Github +from os.path import expanduser +from datetime import datetime +from socket import setdefaulttimeout + +setdefaulttimeout(120) + +if __name__ == "__main__": + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + print("API Rate Limit") + print("Limit, Remaining: ", gh.rate_limiting) + print("Reset time (GMT): ", datetime.fromtimestamp(gh.rate_limiting_resettime)) diff --git a/github_backup.py b/github_backup.py index 5420cbbadadc..4ba90fa6389a 100755 --- a/github_backup.py +++ b/github_backup.py @@ -1,241 +1,272 @@ #!/usr/bin/env python3 -from sys import exit,argv +from sys import exit, argv from os import environ -from os.path import exists, join,dirname +from os.path import exists, join, dirname from json import load, dump from time import time, sleep, gmtime from subprocess import getstatusoutput from hashlib import md5 import threading, re -from github_utils import get_organization_repositores, get_repository_issues, check_rate_limits, github_time, get_issue_comments, get_page_range, get_gh_token +from github_utils import ( + get_organization_repositores, + get_repository_issues, + check_rate_limits, + github_time, + get_issue_comments, + get_page_range, + get_gh_token, +) from github_utils import get_releases + get_gh_token(token_file=argv[1]) backup_store = argv[2] -comment_imgs_regexp = re.compile('^(.*?)\!\[[^\]]+\]\(([^\)]+)\)(.*)$') +comment_imgs_regexp = re.compile("^(.*?)\!\[[^\]]+\]\(([^\)]+)\)(.*)$") if not exists(backup_store): - print("Backup store not exists.") - exit(1) + print("Backup store not exists.") + exit(1) + def download_patch(issue, pfile, force=False): - if 'pull_request' in issue: - if (not exists(pfile)) or force: - e, o = getstatusoutput('curl -L -s "%s" > %s.tmp && mv %s.tmp %s' % (issue['pull_request']['patch_url'], pfile, pfile, pfile)) - if e: - print("ERROR:",issue['number'],o) - return 1 - return 0 + if "pull_request" in issue: + if (not exists(pfile)) or force: + e, o = getstatusoutput( + 'curl -L -s "%s" > %s.tmp && mv %s.tmp %s' + % (issue["pull_request"]["patch_url"], pfile, pfile, pfile) + ) + if e: + print("ERROR:", issue["number"], o) + return 1 + return 0 + def process_comment(body, repo): - err = 0 - if not body: return err - for comment in body.split("\n"): - while comment: - m = comment_imgs_regexp.match(" "+comment+" ") - if not m: break - comment = "%s%s" % (m.group(1), m.group(3)) - url = m.group(2) - if ('"' in url) or ("'" in url): continue - if url.startswith('data:'): continue - ifile = "%s/%s/images/%s" % (backup_store, repo, url.split("://")[-1]) - ifile = re.sub("[^a-zA-Z0-9/._-]", "", ifile) - if exists(ifile): continue - getstatusoutput("mkdir -p %s" % dirname(ifile)) - try: - cmd = "curl -L -s '%s' > %s.tmp && mv %s.tmp %s" % (url,ifile,ifile,ifile) - e, o = getstatusoutput(cmd) - if e: - print(" ERROR:",o) - err = 1 - else: - print( " Download user content: ",url) - except: - 
print("ERROR: Runing ",cmd) - err = 1 - return err + err = 0 + if not body: + return err + for comment in body.split("\n"): + while comment: + m = comment_imgs_regexp.match(" " + comment + " ") + if not m: + break + comment = "%s%s" % (m.group(1), m.group(3)) + url = m.group(2) + if ('"' in url) or ("'" in url): + continue + if url.startswith("data:"): + continue + ifile = "%s/%s/images/%s" % (backup_store, repo, url.split("://")[-1]) + ifile = re.sub("[^a-zA-Z0-9/._-]", "", ifile) + if exists(ifile): + continue + getstatusoutput("mkdir -p %s" % dirname(ifile)) + try: + cmd = "curl -L -s '%s' > %s.tmp && mv %s.tmp %s" % (url, ifile, ifile, ifile) + e, o = getstatusoutput(cmd) + if e: + print(" ERROR:", o) + err = 1 + else: + print(" Download user content: ", url) + except: + print("ERROR: Runing ", cmd) + err = 1 + return err + def process_issue(repo, issue, data): - num = issue['number'] - pr_md5 = md5((str(num)+"\n").encode()).hexdigest() - pr_md5_dir = join(backup_store, repo_name, "issues", pr_md5[:2], pr_md5[2:]) - ifile = join(pr_md5_dir, "issue.json") - pfile = join(pr_md5_dir, "patch.txt") - getstatusoutput("mkdir -p %s" % pr_md5_dir) - err = process_comment(issue['body'], repo) - err += download_patch(issue, pfile) - if exists (ifile): - obj = {} - with open(ifile) as ref: - obj = load(ref) - if obj['updated_at']==issue['updated_at']: - data['status'] = False if err>0 else True - return - err += download_patch(issue, pfile, True) - comments = get_issue_comments(repo, num) - for c in comments: - err += process_comment(c['body'],repo) - dump(comments, open(join(pr_md5_dir, "comments.json"),"w")) - dump(issue, open(ifile, "w")) - print(" Updated ",repo,num,issue['updated_at'],err) - data['status'] = False if err>0 else True - return + num = issue["number"] + pr_md5 = md5((str(num) + "\n").encode()).hexdigest() + pr_md5_dir = join(backup_store, repo_name, "issues", pr_md5[:2], pr_md5[2:]) + ifile = join(pr_md5_dir, "issue.json") + pfile = join(pr_md5_dir, "patch.txt") + getstatusoutput("mkdir -p %s" % pr_md5_dir) + err = process_comment(issue["body"], repo) + err += download_patch(issue, pfile) + if exists(ifile): + obj = {} + with open(ifile) as ref: + obj = load(ref) + if obj["updated_at"] == issue["updated_at"]: + data["status"] = False if err > 0 else True + return + err += download_patch(issue, pfile, True) + comments = get_issue_comments(repo, num) + for c in comments: + err += process_comment(c["body"], repo) + dump(comments, open(join(pr_md5_dir, "comments.json"), "w")) + dump(issue, open(ifile, "w")) + print(" Updated ", repo, num, issue["updated_at"], err) + data["status"] = False if err > 0 else True + return + def process_issues(repo, max_threads=8): - issues = get_repository_issues(repo_name) - pages = get_page_range() - check_rate_limits(msg=True, prefix=" ") - threads = [] - all_ok = True - latest_date = 0 - ref_datefile = join(backup_store, repo, "issues", "latest.txt") - ref_date = 0 - if exists(ref_datefile): - with open(ref_datefile) as ref: - ref_date = int(ref.read().strip()) - while issues: - for issue in issues: - idate = github_time(issue['updated_at']) - if latest_date==0: latest_date = idate - if idate<=ref_date: - pages = [] - break - check_rate_limits(msg=False, when_slow=True, prefix=" ") - inum = issue['number'] - print(" Processing ",repo,inum) - while (len(threads) >= max_threads): - sleep(0.01) - athreads = [] - for t in threads: - if t[0].is_alive(): athreads.append(t) - else: - all_ok = (all_ok and t[1]['status']) - threads = athreads - 
data={'status': False, 'number': inum} - t = threading.Thread(target=process_issue, args=(repo, issue, data)) - t.start() - threads.append((t, data)) - sleep(0.01) - issues = get_repository_issues(repo_name, page = pages.pop(0)) if pages else [] - for t in threads: - t[0].join() - all_ok = (all_ok and t[1]['status']) - if all_ok and (latest_date!=ref_date): - with open(ref_datefile, "w") as ref: - ref.write(str(latest_date)) - return + issues = get_repository_issues(repo_name) + pages = get_page_range() + check_rate_limits(msg=True, prefix=" ") + threads = [] + all_ok = True + latest_date = 0 + ref_datefile = join(backup_store, repo, "issues", "latest.txt") + ref_date = 0 + if exists(ref_datefile): + with open(ref_datefile) as ref: + ref_date = int(ref.read().strip()) + while issues: + for issue in issues: + idate = github_time(issue["updated_at"]) + if latest_date == 0: + latest_date = idate + if idate <= ref_date: + pages = [] + break + check_rate_limits(msg=False, when_slow=True, prefix=" ") + inum = issue["number"] + print(" Processing ", repo, inum) + while len(threads) >= max_threads: + sleep(0.01) + athreads = [] + for t in threads: + if t[0].is_alive(): + athreads.append(t) + else: + all_ok = all_ok and t[1]["status"] + threads = athreads + data = {"status": False, "number": inum} + t = threading.Thread(target=process_issue, args=(repo, issue, data)) + t.start() + threads.append((t, data)) + sleep(0.01) + issues = get_repository_issues(repo_name, page=pages.pop(0)) if pages else [] + for t in threads: + t[0].join() + all_ok = all_ok and t[1]["status"] + if all_ok and (latest_date != ref_date): + with open(ref_datefile, "w") as ref: + ref.write(str(latest_date)) + return + def process_release(repo, rel, data): - rdir = join(backup_store, repo, "releases", data['year']) - getstatusoutput("mkdir -p %s" % rdir) - dump(rel, open(join(rdir, "%s.json" % rel['id']),"w")) - data['status'] = True - return + rdir = join(backup_store, repo, "releases", data["year"]) + getstatusoutput("mkdir -p %s" % rdir) + dump(rel, open(join(rdir, "%s.json" % rel["id"]), "w")) + data["status"] = True + return + def process_releases(repo, max_threads=8): - rels = get_releases(repo_name) - pages = get_page_range() - check_rate_limits(msg=True) - threads = [] - all_ok = True - latest_date = 0 - ref_datefile = join(backup_store, repo, "releases", "latest.txt") - ref_date = 0 - if exists(ref_datefile): - with open(ref_datefile) as ref: - ref_date = int(ref.read().strip()) - while rels: - for rel in rels: - idate = github_time(rel['published_at']) - if latest_date==0: latest_date = idate - if idate<=ref_date: - pages = [] - break - print(" Processing release",rel['name']) - while (len(threads) >= max_threads): - athreads = [] - for t in threads: - if t[0].is_alive(): athreads.append(t) - else: - all_ok = (all_ok and t[1]['status']) - threads = athreads - data={'status': False, 'year': str(gmtime(idate).tm_year) } - t = threading.Thread(target=process_release, args=(repo, rel, data)) - t.start() - threads.append((t, data)) - rels = get_releases(repo_name, page=pages.pop(0)) if pages else [] - check_rate_limits(msg=False, when_slow=True) - for t in threads: - t[0].join() - all_ok = (all_ok and t[1]['status']) - if all_ok and (latest_date!=ref_date): - with open(ref_datefile, "w") as ref: - ref.write(str(latest_date)) - return + rels = get_releases(repo_name) + pages = get_page_range() + check_rate_limits(msg=True) + threads = [] + all_ok = True + latest_date = 0 + ref_datefile = join(backup_store, repo, "releases", 
"latest.txt") + ref_date = 0 + if exists(ref_datefile): + with open(ref_datefile) as ref: + ref_date = int(ref.read().strip()) + while rels: + for rel in rels: + idate = github_time(rel["published_at"]) + if latest_date == 0: + latest_date = idate + if idate <= ref_date: + pages = [] + break + print(" Processing release", rel["name"]) + while len(threads) >= max_threads: + athreads = [] + for t in threads: + if t[0].is_alive(): + athreads.append(t) + else: + all_ok = all_ok and t[1]["status"] + threads = athreads + data = {"status": False, "year": str(gmtime(idate).tm_year)} + t = threading.Thread(target=process_release, args=(repo, rel, data)) + t.start() + threads.append((t, data)) + rels = get_releases(repo_name, page=pages.pop(0)) if pages else [] + check_rate_limits(msg=False, when_slow=True) + for t in threads: + t[0].join() + all_ok = all_ok and t[1]["status"] + if all_ok and (latest_date != ref_date): + with open(ref_datefile, "w") as ref: + ref.write(str(latest_date)) + return ########################################################## -orgs = { - "cms-sw": ["issues", "releases"], - "dmwm": ["issues", "releases"], - "cms-externals": ["issues"], - "cms-data": ["issues"], - "cms-analysis": ["issues", "releases"], - "cms-cvs-history": [], - "cms-obsolete": [], - } - -err=0 +orgs = { + "cms-sw": ["issues", "releases"], + "dmwm": ["issues", "releases"], + "cms-externals": ["issues"], + "cms-data": ["issues"], + "cms-analysis": ["issues", "releases"], + "cms-cvs-history": [], + "cms-obsolete": [], +} + +err = 0 e, o = getstatusoutput("date") print("=================================================") print(o.strip()) print("=================================================") for org in orgs: - for repo in get_organization_repositores(org): - repo_name = repo['full_name'] - print("Working on",repo_name) - repo_dir = join(backup_store,repo_name) - repo_stat = join(repo_dir, "json") - backup = True - if exists(repo_stat): - repo_obj = load(open(repo_stat)) - backup = False - for v in ['pushed_at', 'updated_at']: - if repo_obj[v] != repo[v]: - backup = True - break - getstatusoutput("mkdir -p %s" % repo_dir) - if 'issues' in orgs[org]: - print(" Processing issues for",repo_name) - getstatusoutput("mkdir -p %s/issues" % repo_dir) - process_issues(repo_name) - if 'releases' in orgs[org]: - print(" Processing releases for",repo_name) - getstatusoutput("mkdir -p %s/releases" % repo_dir) - process_releases(repo_name) - if not backup: - print(" Skipping mirror, no change") - continue - brepo = join(repo_dir, "repo") - if exists(brepo): - getstatusoutput("mv %s %s.%s" % (brepo, brepo, int(time()))) - getstatusoutput("rm -rf %s.tmp" % brepo) - print(" Mirroring repository",repo_name) - e, o = getstatusoutput("git clone --mirror https://github.com/%s %s.tmp" % (repo_name, brepo)) - if e: - print(o) - err = 1 - else: - e, o = getstatusoutput("mv %s.tmp %s" % (brepo, brepo)) - if not e: - with open(repo_stat, "w") as obj: - dump(repo, obj) - print(" Backed up",repo_name) - getstatusoutput("find %s -mindepth 1 -maxdepth 1 -name 'repo.*' | sort | head -n -100 | xargs rm -rf" % repo_dir) - else: - print(o) - err = 1 + for repo in get_organization_repositores(org): + repo_name = repo["full_name"] + print("Working on", repo_name) + repo_dir = join(backup_store, repo_name) + repo_stat = join(repo_dir, "json") + backup = True + if exists(repo_stat): + repo_obj = load(open(repo_stat)) + backup = False + for v in ["pushed_at", "updated_at"]: + if repo_obj[v] != repo[v]: + backup = True + break + 
getstatusoutput("mkdir -p %s" % repo_dir) + if "issues" in orgs[org]: + print(" Processing issues for", repo_name) + getstatusoutput("mkdir -p %s/issues" % repo_dir) + process_issues(repo_name) + if "releases" in orgs[org]: + print(" Processing releases for", repo_name) + getstatusoutput("mkdir -p %s/releases" % repo_dir) + process_releases(repo_name) + if not backup: + print(" Skipping mirror, no change") + continue + brepo = join(repo_dir, "repo") + if exists(brepo): + getstatusoutput("mv %s %s.%s" % (brepo, brepo, int(time()))) + getstatusoutput("rm -rf %s.tmp" % brepo) + print(" Mirroring repository", repo_name) + e, o = getstatusoutput( + "git clone --mirror https://github.com/%s %s.tmp" % (repo_name, brepo) + ) + if e: + print(o) + err = 1 + else: + e, o = getstatusoutput("mv %s.tmp %s" % (brepo, brepo)) + if not e: + with open(repo_stat, "w") as obj: + dump(repo, obj) + print(" Backed up", repo_name) + getstatusoutput( + "find %s -mindepth 1 -maxdepth 1 -name 'repo.*' | sort | head -n -100 | xargs rm -rf" + % repo_dir + ) + else: + print(o) + err = 1 e, o = getstatusoutput("date") print("=================================================") print(o.strip()) print("=================================================") exit(err) - diff --git a/github_get_file_changes.py b/github_get_file_changes.py index a17bc47d9e2d..8b6dad4bf9e2 100755 --- a/github_get_file_changes.py +++ b/github_get_file_changes.py @@ -22,7 +22,7 @@ # logger and logger config # https://docs.python.org/2/library/logger.html -FORMAT = '%(levelname)s - %(funcName)s - %(lineno)d: %(message)s' +FORMAT = "%(levelname)s - %(funcName)s - %(lineno)d: %(message)s" logging.basicConfig(format=FORMAT) logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ def get_changed_modules(filename_it): changed_m = set() for f_n in filename_it: - s_l = f_n.split('/') + s_l = f_n.split("/") if len(s_l) <= 2: # It is not a module, ignore pass @@ -50,8 +50,8 @@ def get_changed_filenames_by_pr(old_prs_dict, pr_list): nr = str(pr.number) if nr in old_prs_dict.keys(): pr_old = old_prs_dict[nr] - if int(get_unix_time(pr.updated_at)) == pr_old['updated_at']: - changed_file_set = changed_file_set.union(pr_old['changed_files_names']) + if int(get_unix_time(pr.updated_at)) == pr_old["updated_at"]: + changed_file_set = changed_file_set.union(pr_old["changed_files_names"]) logger.debug(" Pr {} was cached".format(nr)) continue # we used cached files, ignore the rest of the loop @@ -76,8 +76,9 @@ def get_git_mt(path, filename): def get_modules_with_mt(path, depth=2): data_list = [] unique_list = {} - for l in ['/'.join(d.split('/')[-depth:]) for d in glob('%s/*/*' % path)]: - if l in unique_list: continue + for l in ["/".join(d.split("/")[-depth:]) for d in glob("%s/*/*" % path)]: + if l in unique_list: + continue data_list.append([l, get_git_mt(path, l)]) unique_list[l] = 1 return data_list @@ -88,20 +89,35 @@ def main(): parser.add_argument("-n", "--repo_name", help="Repo name 'org/project") parser.add_argument("-c", "--cached_pr", default=None, help="Path to cached pr list") parser.add_argument("-r", "--cloned_repo", help="Path to cloned git repository") - parser.add_argument("-l", "--logging", default="DEBUG", choices=logging._levelNames, help="Set level of logging") - parser.add_argument("-o", "--output", default=None, help="Output result, which is a list of modules that are not" - " being modified by other PRs.") - parser.add_argument("-i", "--ignore_modules", default=None, help="Ignore modules which are already done.") + parser.add_argument( + "-l", 
+ "--logging", + default="DEBUG", + choices=logging._levelNames, + help="Set level of logging", + ) + parser.add_argument( + "-o", + "--output", + default=None, + help="Output result, which is a list of modules that are not" + " being modified by other PRs.", + ) + parser.add_argument( + "-i", "--ignore_modules", default=None, help="Ignore modules which are already done." + ) args = parser.parse_args() logger.setLevel(args.logging) logger.debug(args.repo_name) gh = Github(login_or_token=open(expanduser(GH_TOKEN)).read().strip()) repo = gh.get_repo(args.repo_name) - pr_list = get_pull_requests(repo, branch='master') + pr_list = get_pull_requests(repo, branch="master") logger.debug("GitHub API rate limit before: {}".format(gh.get_rate_limit())) - all_branch_modules_w_mt = get_modules_with_mt(args.cloned_repo) # this will return folders 2 levels deep + all_branch_modules_w_mt = get_modules_with_mt( + args.cloned_repo + ) # this will return folders 2 levels deep all_branch_modules_names = set([x[0] for x in all_branch_modules_w_mt]) modules_mod_by_prs = set() new_files = [] # new files introduced vy PRs @@ -111,7 +127,7 @@ def main(): with open(args.cached_pr) as f: old_prs_dict = json.load(f) except Exception as e: - print('Could not load a dumped prs', str(e)) + print("Could not load a dumped prs", str(e)) exit(1) ch_f_set = get_changed_filenames_by_pr(old_prs_dict, pr_list) modules_mod_by_prs = get_changed_modules(ch_f_set) @@ -126,7 +142,9 @@ def main(): non_changed_modules = all_branch_modules_names.difference(modules_mod_by_prs) already_done_modules = set() if args.ignore_modules and exists(args.ignore_modules): - already_done_modules = set(['/'.join(d.split('/')[-2:]) for d in glob('%s/*/*' % args.ignore_modules)]) + already_done_modules = set( + ["/".join(d.split("/")[-2:]) for d in glob("%s/*/*" % args.ignore_modules)] + ) logger.debug("modules_mod_by_prs") logger.debug(pformat(modules_mod_by_prs)) @@ -137,14 +155,25 @@ def main(): logger.debug("non_changed_module") logger.debug(pformat(non_changed_modules)) logger.debug("---") - print(pformat( - "Modules modified by prs: {} \nAll modules: {} \nModules not touched by prs: {} \nNew modules: {}".format( - len(modules_mod_by_prs), len(all_branch_modules_w_mt), len(non_changed_modules), len(new_files)) - )) - - unmodified_modules_sorted_by_time = [x for x in all_branch_modules_w_mt if - (x[0] not in modules_mod_by_prs) and (x[0] not in already_done_modules)] - unmodified_modules_sorted_by_time = sorted(unmodified_modules_sorted_by_time, cmp=lambda x, y: cmp_f(x[1], y[1])) + print( + pformat( + "Modules modified by prs: {} \nAll modules: {} \nModules not touched by prs: {} \nNew modules: {}".format( + len(modules_mod_by_prs), + len(all_branch_modules_w_mt), + len(non_changed_modules), + len(new_files), + ) + ) + ) + + unmodified_modules_sorted_by_time = [ + x + for x in all_branch_modules_w_mt + if (x[0] not in modules_mod_by_prs) and (x[0] not in already_done_modules) + ] + unmodified_modules_sorted_by_time = sorted( + unmodified_modules_sorted_by_time, cmp=lambda x, y: cmp_f(x[1], y[1]) + ) logger.debug("Modules not modified by prs:") logger.debug(pformat(unmodified_modules_sorted_by_time)) @@ -158,12 +187,12 @@ def main(): categories = {} for pack in package2categories: categories[pack] = "-".join(sorted(package2categories[pack])) - with open(args.output, 'w') as f: + with open(args.output, "w") as f: for i in unmodified_modules_sorted_by_time: - cat = categories[i[0]] if i[0] in categories else 'unknown' + cat = categories[i[0]] if 
i[0] in categories else "unknown" f.write("{} {}\n".format(cat, i[0])) logger.debug("GitHub API rate limit after: {}".format(gh.get_rate_limit())) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/github_hooks_config.py b/github_hooks_config.py index 7dd12dfbb10a..0b477b80ff84 100755 --- a/github_hooks_config.py +++ b/github_hooks_config.py @@ -1,23 +1,20 @@ GITHUB_HOOKS = {} GITHUB_HOOKS["Jenkins_Github_Hook"] = { - "active":True, - "events": ["issues","pull_request","issue_comment","status"], - "config": { - "url": "https://cmssdt.cern.ch/SDT/cgi-bin/github_webhook", - "content_type":"json" - } + "active": True, + "events": ["issues", "pull_request", "issue_comment", "status"], + "config": {"url": "https://cmssdt.cern.ch/SDT/cgi-bin/github_webhook", "content_type": "json"}, } GITHUB_HOOKS["Jenkins_Github_Hook_Push"] = { - "active":True, - "events": ["push"], - "config": { - "url": "https://cmssdt.cern.ch/SDT/cgi-bin/github_webhook?push", - "content_type":"json" - } + "active": True, + "events": ["push"], + "config": { + "url": "https://cmssdt.cern.ch/SDT/cgi-bin/github_webhook?push", + "content_type": "json", + }, } -#First repository name matches wins +# First repository name matches wins REPO_HOOK_MAP = [] REPO_HOOK_MAP.append(["cms-sw/cms-sw.github.io", ["Jenkins_Github_Hook_Push"]]) REPO_HOOK_MAP.append(["cms-sw/cms-prs", ["Jenkins_Github_Hook_Push"]]) @@ -27,33 +24,43 @@ REPO_HOOK_MAP.append(["cms-sw/web-confdb", ["Jenkins_Github_Hook_Push"]]) REPO_HOOK_MAP.append(["cms-sw/RecoLuminosity-LumiDB", ["Jenkins_Github_Hook_Push"]]) REPO_HOOK_MAP.append(["cms-sw/DQM-Integration", ["Jenkins_Github_Hook_Push"]]) -REPO_HOOK_MAP.append(["cms-sw/.+", ["Jenkins_Github_Hook","Jenkins_Github_Hook_Push"]]) -REPO_HOOK_MAP.append(["cms-data/.+", ["Jenkins_Github_Hook","Jenkins_Github_Hook_Push"]]) -REPO_HOOK_MAP.append(["cms-externals/.+", ["Jenkins_Github_Hook","Jenkins_Github_Hook_Push"]]) +REPO_HOOK_MAP.append(["cms-sw/.+", ["Jenkins_Github_Hook", "Jenkins_Github_Hook_Push"]]) +REPO_HOOK_MAP.append(["cms-data/.+", ["Jenkins_Github_Hook", "Jenkins_Github_Hook_Push"]]) +REPO_HOOK_MAP.append(["cms-externals/.+", ["Jenkins_Github_Hook", "Jenkins_Github_Hook_Push"]]) + def is_valid_gh_repo(repo_name): - import re - for r in REPO_HOOK_MAP: - if re.match("^"+r[0]+"$",repo_name): return True - return False + import re + + for r in REPO_HOOK_MAP: + if re.match("^" + r[0] + "$", repo_name): + return True + return False + def get_repository_hooks(repo_name, hook=""): - import re - hooks = {} - for r in REPO_HOOK_MAP: - if re.match("^"+r[0]+"$",repo_name): - if not hook: - for h in r[1]: hooks[h]=GITHUB_HOOKS[h] - elif hook in r[1]: - hooks[hook] = GITHUB_HOOKS[hook] - break - return hooks + import re + + hooks = {} + for r in REPO_HOOK_MAP: + if re.match("^" + r[0] + "$", repo_name): + if not hook: + for h in r[1]: + hooks[h] = GITHUB_HOOKS[h] + elif hook in r[1]: + hooks[hook] = GITHUB_HOOKS[hook] + break + return hooks + def get_event_hooks(events): - hooks = {} - for ev in events: - hook = None - if ev in ["push"]: hook="Jenkins_Github_Hook_Push" - elif ev in ["issues","pull_request","issue_comment"]: hook="Jenkins_Github_Hook" - if hook: hooks[hook] = GITHUB_HOOKS[hook] - return hooks + hooks = {} + for ev in events: + hook = None + if ev in ["push"]: + hook = "Jenkins_Github_Hook_Push" + elif ev in ["issues", "pull_request", "issue_comment"]: + hook = "Jenkins_Github_Hook" + if hook: + hooks[hook] = GITHUB_HOOKS[hook] + return hooks diff --git a/github_modified_files.py 
b/github_modified_files.py index 2b9573e59859..3cdb25f6283f 100755 --- a/github_modified_files.py +++ b/github_modified_files.py @@ -16,7 +16,7 @@ # logger and logger config # https://docs.python.org/2/library/logger.html -FORMAT = '%(levelname)s - %(funcName)s - %(lineno)d: %(message)s' +FORMAT = "%(levelname)s - %(funcName)s - %(lineno)d: %(message)s" logging.basicConfig(format=FORMAT) logger = logging.getLogger(__name__) @@ -28,7 +28,13 @@ def main(): parser.add_argument("-c", "--cached_pr", default=None) parser.add_argument("-b", "--branch", default=None) parser.add_argument("-p", "--pull", default=None) - parser.add_argument("-l", "--logging", default="DEBUG", choices=logging._levelNames, help="Set level of logging") + parser.add_argument( + "-l", + "--logging", + default="DEBUG", + choices=logging._levelNames, + help="Set level of logging", + ) args = parser.parse_args() logger.setLevel(args.logging) @@ -41,46 +47,48 @@ def main(): with open(args.cached_pr) as f: old_prs_dict = json.load(f) except Exception as e: - logger.warning('Could not load a dumped prs', str(e)) + logger.warning("Could not load a dumped prs", str(e)) pr_list = [] rez = {} if args.pull: import copy + rez = copy.deepcopy(old_prs_dict) - pr_list = [ repo.get_pull(int(args.pull)) ] + pr_list = [repo.get_pull(int(args.pull))] else: pr_list = get_pull_requests(repo, branch=args.branch) print("GitHub API rate limit before: {}".format(gh.get_rate_limit())) for pr in pr_list: nr = str(pr.number) - if pr.state == 'closed': - if nr in rez: del rez[nr] + if pr.state == "closed": + if nr in rez: + del rez[nr] continue rez[nr] = { - 'number': int(nr), - 'state': pr.state, - 'created_at': int(pr.created_at.strftime("%s")), - 'updated_at': int(pr.updated_at.strftime("%s")), - 'base_branch': pr.base.ref + "number": int(nr), + "state": pr.state, + "created_at": int(pr.created_at.strftime("%s")), + "updated_at": int(pr.updated_at.strftime("%s")), + "base_branch": pr.base.ref, } # to check for cached PRs if nr in old_prs_dict.keys(): pr_old = old_prs_dict[nr] - if int(get_unix_time(pr.updated_at)) == pr_old['updated_at']: - rez[nr]['changed_files_names'] = pr_old['changed_files_names'] + if int(get_unix_time(pr.updated_at)) == pr_old["updated_at"]: + rez[nr]["changed_files_names"] = pr_old["changed_files_names"] logger.debug(" Using from cache %s" % nr) continue logger.debug("!PR was updated %s" % nr) - rez[nr]['changed_files_names'] = pr_get_changed_files(pr) + rez[nr]["changed_files_names"] = pr_get_changed_files(pr) - with open(args.destination, 'w') as d: + with open(args.destination, "w") as d: json.dump(rez, d, sort_keys=True, indent=4) print("GitHub API rate limit after: {}".format(gh.get_rate_limit())) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/github_scripts/get_Github_API_rate.py b/github_scripts/get_Github_API_rate.py index 5dc11c610a23..0b5544439933 100755 --- a/github_scripts/get_Github_API_rate.py +++ b/github_scripts/get_Github_API_rate.py @@ -10,5 +10,5 @@ def main(): print("GitHub API rate limit: {0}".format(gh.get_rate_limit())) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/github_scripts/simultaneous_files_modifications_by_PRs.py b/github_scripts/simultaneous_files_modifications_by_PRs.py index 4d69f360d9be..58983e66e714 100755 --- a/github_scripts/simultaneous_files_modifications_by_PRs.py +++ b/github_scripts/simultaneous_files_modifications_by_PRs.py @@ -13,53 +13,63 @@ def build_open_file_list(prs_dict, branch): open_file_list = {} for pr in 
prs_dict: - if prs_dict[pr]['base_branch'] == branch: - for file in prs_dict[pr]['changed_files_names']: + if prs_dict[pr]["base_branch"] == branch: + for file in prs_dict[pr]["changed_files_names"]: if file in open_file_list: open_file_list[file].append(pr) else: - open_file_list[file] = [pr, ] + open_file_list[file] = [ + pr, + ] return open_file_list def check_pr_dict(prs_dict, prs_list, pr_number): - for my_file in prs_dict[pr_number]['changed_files_names']: + for my_file in prs_dict[pr_number]["changed_files_names"]: if len(prs_list[my_file]) > 1: - print("File ", my_file, " modified in PR(s):", ', '.join(['#'+p for p in prs_list[my_file] if p!=pr_number])) + print( + "File ", + my_file, + " modified in PR(s):", + ", ".join(["#" + p for p in prs_list[my_file] if p != pr_number]), + ) -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) < 2: print("Usage: SearchPROverlap.py [ ]") print( - " : number of PR belonging to , or \"all\" for loop on all open PRs in \n" - " If \"all\" is given as , then a branch must be given as well.") + ' : number of PR belonging to , or "all" for loop on all open PRs in \n' + ' If "all" is given as , then a branch must be given as well.' + ) exit() my_pr = sys.argv[1] my_branch = None - e, o = run_cmd('curl -s -k -L https://raw.githubusercontent.com/cms-sw/cms-prs/master/cms-sw/cmssw/.other/files_changed_by_prs.json') + e, o = run_cmd( + "curl -s -k -L https://raw.githubusercontent.com/cms-sw/cms-prs/master/cms-sw/cmssw/.other/files_changed_by_prs.json" + ) prs_dict = json.loads(o) if my_pr not in prs_dict and not "all": print("PR # ", my_pr, "does not exists", file=sys.stderr) exit(1) if len(sys.argv) > 2: my_branch = sys.argv[2] - elif len(sys.argv) == 2 and my_pr == 'all': - print("ERROR: If \"all\" is given as , then a branch must be given as well.") + elif len(sys.argv) == 2 and my_pr == "all": + print('ERROR: If "all" is given as , then a branch must be given as well.') exit(1) else: pr_metadata = prs_dict[my_pr] - my_branch = pr_metadata['base_branch'] + my_branch = pr_metadata["base_branch"] my_list = build_open_file_list(prs_dict, my_branch) if my_pr == "all": for pr in prs_dict: - if prs_dict[pr]['base_branch'] == my_branch: + if prs_dict[pr]["base_branch"] == my_branch: check_pr_dict(prs_dict, my_list, pr) else: - if prs_dict[my_pr]['base_branch'] != my_branch: + if prs_dict[my_pr]["base_branch"] != my_branch: print("PR # ", my_pr, " not belonging to branch ", my_branch, file=sys.stderr) exit(1) else: diff --git a/github_utils.py b/github_utils.py index 4aa155a2e7ab..3cd942b341eb 100644 --- a/github_utils.py +++ b/github_utils.py @@ -10,42 +10,60 @@ GH_TOKENS = [] GH_USER = None GH_TOKEN_INDEX = 0 -GH_RATE_LIMIT = [ 5000, 5000, 3600] +GH_RATE_LIMIT = [5000, 5000, 3600] GH_PAGE_RANGE = [] try: from github import UnknownObjectException except: + class UnknownObjectException(Exception): pass + try: scriptPath = dirname(abspath(__file__)) except Exception as e: scriptPath = dirname(abspath(argv[0])) -def format(s, **kwds): return s % kwds +def format(s, **kwds): + return s % kwds def comment_gh_pr(gh, repo, pr, msg): repo = gh.get_repo(repo) - pr = repo.get_issue(pr) + pr = repo.get_issue(pr) pr.create_comment(msg) + def github_time(gh_time): - return int(mktime(strptime(gh_time, "%Y-%m-%dT%H:%M:%SZ"))) + return int(mktime(strptime(gh_time, "%Y-%m-%dT%H:%M:%SZ"))) + def get_page_range(): - return GH_PAGE_RANGE[:] + return GH_PAGE_RANGE[:] + -def _check_rate_limits(rate_limit, rate_limit_max, rate_limiting_resettime, msg=True, 
when_slow=False, prefix=""): +def _check_rate_limits( + rate_limit, rate_limit_max, rate_limiting_resettime, msg=True, when_slow=False, prefix="" +): global GH_TOKENS, GH_TOKEN_INDEX from calendar import timegm from datetime import datetime + doSleep = 0 rate_reset_sec = rate_limiting_resettime - timegm(gmtime()) + 5 - if msg: print('%sAPI Rate Limit: %s/%s, Reset in %s sec i.e. at %s' % ( - prefix, rate_limit, rate_limit_max, rate_reset_sec, datetime.fromtimestamp(rate_limiting_resettime))) + if msg: + print( + "%sAPI Rate Limit: %s/%s, Reset in %s sec i.e. at %s" + % ( + prefix, + rate_limit, + rate_limit_max, + rate_reset_sec, + datetime.fromtimestamp(rate_limiting_resettime), + ) + ) if rate_limit < 100: doSleep = rate_reset_sec elif rate_limit < 200: @@ -58,39 +76,56 @@ def _check_rate_limits(rate_limit, rate_limit_max, rate_limiting_resettime, msg= doSleep = 0.5 elif rate_limit < 2000: doSleep = 0.25 - if (rate_reset_sec < doSleep): doSleep = rate_reset_sec - if when_slow: msg=True + if rate_reset_sec < doSleep: + doSleep = rate_reset_sec + if when_slow: + msg = True if doSleep > 0: - tok_len = len(GH_TOKENS)-1 - if tok_len>=1: - GH_TOKEN_INDEX = 0 if (GH_TOKEN_INDEX==tok_len) else GH_TOKEN_INDEX+1 - get_rate_limits() - if GH_TOKEN_INDEX>0: return + tok_len = len(GH_TOKENS) - 1 + if tok_len >= 1: + GH_TOKEN_INDEX = 0 if (GH_TOKEN_INDEX == tok_len) else GH_TOKEN_INDEX + 1 + get_rate_limits() + if GH_TOKEN_INDEX > 0: + return if msg: - print("%sSlowing down for %s sec due to api rate limits %s approching zero (reset in %s secs)" % (prefix, doSleep, rate_limit, rate_reset_sec)) + print( + "%sSlowing down for %s sec due to api rate limits %s approching zero (reset in %s secs)" + % (prefix, doSleep, rate_limit, rate_reset_sec) + ) sleep(doSleep) return + def check_rate_limits(msg=True, when_slow=False, prefix=""): - _check_rate_limits(GH_RATE_LIMIT[0], GH_RATE_LIMIT[1], GH_RATE_LIMIT[2], msg, when_slow, prefix=prefix) + _check_rate_limits( + GH_RATE_LIMIT[0], GH_RATE_LIMIT[1], GH_RATE_LIMIT[2], msg, when_slow, prefix=prefix + ) def api_rate_limits_repo(obj, msg=True, when_slow=False, prefix=""): global GH_RATE_LIMIT - GH_RATE_LIMIT = [ int(obj.raw_headers['x-ratelimit-remaining']), int(obj.raw_headers['x-ratelimit-limit']), int(obj.raw_headers['x-ratelimit-reset']) ] + GH_RATE_LIMIT = [ + int(obj.raw_headers["x-ratelimit-remaining"]), + int(obj.raw_headers["x-ratelimit-limit"]), + int(obj.raw_headers["x-ratelimit-reset"]), + ] check_rate_limits(msg, when_slow, prefix=prefix) def api_rate_limits(gh, msg=True, when_slow=False, prefix=""): global GH_RATE_LIMIT gh.get_rate_limit() - GH_RATE_LIMIT = [ int(gh.rate_limiting[0]), int(gh.rate_limiting[1]), int(gh.rate_limiting_resettime) ] + GH_RATE_LIMIT = [ + int(gh.rate_limiting[0]), + int(gh.rate_limiting[1]), + int(gh.rate_limiting_resettime), + ] check_rate_limits(msg, when_slow, prefix=prefix) def get_ported_PRs(repo, src_branch, des_branch): done_prs_id = {} - prRe = re.compile('Automatically ported from ' + src_branch + ' #(\d+)\s+.*', re.MULTILINE) + prRe = re.compile("Automatically ported from " + src_branch + " #(\d+)\s+.*", re.MULTILINE) for pr in repo.get_pulls(base=des_branch): body = pr.body.encode("ascii", "ignore") if sys.version_info[0] == 3: @@ -112,7 +147,14 @@ def port_pr(repo, pr_num, des_branch, dryRun=False): print("Already ported as #", done_prs_id[pr.number]) return True branch = repo.get_branch(des_branch) - print("Preparing checkout area:", pr_num, repo.full_name, pr.head.user.login, pr.head.ref, des_branch) + print( + 
"Preparing checkout area:", + pr_num, + repo.full_name, + pr.head.user.login, + pr.head.ref, + des_branch, + ) prepare_cmd = format( "%(cmsbot)s/prepare-repo-clone-for-port.sh %(pr)s %(pr_user)s/%(pr_branch)s %(repo)s %(des_branch)s", cmsbot=scriptPath, @@ -120,30 +162,38 @@ def port_pr(repo, pr_num, des_branch, dryRun=False): repo=repo.full_name, pr_user=pr.head.user.login, pr_branch=pr.head.ref, - des_branch=des_branch) + des_branch=des_branch, + ) err, out = run_cmd(prepare_cmd) print(out) - if err: return False + if err: + return False all_commits = set([]) for c in pr.get_commits(): all_commits.add(c.sha) - git_cmd = format("cd %(clone_dir)s; git cherry-pick -x %(commit)s", - clone_dir=pr.base.repo.name, - commit=c.sha) + git_cmd = format( + "cd %(clone_dir)s; git cherry-pick -x %(commit)s", + clone_dir=pr.base.repo.name, + commit=c.sha, + ) err, out = run_cmd(git_cmd) print(out) - if err: return False - git_cmd = format("cd %(clone_dir)s; git log %(des_branch)s..", - clone_dir=pr.base.repo.name, - des_branch=des_branch) + if err: + return False + git_cmd = format( + "cd %(clone_dir)s; git log %(des_branch)s..", + clone_dir=pr.base.repo.name, + des_branch=des_branch, + ) err, out = run_cmd(git_cmd) print(out) - if err: return False + if err: + return False last_commit = None new_commit = None new_commits = {} for line in out.split("\n"): - m = re.match('^commit\s+([0-9a-f]+)$', line) + m = re.match("^commit\s+([0-9a-f]+)$", line) if m: print("New commit:", m.group(1), last_commit) if last_commit: @@ -151,36 +201,55 @@ def port_pr(repo, pr_num, des_branch, dryRun=False): new_commit = m.group(1) new_commits[new_commit] = None continue - m = re.match('^\s*\(cherry\s+picked\s+from\s+commit\s([0-9a-f]+)\)$', line) + m = re.match("^\s*\(cherry\s+picked\s+from\s+commit\s([0-9a-f]+)\)$", line) if m: print("found commit", m.group(1)) last_commit = m.group(1) - if last_commit: new_commits[new_commit] = last_commit + if last_commit: + new_commits[new_commit] = last_commit if pr.commits != len(new_commits): - print("Error: PR has ", pr.commits, " commits while we only found ", len(new_commits), ":", new_commits) + print( + "Error: PR has ", + pr.commits, + " commits while we only found ", + len(new_commits), + ":", + new_commits, + ) for c in new_commits: all_commits.remove(new_commits[c]) if all_commits: print("Something went wrong: Following commists not cherry-picked", all_commits) return False - git_cmd = format("cd %(clone_dir)s; git rev-parse --abbrev-ref HEAD", clone_dir=pr.base.repo.name) + git_cmd = format( + "cd %(clone_dir)s; git rev-parse --abbrev-ref HEAD", clone_dir=pr.base.repo.name + ) err, out = run_cmd(git_cmd) print(out) - if err or not out.startswith("port-" + str(pr_num) + "-"): return False + if err or not out.startswith("port-" + str(pr_num) + "-"): + return False new_branch = out - git_cmd = format("cd %(clone_dir)s; git push origin %(new_branch)s", - clone_dir=pr.base.repo.name, - new_branch=new_branch) + git_cmd = format( + "cd %(clone_dir)s; git push origin %(new_branch)s", + clone_dir=pr.base.repo.name, + new_branch=new_branch, + ) if not dryRun: err, out = run_cmd(git_cmd) print(out) - if err: return False + if err: + return False else: print("DryRun: should have push %s branch" % new_branch) from cms_static import GH_CMSSW_ORGANIZATION + newHead = "%s:%s" % (GH_CMSSW_ORGANIZATION, new_branch) - newBody = pr.body + "\nAutomatically ported from " + pr.base.ref + " #%s (original by @%s)." 
% ( - pr_num, str(pr.head.user.login)) + newBody = ( + pr.body + + "\nAutomatically ported from " + + pr.base.ref + + " #%s (original by @%s)." % (pr_num, str(pr.head.user.login)) + ) print(newHead) print(newBody) if not dryRun: @@ -188,9 +257,11 @@ def port_pr(repo, pr_num, des_branch, dryRun=False): else: print("DryRun: should have created Pull Request for %s using %s" % (des_branch, newHead)) print("Every thing looks good") - git_cmd = format("cd %(clone_dir)s; git branch -d %(new_branch)s", - clone_dir=pr.base.repo.name, - new_branch=new_branch) + git_cmd = format( + "cd %(clone_dir)s; git branch -d %(new_branch)s", + clone_dir=pr.base.repo.name, + new_branch=new_branch, + ) err, out = run_cmd(git_cmd) print("Local branch %s deleted" % new_branch) return True @@ -199,18 +270,21 @@ def port_pr(repo, pr_num, des_branch, dryRun=False): def prs2relnotes(notes, ref_repo=""): new_notes = {} for pr_num in notes: - new_notes[pr_num] = format("- %(ref_repo)s#%(pull_request)s from @%(author)s: %(title)s", - ref_repo=ref_repo, - pull_request=pr_num, - author=notes[pr_num]['author'], - title=notes[pr_num]['title']) + new_notes[pr_num] = format( + "- %(ref_repo)s#%(pull_request)s from @%(author)s: %(title)s", + ref_repo=ref_repo, + pull_request=pr_num, + author=notes[pr_num]["author"], + title=notes[pr_num]["title"], + ) return new_notes def cache_invalid_pr(pr_id, cache): - if not 'invalid_prs' in cache: cache['invalid_prs'] = [] - cache['invalid_prs'].append(pr_id) - cache['dirty'] = True + if not "invalid_prs" in cache: + cache["invalid_prs"] = [] + cache["invalid_prs"].append(pr_id) + cache["dirty"] = True def fill_notes_description(notes, repo_name, cmsprs, cache={}): @@ -221,12 +295,13 @@ def fill_notes_description(notes, repo_name, cmsprs, cache={}): author = items[1] pr_number = items[0] if cache and (pr_number in cache): - new_notes[pr_number] = cache[pr_number]['notes'] - print('Read from cache ', pr_number) + new_notes[pr_number] = cache[pr_number]["notes"] + print("Read from cache ", pr_number) continue parent_hash = items.pop() pr_hash_id = pr_number + ":" + parent_hash - if 'invalid_prs' in cache and pr_hash_id in cache['invalid_prs']: continue + if "invalid_prs" in cache and pr_hash_id in cache["invalid_prs"]: + continue print("Checking ", pr_number, author, parent_hash) try: pr_md5 = md5((pr_number + "\n").encode()).hexdigest() @@ -237,32 +312,33 @@ def fill_notes_description(notes, repo_name, cmsprs, cache={}): cache_invalid_pr(pr_hash_id, cache) continue pr = json.load(open(pr_cache)) - if not 'auther_sha' in pr: + if not "auther_sha" in pr: print(" Invalid/Indirect PR", pr) cache_invalid_pr(pr_hash_id, cache) continue ok = True - if pr['author'] != author: - print(" Author mismatch:", pr['author']) + if pr["author"] != author: + print(" Author mismatch:", pr["author"]) ok = False - if pr['auther_sha'] != parent_hash: - print(" sha mismatch:", pr['auther_sha']) + if pr["auther_sha"] != parent_hash: + print(" sha mismatch:", pr["auther_sha"]) ok = False if not ok: print(" Invalid/Indirect PR") cache_invalid_pr(pr_hash_id, cache) continue new_notes[pr_number] = { - 'author': author, - 'title': pr['title'], - 'user_ref': pr['auther_ref'], - 'hash': parent_hash, - 'branch': pr['branch']} + "author": author, + "title": pr["title"], + "user_ref": pr["auther_ref"], + "hash": parent_hash, + "branch": pr["branch"], + } if not pr_number in cache: cache[pr_number] = {} - cache[pr_number]['notes'] = new_notes[pr_number] - cache[pr_number]['pr'] = pr - cache['dirty'] = True + 
cache[pr_number]["notes"] = new_notes[pr_number] + cache[pr_number]["pr"] = pr + cache["dirty"] = True except UnknownObjectException as e: print("ERR:", e) cache_invalid_pr(pr_hash_id, cache) @@ -272,15 +348,17 @@ def fill_notes_description(notes, repo_name, cmsprs, cache={}): def get_merge_prs(prev_tag, this_tag, git_dir, cmsprs, cache={}, repo_name=None): print("Getting merged Pull Requests b/w", prev_tag, this_tag) - cmd = format("GIT_DIR=%(git_dir)s" - " git log --graph --merges --pretty='%%s: %%P' %(previous)s..%(release)s | " - " grep ' Merge pull request #[1-9][0-9]* from ' | " - " sed 's|^.* Merge pull request #||' | " - " sed 's|Dr15Jones:clangRecoParticleFlowPFProducer:|Dr15Jones/clangRecoParticleFlowPFProducer:|' | " - " sed 's|/[^:]*:||;s|from ||'", - git_dir=git_dir, - previous=prev_tag, - release=this_tag) + cmd = format( + "GIT_DIR=%(git_dir)s" + " git log --graph --merges --pretty='%%s: %%P' %(previous)s..%(release)s | " + " grep ' Merge pull request #[1-9][0-9]* from ' | " + " sed 's|^.* Merge pull request #||' | " + " sed 's|Dr15Jones:clangRecoParticleFlowPFProducer:|Dr15Jones/clangRecoParticleFlowPFProducer:|' | " + " sed 's|/[^:]*:||;s|from ||'", + git_dir=git_dir, + previous=prev_tag, + release=this_tag, + ) error, notes = run_cmd(cmd) print("Getting Merged Commits:", cmd) print(notes) @@ -294,12 +372,12 @@ def get_merge_prs(prev_tag, this_tag, git_dir, cmsprs, cache={}, repo_name=None) def save_prs_cache(cache, cache_file): - if cache['dirty']: - del cache['dirty'] + if cache["dirty"]: + del cache["dirty"] with open(cache_file, "w") as out_json: json.dump(cache, out_json, indent=2, sort_keys=True) out_json.close() - cache['dirty'] = False + cache["dirty"] = False def read_prs_cache(cache_file): @@ -308,28 +386,34 @@ def read_prs_cache(cache_file): with open(cache_file) as json_file: cache = json.loads(json_file.read()) json_file.close() - cache['dirty'] = False + cache["dirty"] = False return cache def get_ref_commit(repo, ref): for n in ["tags", "heads"]: - error, out = run_cmd("curl -s -L https://api.github.com/repos/%s/git/refs/%s/%s" % (repo, n, ref)) + error, out = run_cmd( + "curl -s -L https://api.github.com/repos/%s/git/refs/%s/%s" % (repo, n, ref) + ) if not error: info = json.loads(out) - if "object" in info: return info["object"]["sha"] + if "object" in info: + return info["object"]["sha"] print("Error: Unable to get sha for %s" % ref) return None def get_commit_info(repo, commit): - error, out = run_cmd("curl -s -L https://api.github.com/repos/%s/git/commits/%s" % (repo, commit)) + error, out = run_cmd( + "curl -s -L https://api.github.com/repos/%s/git/commits/%s" % (repo, commit) + ) if error: - tag = 'X (tag is undefined)' # TODO tag is undefined + tag = "X (tag is undefined)" # TODO tag is undefined print("Error, unable to get sha for tag %s" % tag) return {} commit_info = json.loads(out) - if "sha" in commit_info: return commit_info + if "sha" in commit_info: + return commit_info return {} @@ -351,7 +435,9 @@ def get_delete_pending_members(org, invitation_id): def get_organization_members(org, role="all", filter="all"): - return github_api("/orgs/%s/members" % org, params={"role": role, "filter": filter}, method="GET") + return github_api( + "/orgs/%s/members" % org, params={"role": role, "filter": filter}, method="GET" + ) def get_organization_repositores(org): @@ -363,65 +449,109 @@ def get_repository(repo): def add_organization_member(org, member, role="member"): - return github_api("/orgs/%s/memberships/%s" % (org, member), params={"role": role}, 
method="PUT") + return github_api( + "/orgs/%s/memberships/%s" % (org, member), params={"role": role}, method="PUT" + ) def invite_organization_member(org, member, role="direct_member"): - return github_api("/orgs/%s/invitations" % org, params={"role": role, "invitee_id": member}, method="POST") + return github_api( + "/orgs/%s/invitations" % org, params={"role": role, "invitee_id": member}, method="POST" + ) + def edit_pr(repo, pr_num, title=None, body=None, state=None, base=None): get_gh_token(repo) params = {} - if title: params["title"] = title - if body: params["body"] = body - if base: params["base"] = base - if state: params["state"] = state + if title: + params["title"] = title + if body: + params["body"] = body + if base: + params["base"] = base + if state: + params["state"] = state return github_api(uri="/repos/%s/pulls/%s" % (repo, pr_num), params=params, method="PATCH") + def create_issue_comment(repo, issue_num, body): get_gh_token(repo) - return github_api(uri="/repos/%s/issues/%s/comments" % (repo, issue_num), params={"body": body}) + return github_api( + uri="/repos/%s/issues/%s/comments" % (repo, issue_num), params={"body": body} + ) + def get_issue_labels(repo, issue_num): get_gh_token(repo) return github_api(uri="/repos/%s/issues/%s/labels" % (repo, issue_num), method="GET") + def add_issue_labels(repo, issue_num, labels=[]): get_gh_token(repo) - return github_api(uri="/repos/%s/issues/%s/labels" % (repo, issue_num), params={"labels": labels}, method="POST") + return github_api( + uri="/repos/%s/issues/%s/labels" % (repo, issue_num), + params={"labels": labels}, + method="POST", + ) + def set_issue_labels(repo, issue_num, labels=[]): get_gh_token(repo) - return github_api(uri="/repos/%s/issues/%s/labels" % (repo, issue_num), params={"labels": labels}, method="PUT") + return github_api( + uri="/repos/%s/issues/%s/labels" % (repo, issue_num), + params={"labels": labels}, + method="PUT", + ) + def remove_issue_labels_all(repo, issue_num): get_gh_token(repo) - return github_api(uri="/repos/%s/issues/%s/labels" % (repo, issue_num), method="DELETE", status=[204]) + return github_api( + uri="/repos/%s/issues/%s/labels" % (repo, issue_num), method="DELETE", status=[204] + ) + def remove_issue_label(repo, issue_num, label): get_gh_token(repo) - return github_api(uri="/repos/%s/issues/%s/labels/%s" % (repo, issue_num, label), method="DELETE") + return github_api( + uri="/repos/%s/issues/%s/labels/%s" % (repo, issue_num, label), method="DELETE" + ) -def get_rate_limits(): - return github_api(uri="/rate_limit", method="GET") -def github_api(uri, params=None, method="POST", headers=None, page=1, raw=False, per_page=100, last_page=False, all_pages=True, max_pages=-1, status=[]): +def get_rate_limits(): + return github_api(uri="/rate_limit", method="GET") + + +def github_api( + uri, + params=None, + method="POST", + headers=None, + page=1, + raw=False, + per_page=100, + last_page=False, + all_pages=True, + max_pages=-1, + status=[], +): global GH_RATE_LIMIT, GH_PAGE_RANGE - if max_pages>0 and page>max_pages: - return '[]' if raw else [] + if max_pages > 0 and page > max_pages: + return "[]" if raw else [] if not params: params = {} if not headers: headers = {} url = "https://api.github.com%s" % uri data = "" - if per_page and ('per_page' not in params) and (not method in ["POST", "PATCH", "PUT"]): params['per_page']=per_page + if per_page and ("per_page" not in params) and (not method in ["POST", "PATCH", "PUT"]): + params["per_page"] = per_page if method == "GET": if params: url = url 
+ "?" + urlencode(params) elif method in ["POST", "PATCH", "PUT"]: data = json.dumps(params) - if version_info[0]==3: + if version_info[0] == 3: data = data.encode("utf-8") if page > 1: if not "?" in url: @@ -433,37 +563,56 @@ def github_api(uri, params=None, method="POST", headers=None, page=1, raw=False request = Request(url, data=data, headers=headers) request.get_method = lambda: method response = urlopen(request) - if page<=1 : GH_PAGE_RANGE = [] + if page <= 1: + GH_PAGE_RANGE = [] try: - GH_RATE_LIMIT = [ int(response.headers["X-RateLimit-Remaining"]), int(response.headers["X-RateLimit-Limit"]), int(response.headers["X-Ratelimit-Reset"])] + GH_RATE_LIMIT = [ + int(response.headers["X-RateLimit-Remaining"]), + int(response.headers["X-RateLimit-Limit"]), + int(response.headers["X-Ratelimit-Reset"]), + ] except Exception as e: - print("ERROR:",e) - if (page <= 1) and (method=='GET'): + print("ERROR:", e) + if (page <= 1) and (method == "GET"): link = response.headers.get("Link") if link: pages = [] for x in link.split(" "): - m = re.match('^.*[?&]page=([1-9][0-9]*).*$', x) - if m: pages.append(int(m.group(1))) + m = re.match("^.*[?&]page=([1-9][0-9]*).*$", x) + if m: + pages.append(int(m.group(1))) if len(pages) == 2: GH_PAGE_RANGE += range(pages[0], pages[1] + 1) elif len(pages) == 1: GH_PAGE_RANGE += pages cont = response.read() if status: - return response.status in status - if raw: return cont + return response.status in status + if raw: + return cont data = json.loads(cont) if GH_PAGE_RANGE and all_pages: - if last_page: - return github_api(uri, params, method, headers, GH_PAGE_RANGE[-1], raw=False, per_page=per_page, all_pages=False) - for page in GH_PAGE_RANGE: - if max_pages>0 and page>max_pages: break - data += github_api(uri, params, method, headers, page, raw=raw, per_page=per_page, all_pages=False) + if last_page: + return github_api( + uri, + params, + method, + headers, + GH_PAGE_RANGE[-1], + raw=False, + per_page=per_page, + all_pages=False, + ) + for page in GH_PAGE_RANGE: + if max_pages > 0 and page > max_pages: + break + data += github_api( + uri, params, method, headers, page, raw=raw, per_page=per_page, all_pages=False + ) return data -def get_pull_requests(gh_repo, branch=None, status='open'): +def get_pull_requests(gh_repo, branch=None, status="open"): """ Get all pull request for the current branch of the repo :return: @@ -491,8 +640,10 @@ def pr_get_changed_files(pr): for f in pr.get_files(): rez.append(f.filename) try: - if f.previous_filename: rez.append(f.previous_filename) - except: pass + if f.previous_filename: + rez.append(f.previous_filename) + except: + pass return rez @@ -501,115 +652,161 @@ def get_unix_time(data_obj): def get_gh_token(repository=None, token_file=None): - global GH_TOKENS, GH_TOKEN_INDEX - if not GH_TOKENS: - GH_TOKEN_INDEX = 0 - if not token_file: - if repository: - repo_dir = join(scriptPath,'repos',repository.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config - token_file = expanduser(repo_config.GH_TOKEN) - try: - with open(token_file) as ref: - for tok in [t.strip() for t in ref.readlines() ]: - if not tok: continue - GH_TOKENS.append(tok) - except: - GH_TOKENS = [""] - return GH_TOKENS[GH_TOKEN_INDEX] + global GH_TOKENS, GH_TOKEN_INDEX + if not GH_TOKENS: + GH_TOKEN_INDEX = 0 + if not token_file: + if repository: + repo_dir = join(scriptPath, "repos", repository.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import 
repo_config + + token_file = expanduser(repo_config.GH_TOKEN) + try: + with open(token_file) as ref: + for tok in [t.strip() for t in ref.readlines()]: + if not tok: + continue + GH_TOKENS.append(tok) + except: + GH_TOKENS = [""] + return GH_TOKENS[GH_TOKEN_INDEX] def set_gh_user(user): - global GH_USER - GH_USER = user + global GH_USER + GH_USER = user def get_combined_statuses(commit, repository): - get_gh_token(repository) - return github_api("/repos/%s/commits/%s/status" % (repository, commit), method='GET') + get_gh_token(repository) + return github_api("/repos/%s/commits/%s/status" % (repository, commit), method="GET") def get_pr_commits(pr, repository, per_page=None, last_page=False): - get_gh_token(repository) - return github_api("/repos/%s/pulls/%s/commits" % (repository, pr), method='GET', per_page=per_page, last_page=last_page) + get_gh_token(repository) + return github_api( + "/repos/%s/pulls/%s/commits" % (repository, pr), + method="GET", + per_page=per_page, + last_page=last_page, + ) def get_pr_latest_commit(pr, repository): - get_gh_token(repository) - return str(get_pr_commits(pr, repository, per_page=1, last_page=True)[-1]["sha"]) + get_gh_token(repository) + return str(get_pr_commits(pr, repository, per_page=1, last_page=True)[-1]["sha"]) def set_comment_emoji(comment_id, repository, emoji="+1", reset_other=True): - cur_emoji = None - if reset_other: - for e in get_comment_emojis(comment_id, repository): - login = e['user']['login'].encode("ascii", "ignore") - if sys.version_info[0] == 3: - login = login.decode() - if login == GH_USER: - if e['content']!=emoji: - delete_comment_emoji(e['id'], comment_id, repository) - else: - cur_emoji = e - if cur_emoji: return cur_emoji - get_gh_token(repository) - params = {"content" : emoji } - return github_api('/repos/%s/issues/comments/%s/reactions' % (repository, comment_id), params=params) + cur_emoji = None + if reset_other: + for e in get_comment_emojis(comment_id, repository): + login = e["user"]["login"].encode("ascii", "ignore") + if sys.version_info[0] == 3: + login = login.decode() + if login == GH_USER: + if e["content"] != emoji: + delete_comment_emoji(e["id"], comment_id, repository) + else: + cur_emoji = e + if cur_emoji: + return cur_emoji + get_gh_token(repository) + params = {"content": emoji} + return github_api( + "/repos/%s/issues/comments/%s/reactions" % (repository, comment_id), params=params + ) -def get_repository_issues(repository, params={'sort': 'updated', 'state': 'all'}, page=1, all_pages=False): - get_gh_token(repository) - return github_api('/repos/%s/issues' % repository, method="GET", params=params, page=page, all_pages=all_pages) +def get_repository_issues( + repository, params={"sort": "updated", "state": "all"}, page=1, all_pages=False +): + get_gh_token(repository) + return github_api( + "/repos/%s/issues" % repository, + method="GET", + params=params, + page=page, + all_pages=all_pages, + ) def get_issue_comments(repository, issue_num): - get_gh_token(repository) - return github_api('/repos/%s/issues/%s/comments' % (repository, issue_num), method="GET") + get_gh_token(repository) + return github_api("/repos/%s/issues/%s/comments" % (repository, issue_num), method="GET") def get_issue(repository, issue_num): - get_gh_token(repository) - return github_api('/repos/%s/issues/%s' % (repository, issue_num), method="GET") + get_gh_token(repository) + return github_api("/repos/%s/issues/%s" % (repository, issue_num), method="GET") + -def get_releases(repository, params={'sort':'updated'}, page=1, 
all_pages=False): - get_gh_token(repository) - return github_api('/repos/%s/releases' % repository, method="GET", params=params, page=page, all_pages=all_pages) +def get_releases(repository, params={"sort": "updated"}, page=1, all_pages=False): + get_gh_token(repository) + return github_api( + "/repos/%s/releases" % repository, + method="GET", + params=params, + page=page, + all_pages=all_pages, + ) def get_release_by_tag(repository, tag): - get_gh_token(repository) - return github_api('/repos/%s/releases/tags/%s' % (repository, tag), method="GET") + get_gh_token(repository) + return github_api("/repos/%s/releases/tags/%s" % (repository, tag), method="GET") def get_comment_emojis(comment_id, repository): - get_gh_token(repository) - return github_api('/repos/%s/issues/comments/%s/reactions' % (repository, comment_id), method="GET") + get_gh_token(repository) + return github_api( + "/repos/%s/issues/comments/%s/reactions" % (repository, comment_id), method="GET" + ) def delete_comment_emoji(emoji_id, comment_id, repository): - get_gh_token(repository) - return github_api('/repos/%s/issues/comments/%s/reactions/%s' % (repository, comment_id, emoji_id), method="DELETE", raw=True) + get_gh_token(repository) + return github_api( + "/repos/%s/issues/comments/%s/reactions/%s" % (repository, comment_id, emoji_id), + method="DELETE", + raw=True, + ) def get_git_tree(sha, repository): - get_gh_token(repository) - return github_api("/repos/%s/git/trees/%s" % (repository, sha), method='GET') - - -def mark_commit_status(commit, repository, context="default", state="pending", url="", description="Test started", reset=False): - get_gh_token(repository) - params = {'state': state, 'target_url': url, 'description': description, 'context': context} - github_api('/repos/%s/statuses/%s' % (repository, commit), params=params) - if reset: - statuses = get_combined_statuses(commit, repository) - if 'statuses' not in statuses: return - params = {'state': 'success', 'target_url': '', 'description': 'Not yet started or might not rerun'} - for s in statuses['statuses']: - if s['context'].startswith(context+"/"): - params['context'] = s['context'] - github_api('/repos/%s/statuses/%s' % (repository, commit), params=params) - return + get_gh_token(repository) + return github_api("/repos/%s/git/trees/%s" % (repository, sha), method="GET") + + +def mark_commit_status( + commit, + repository, + context="default", + state="pending", + url="", + description="Test started", + reset=False, +): + get_gh_token(repository) + params = {"state": state, "target_url": url, "description": description, "context": context} + github_api("/repos/%s/statuses/%s" % (repository, commit), params=params) + if reset: + statuses = get_combined_statuses(commit, repository) + if "statuses" not in statuses: + return + params = { + "state": "success", + "target_url": "", + "description": "Not yet started or might not rerun", + } + for s in statuses["statuses"]: + if s["context"].startswith(context + "/"): + params["context"] = s["context"] + github_api("/repos/%s/statuses/%s" % (repository, commit), params=params) + return + def get_branch(repository, branch_name): get_gh_token(repository) @@ -619,9 +816,7 @@ def get_branch(repository, branch_name): def get_git_tag(repository, tag_name): get_gh_token(repository) - data = github_api( - "/repos/%s/git/ref/tags/%s" % (repository, tag_name), method="GET" - ) + data = github_api("/repos/%s/git/ref/tags/%s" % (repository, tag_name), method="GET") return data @@ -643,38 +838,39 @@ def 
get_commit_tags(repository, commit_sha, all_tags=False): def get_org_packages(org, package_type="container", visibility=None, token_file=None): - get_gh_token(token_file=token_file) - params = {"package_type" : package_type} - if visibility: params["visibility"] = visibility - return github_api( - "/orgs/%s/packages" % org, - method="GET", - params=params, - all_pages=True, + get_gh_token(token_file=token_file) + params = {"package_type": package_type} + if visibility: + params["visibility"] = visibility + return github_api( + "/orgs/%s/packages" % org, + method="GET", + params=params, + all_pages=True, ) def get_org_package(org, package, package_type="container", token_file=None): - get_gh_token(token_file=token_file) - return github_api( - "/orgs/%s/packages/%s/%s" % (org, package_type, package), - method="GET", - all_pages=True + get_gh_token(token_file=token_file) + return github_api( + "/orgs/%s/packages/%s/%s" % (org, package_type, package), method="GET", all_pages=True ) + def get_org_package_versions(org, package, package_type="container", token_file=None): - get_gh_token(token_file=token_file) - return github_api( - "/orgs/%s/packages/%s/%s/versions" % (org, package_type, package), - method="GET", - all_pages=True + get_gh_token(token_file=token_file) + return github_api( + "/orgs/%s/packages/%s/%s/versions" % (org, package_type, package), + method="GET", + all_pages=True, ) + def get_org_package_version(org, package, version_id, package_type="container", token_file=None): - get_gh_token(token_file=token_file) - return github_api( - "/orgs/%s/packages/%s/%s/versions/%s" % (org, package_type, package, version_id), - method="GET", + get_gh_token(token_file=token_file) + return github_api( + "/orgs/%s/packages/%s/%s/versions/%s" % (org, package_type, package, version_id), + method="GET", ) @@ -699,10 +895,9 @@ def find_tags(repository, name): data = github_api("/repos/%s/git/matching-refs/tags/%s" % (repository, name), method="GET") return data - - + + def get_pr(repository, pr_id): data = github_api("/repos/%s/pulls/%s" % (repository, pr_id), method="GET") return data - diff --git a/githublabels.py b/githublabels.py index 3b4bd141365e..6d7c7869ed76 100644 --- a/githublabels.py +++ b/githublabels.py @@ -1,72 +1,84 @@ try: - from categories import get_dpg_pog + from categories import get_dpg_pog except: - def get_dpg_pog(): return [] + + def get_dpg_pog(): + return [] + LABEL_COLORS = { - "hold" : "ff8000", - "pending": "fbca04", - "approved": "2cbe4e", - "rejected": "e11d21", - "info": "0000ff", - "doc": "257fdb", + "hold": "ff8000", + "pending": "fbca04", + "approved": "2cbe4e", + "rejected": "e11d21", + "info": "0000ff", + "doc": "257fdb", } LABEL_TYPES = { - "pending": LABEL_COLORS["pending"], - "approved": LABEL_COLORS["approved"], - "rejected": LABEL_COLORS["rejected"], + "pending": LABEL_COLORS["pending"], + "approved": LABEL_COLORS["approved"], + "rejected": LABEL_COLORS["rejected"], } -#TYPE_COMMANDS[LABEL_NAME]=[LABEL_COLOR, +# TYPE_COMMANDS[LABEL_NAME]=[LABEL_COLOR, # REGEXP_TO_MATCH_CCOMMENT", # TYPE # type: only apply the last comment # mtype: accomulate all comments] TYPE_COMMANDS = { - "bug-fix" : ["b8860b", "bug(-fix|fix|)", "type"], - "new-feature" : [LABEL_COLORS["info"], "(new-|)(feature|idea)", "type"], - "documentation" : [LABEL_COLORS["doc"], "doc(umentation|)", "mtype"], - "performance-improvements" : ["5b9ee3", "performance|improvements|performance-improvements", "mtype"], + "bug-fix": ["b8860b", "bug(-fix|fix|)", "type"], + "new-feature": 
[LABEL_COLORS["info"], "(new-|)(feature|idea)", "type"], + "documentation": [LABEL_COLORS["doc"], "doc(umentation|)", "mtype"], + "performance-improvements": [ + "5b9ee3", + "performance|improvements|performance-improvements", + "mtype", + ], } for lab in get_dpg_pog(): - if lab in TYPE_COMMANDS: continue - TYPE_COMMANDS[lab] = [LABEL_COLORS['doc'], lab, "mtype"] + if lab in TYPE_COMMANDS: + continue + TYPE_COMMANDS[lab] = [LABEL_COLORS["doc"], lab, "mtype"] + +TEST_IGNORE_REASON = ["manual-override", "ib-failure", "external-failure"] COMMON_LABELS = { - "tests-started": LABEL_COLORS["hold"], - "fully-signed": LABEL_COLORS["approved"], - "pending-signatures": LABEL_COLORS["hold"], - "pending-assignment": LABEL_COLORS["hold"], - "new-package-pending" : LABEL_COLORS["rejected"], - "backport" : LABEL_COLORS["info"], - "backport-ok" : LABEL_COLORS["approved"], - "urgent" : "cc317c", - "process-complete" : LABEL_COLORS["approved"], - "hold": LABEL_COLORS["hold"], - "compilation-warnings": LABEL_COLORS["hold"], - "requires-external" : LABEL_COLORS["info"], + "tests-started": LABEL_COLORS["hold"], + "fully-signed": LABEL_COLORS["approved"], + "pending-signatures": LABEL_COLORS["hold"], + "pending-assignment": LABEL_COLORS["hold"], + "new-package-pending": LABEL_COLORS["rejected"], + "backport": LABEL_COLORS["info"], + "backport-ok": LABEL_COLORS["approved"], + "urgent": "cc317c", + "process-complete": LABEL_COLORS["approved"], + "hold": LABEL_COLORS["hold"], + "compilation-warnings": LABEL_COLORS["hold"], + "requires-external": LABEL_COLORS["info"], } for lab in TYPE_COMMANDS: - COMMON_LABELS[lab] = TYPE_COMMANDS[lab][0] + COMMON_LABELS[lab] = TYPE_COMMANDS[lab][0] + +for reason in TEST_IGNORE_REASON: + COMMON_LABELS["tests-" + reason] = LABEL_COLORS["info"] COMPARISON_LABELS = { - "comparison-notrun" : "bfe5bf", - "comparison-available" : LABEL_TYPES["approved"], - "comparison-pending" : LABEL_TYPES["pending"], + "comparison-notrun": "bfe5bf", + "comparison-available": LABEL_TYPES["approved"], + "comparison-pending": LABEL_TYPES["pending"], } CMSSW_BUILD_LABELS = { - "build-aborted" : LABEL_COLORS["rejected"], - "build-in-progress" : LABEL_COLORS["hold"], - "build-pending-approval" : LABEL_TYPES["pending"], - "build-successful" : LABEL_TYPES["approved"], - "release-notes-requested" : LABEL_TYPES["approved"], - "release-announced" : LABEL_TYPES["approved"], - "toolconf-building" : LABEL_COLORS["hold"], - "uploading-builds" : LABEL_COLORS["hold"], - "release-build-request" : LABEL_COLORS["approved"], + "build-aborted": LABEL_COLORS["rejected"], + "build-in-progress": LABEL_COLORS["hold"], + "build-pending-approval": LABEL_TYPES["pending"], + "build-successful": LABEL_TYPES["approved"], + "release-notes-requested": LABEL_TYPES["approved"], + "release-announced": LABEL_TYPES["approved"], + "toolconf-building": LABEL_COLORS["hold"], + "uploading-builds": LABEL_COLORS["hold"], + "release-build-request": LABEL_COLORS["approved"], } - diff --git a/gitmergesgraph.py b/gitmergesgraph.py index 9dc9de491a46..d409a37204a3 100755 --- a/gitmergesgraph.py +++ b/gitmergesgraph.py @@ -7,215 +7,224 @@ # # Interprets the commit history of a branch as a graph. 
# It uses git log --graph to do so -# +# # Node: a merge commit, it can be a pull request or a automated merge # Each node contains the links to its parent commits and children commits -# +# -INFO_SEPARATOR = '--INFO--' -REPO = 'cmssw.git' -MAGIC_COMMAND_GRAPH = 'GIT_DIR='+REPO+' git log --merges --graph --pretty=\'"'+INFO_SEPARATOR+'%H,%s"\' RELEASE_QUEUE ' +INFO_SEPARATOR = "--INFO--" +REPO = "cmssw.git" +MAGIC_COMMAND_GRAPH = ( + "GIT_DIR=" + + REPO + + " git log --merges --graph --pretty='\"" + + INFO_SEPARATOR + + "%H,%s\"' RELEASE_QUEUE " +) # This regular expression allows to identify if a merge commit is an automatic forward port -AUTO_FORWARD_PORT_REGEX='Merge CMSSW.+ into CMSSW.+' +AUTO_FORWARD_PORT_REGEX = "Merge CMSSW.+ into CMSSW.+" + # # load the graph for a given release queue # maxNodes limits the number of nodes(commits) to check, -1 means no maximum # -def load_graph(release_queue , maxNodes): - command = MAGIC_COMMAND_GRAPH.replace('RELEASE_QUEUE',release_queue) +def load_graph(release_queue, maxNodes): + command = MAGIC_COMMAND_GRAPH.replace("RELEASE_QUEUE", release_queue) - error, out = run_cmd(command) + error, out = run_cmd(command) - prev_node_lane = {} + prev_node_lane = {} - previous_lane = 1 - node_number = 0 + previous_lane = 1 + node_number = 0 - all_nodes = {} + all_nodes = {} - for line in out.splitlines(): - if maxNodes != -1 and node_number > maxNodes: - identify_automated_merges(all_nodes) - return all_nodes - #check if the line contains a node - if INFO_SEPARATOR in line: - - node_number += 1 - line_parts = line.split(INFO_SEPARATOR) - lanes = line_parts[0].replace('"','').replace(' ','') - lane = lanes.index('*') + 1 - - node_info = line_parts[1] - node_info_parts = node_info.split(",") - - #hash, description - new_node = Node(node_info_parts[0],node_info_parts[1],lane) - all_nodes[node_info_parts[0]] = new_node + for line in out.splitlines(): + if maxNodes != -1 and node_number > maxNodes: + identify_automated_merges(all_nodes) + return all_nodes + # check if the line contains a node + if INFO_SEPARATOR in line: + node_number += 1 + line_parts = line.split(INFO_SEPARATOR) + lanes = line_parts[0].replace('"', "").replace(" ", "") + lane = lanes.index("*") + 1 - # for the first node I just add it without any conection - if node_number == 1: - set_previous_node_lane( prev_node_lane , lane , new_node ) - continue + node_info = line_parts[1] + node_info_parts = node_info.split(",") - #changed lane? - if previous_lane < lane: - #connect this node with the preivous one from the previous lane - previous_node = get_previous_node_lane( prev_node_lane , previous_lane ) - else: - #connect this node with the previous one from of the same lane - previous_node = get_previous_node_lane( prev_node_lane , lane ) + # hash, description + new_node = Node(node_info_parts[0], node_info_parts[1], lane) + all_nodes[node_info_parts[0]] = new_node - if previous_node == None: - set_previous_node_lane( prev_node_lane , lane , new_node ) - previous_lane = lane - continue + # for the first node I just add it without any conection + if node_number == 1: + set_previous_node_lane(prev_node_lane, lane, new_node) + continue - link_nodes( new_node , previous_node ) - set_previous_node_lane( prev_node_lane , lane , new_node ) + # changed lane? 
+ if previous_lane < lane: + # connect this node with the preivous one from the previous lane + previous_node = get_previous_node_lane(prev_node_lane, previous_lane) + else: + # connect this node with the previous one from of the same lane + previous_node = get_previous_node_lane(prev_node_lane, lane) + if previous_node == None: + set_previous_node_lane(prev_node_lane, lane, new_node) + previous_lane = lane + continue - all_nodes[node_info_parts[0]] = new_node - previous_lane = lane + link_nodes(new_node, previous_node) + set_previous_node_lane(prev_node_lane, lane, new_node) + all_nodes[node_info_parts[0]] = new_node + previous_lane = lane + identify_automated_merges(all_nodes) - identify_automated_merges(all_nodes) + return all_nodes - return all_nodes # # adds the node to the prev_node_lane structure in the given lane # -def set_previous_node_lane( prev_node_lane , lane , node ): - prev_node_lane[lane] = node - #print prev_node_lane +def set_previous_node_lane(prev_node_lane, lane, node): + prev_node_lane[lane] = node + # print prev_node_lane + # # get the previous node for the lane given as parameter # -def get_previous_node_lane( prev_node_lane , lane ): - return prev_node_lane.get(lane) +def get_previous_node_lane(prev_node_lane, lane): + return prev_node_lane.get(lane) + # # links a parent node with a son node # parent and son must be instances of Node # -def link_nodes( parent, son): - parent.add_son(son) - son.add_parent(parent) +def link_nodes(parent, son): + parent.add_son(son) + son.add_parent(parent) + # -# identifies the automated merge commits that were responsible for bringing +# identifies the automated merge commits that were responsible for bringing # a commit into the release queue # def identify_automated_merges(nodes): - commits_from_merge = [n for n in list(nodes.values()) if n.is_from_merge] - - for commit in commits_from_merge: - if not commit.brought_by: - responsible_commit = identify_responsible_automated_merge(commit) - commit.brought_by = responsible_commit - if commit.brought_by: - responsible_commit.brought_commits.append( commit ) - + commits_from_merge = [n for n in list(nodes.values()) if n.is_from_merge] + + for commit in commits_from_merge: + if not commit.brought_by: + responsible_commit = identify_responsible_automated_merge(commit) + commit.brought_by = responsible_commit + if commit.brought_by: + responsible_commit.brought_commits.append(commit) - #print '-'.join( [ '%s by %s' % (c.hash,c.brought_by.hash) for c in commits_from_merge if c.brought_by] ) + # print '-'.join( [ '%s by %s' % (c.hash,c.brought_by.hash) for c in commits_from_merge if c.brought_by] ) - # automated_merges = [n for n in list(nodes.values()) if n.is_automated_merge] + # automated_merges = [n for n in list(nodes.values()) if n.is_automated_merge] - #for auto_merge in automated_merges: - # auto_merge.printme() + # for auto_merge in automated_merges: + # auto_merge.printme() # # identifies the automated merge that was responsible for binging the commit # def identify_responsible_automated_merge(commit): - children = list(commit.children.values()) - - if len( children ) == 0: - return commit + children = list(commit.children.values()) + + if len(children) == 0: + return commit + + # for the moment a commit only has one kid! if that changes this needs to be changed + child = children[0] + if child.lane == 1: + return child + else: + return identify_responsible_automated_merge(child) - #for the moment a commit only has one kid! 
if that changes this needs to be changed - child = children[0] - if child.lane == 1: - return child - else: - return identify_responsible_automated_merge(child) # # returns a list of commits(Nodes) of the pull requests that come from a merge commit # -def get_prs_from_merge_commit( graph ): - return [ c for c in list(graph.values()) if c.is_from_merge and c.is_pr ] +def get_prs_from_merge_commit(graph): + return [c for c in list(graph.values()) if c.is_from_merge and c.is_pr] + # # returns a list of pr numbers that were brougth by a commit given its hash # -def get_prs_brought_by_commit( graph , commit_hash ): - return [ c for c in list(graph.values()) if c.is_pr and c.is_from_merge and c.brought_by.hash == commit_hash ] +def get_prs_brought_by_commit(graph, commit_hash): + return [ + c + for c in list(graph.values()) + if c.is_pr and c.is_from_merge and c.brought_by.hash == commit_hash + ] class Node(object): - - # initializes the node with a hash, the lane (line in history), and a description - def __init__(self, hash, desc,lane): - self.hash = hash - self.desc = desc - self.lane = lane - self.is_from_merge = lane > 1 - self.is_automated_merge = re.match(AUTO_FORWARD_PORT_REGEX, desc) != None - # which commit brought this one to the release queue - self.brought_by = None - # which commits did this commit bring - self.brought_commits = [] - self.is_pr = 'Merge pull request #' in desc - - if self.is_pr: - self.pr_number = self.identify_pr_number() - else: - self.pr_number = None - - # nodes to which is node is parent - self.children = {} - # nodes to which this node is son - self.parents = {} - - def add_son( self , son_node ): - self.children[son_node.hash] = son_node - - def add_parent(self,parent_node): - self.parents[parent_node.hash] = parent_node - - def identify_pr_number(self): - return self.desc.split(' ')[3].replace('#','') - - def printme(self): - spaces = '' - for l in range(self.lane-1): - spaces += ' ' - - print('%s %d;%s-%s' % (spaces,self.lane,self.hash,self.desc)) - print('parents: %s'% '-'.join(list(self.parents.keys()))) - print('children: %s'% '-'.join(list(self.children.keys()))) - print('is automated merge: %s' % self.is_automated_merge) - print('is from merge commit: %s' % self.is_from_merge) - print('is pr: %s' % self.is_pr) - print('pr number: %s' % self.pr_number) - if self.is_automated_merge: - print('Is responsible for: ') - print(' - '.join([ c.hash for c in self.brought_commits ])) - print() - print() - if self.is_from_merge: - print('brought by: ') - print(self.brought_by.hash) - + # initializes the node with a hash, the lane (line in history), and a description + def __init__(self, hash, desc, lane): + self.hash = hash + self.desc = desc + self.lane = lane + self.is_from_merge = lane > 1 + self.is_automated_merge = re.match(AUTO_FORWARD_PORT_REGEX, desc) != None + # which commit brought this one to the release queue + self.brought_by = None + # which commits did this commit bring + self.brought_commits = [] + self.is_pr = "Merge pull request #" in desc + + if self.is_pr: + self.pr_number = self.identify_pr_number() + else: + self.pr_number = None + + # nodes to which is node is parent + self.children = {} + # nodes to which this node is son + self.parents = {} + + def add_son(self, son_node): + self.children[son_node.hash] = son_node + + def add_parent(self, parent_node): + self.parents[parent_node.hash] = parent_node + + def identify_pr_number(self): + return self.desc.split(" ")[3].replace("#", "") + + def printme(self): + spaces = "" + for l in 
range(self.lane - 1): + spaces += " " + + print("%s %d;%s-%s" % (spaces, self.lane, self.hash, self.desc)) + print("parents: %s" % "-".join(list(self.parents.keys()))) + print("children: %s" % "-".join(list(self.children.keys()))) + print("is automated merge: %s" % self.is_automated_merge) + print("is from merge commit: %s" % self.is_from_merge) + print("is pr: %s" % self.is_pr) + print("pr number: %s" % self.pr_number) + if self.is_automated_merge: + print("Is responsible for: ") + print(" - ".join([c.hash for c in self.brought_commits])) + print() + print() + if self.is_from_merge: + print("brought by: ") + print(self.brought_by.hash) # # Testing # # graph = load_graph('CMSSW_7_2_X',1000) - diff --git a/ib-create-tag.py b/ib-create-tag.py index 0ecdc0ef50b6..9ca8f1c5ada8 100755 --- a/ib-create-tag.py +++ b/ib-create-tag.py @@ -55,18 +55,12 @@ parser.add_option( "-d", "--date", dest="date", action="store", help="CMSSW IB date (YYYY-MM-DD)" ) - parser.add_option( - "-H", "--hour", dest="hour", action="store", help="CMSSW IB hour (HH)" - ) + parser.add_option("-H", "--hour", dest="hour", action="store", help="CMSSW IB hour (HH)") parser.add_option( "-M", "--minute", dest="minute", action="store", help="CMSSW IB minute (MM)", default="00" ) - parser.add_option( - "-b", "--branch", dest="branch", action="store", help="CMSSW branch" - ) - parser.add_option( - "-q", "--queue", dest="queue", action="store", help="CMSSW IB queue" - ) + parser.add_option("-b", "--branch", dest="branch", action="store", help="CMSSW branch") + parser.add_option("-q", "--queue", dest="queue", action="store", help="CMSSW IB queue") opts, args = parser.parse_args() RELEASE_NAME = opts.release_name # "CMSSW_13_0_X_2023-02-02-1100" @@ -89,9 +83,9 @@ head = None for commit_ in commits_: - if commit_["commit"]["committer"]["name"] == "GitHub" and commit_["commit"][ - "author" - ]["name"] in (CMSSW_L1 + ["cmsbuild"]): + if commit_["commit"]["committer"]["name"] == "GitHub" and commit_["commit"]["author"][ + "name" + ] in (CMSSW_L1 + ["cmsbuild"]): head = commit_ break @@ -108,8 +102,6 @@ tags = find_tags(repo, QUEUE + "_20") RELEASE_LIST = [ - t["ref"].replace("refs/tags/", "") - for t in tags - if t["object"]["sha"] == HEAD_SHA + t["ref"].replace("refs/tags/", "") for t in tags if t["object"]["sha"] == HEAD_SHA ] print(" ".join(RELEASE_LIST[::-1])) diff --git a/ib-pr-workflow-changed.py b/ib-pr-workflow-changed.py index cbd31c84957c..98d75f0818a4 100755 --- a/ib-pr-workflow-changed.py +++ b/ib-pr-workflow-changed.py @@ -4,54 +4,58 @@ from sys import argv, exit from _py2with3compatibility import run_cmd + def parse_workflows(workflow_file): - err, out = run_cmd("cat %s" % workflow_file) - if err: - print(out) - exit(1) - - wf = "" - wfs = {} - steps = 0 - for line in out.split("\n"): - line =line.strip() - m = re.match("^.*\[(\d+)\] *: *(.+)$",line) - if not m: continue - step = m.group(1) - cmd = m.group(2).strip() - prefix, rest = line.split(":",1) - items = prefix.split(" ") - if re.match("^\d+(\.\d+|)$",items[0]): wf = items[0] - if not wf in wfs: wfs[wf]={} - wfs[wf][step]=re.sub(" +"," ",cmd) - steps += 1 - print("%s: %s workflows, %s steps" % (workflow_file, len(wfs), steps)) - return wfs + err, out = run_cmd("cat %s" % workflow_file) + if err: + print(out) + exit(1) + + wf = "" + wfs = {} + steps = 0 + for line in out.split("\n"): + line = line.strip() + m = re.match("^.*\[(\d+)\] *: *(.+)$", line) + if not m: + continue + step = m.group(1) + cmd = m.group(2).strip() + prefix, rest = line.split(":", 1) + items = 
prefix.split(" ") + if re.match("^\d+(\.\d+|)$", items[0]): + wf = items[0] + if not wf in wfs: + wfs[wf] = {} + wfs[wf][step] = re.sub(" +", " ", cmd) + steps += 1 + print("%s: %s workflows, %s steps" % (workflow_file, len(wfs), steps)) + return wfs + orig_workflows = argv[1] new_workflows = argv[2] -wfs={} -wfs["old"]=parse_workflows(argv[1]) -wfs["new"]=parse_workflows(argv[2]) +wfs = {} +wfs["old"] = parse_workflows(argv[1]) +wfs["new"] = parse_workflows(argv[2]) new_wf = [] new_step = [] chg_step = [] for wf in wfs["new"]: - if not wf in wfs["old"]: - new_wf.append(wf) - else: - for step in wfs["new"][wf]: - if not step in wfs["old"][wf]: - new_step.append(wf) - break - elif not wfs["old"][wf]==wfs["new"][wf]: - chg_step.append(wf) - break + if not wf in wfs["old"]: + new_wf.append(wf) + else: + for step in wfs["new"][wf]: + if not step in wfs["old"][wf]: + new_step.append(wf) + break + elif not wfs["old"][wf] == wfs["new"][wf]: + chg_step.append(wf) + break print("New workflows:%s: %s" % (len(new_wf), ",".join(new_wf))) print("Workflows with new steps:%s: %s" % (len(new_step), ",".join(new_step))) print("Wrokflows with changed steps:%s: %s" % (len(chg_step), ",".join(chg_step))) -print("WORKFLOWS TO RUN:",",".join(new_wf+new_step+chg_step)) - +print("WORKFLOWS TO RUN:", ",".join(new_wf + new_step + chg_step)) diff --git a/ib-upload-logs.py b/ib-upload-logs.py index 4007c46615a2..da12130eaf2d 100755 --- a/ib-upload-logs.py +++ b/ib-upload-logs.py @@ -1,5 +1,6 @@ #!/usr/bin/env python from sys import argv from logUpdater import LogUpdater -logger=LogUpdater(dirIn=argv[1]) + +logger = LogUpdater(dirIn=argv[1]) logger.copyLogs(argv[2]) diff --git a/jenkins-jobs/es-cmssw-afs-eos.py b/jenkins-jobs/es-cmssw-afs-eos.py index d7369c09e577..628a5a63ce16 100755 --- a/jenkins-jobs/es-cmssw-afs-eos.py +++ b/jenkins-jobs/es-cmssw-afs-eos.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 from __future__ import print_function -from os.path import dirname,abspath +from os.path import dirname, abspath import sys + sys.path.append(dirname(dirname(abspath(__file__)))) from hashlib import sha1 import json @@ -9,20 +10,25 @@ from _py2with3compatibility import run_cmd from cmsutils import cmsswIB2Week -err, logs = run_cmd("find /data/sdt/SDT/jenkins-artifacts/cmssw-afs-eos-comparison -mindepth 1 -maxdepth 1 -name '*.json' -type f") -for jfile in logs.split('\n'): - if not jfile: continue - print("Processing file",jfile) - payload = {} - try: - payload = json.load(open(jfile)) - except ValueError as err: - print(err) - run_cmd("rm -f %s" % jfile) - continue - week, rel_sec = cmsswIB2Week (payload["release"]) - payload["@timestamp"]=rel_sec*1000 - id = sha1(("%s-%s-%s" % (payload["release"], payload["architecture"], payload["fstype"])).encode()).hexdigest() - print(payload) - if send_payload("cmssw-afs-eos-%s" % week,"build",id,json.dumps(payload)): - run_cmd("rm -f %s" % jfile) +err, logs = run_cmd( + "find /data/sdt/SDT/jenkins-artifacts/cmssw-afs-eos-comparison -mindepth 1 -maxdepth 1 -name '*.json' -type f" +) +for jfile in logs.split("\n"): + if not jfile: + continue + print("Processing file", jfile) + payload = {} + try: + payload = json.load(open(jfile)) + except ValueError as err: + print(err) + run_cmd("rm -f %s" % jfile) + continue + week, rel_sec = cmsswIB2Week(payload["release"]) + payload["@timestamp"] = rel_sec * 1000 + id = sha1( + ("%s-%s-%s" % (payload["release"], payload["architecture"], payload["fstype"])).encode() + ).hexdigest() + print(payload) + if send_payload("cmssw-afs-eos-%s" % week, 
"build", id, json.dumps(payload)): + run_cmd("rm -f %s" % jfile) diff --git a/jenkins-jobs/git/git-mirror-repository b/jenkins-jobs/git/git-mirror-repository deleted file mode 100755 index 6f3342fa38d2..000000000000 --- a/jenkins-jobs/git/git-mirror-repository +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import print_function -from os.path import abspath, dirname -from sys import argv, exit, path -from os import environ - -path.append(dirname(dirname(dirname(abspath(__file__))))) # in order to import top level modules -from _py2with3compatibility import run_cmd, Request, urlopen, quote_plus - -repo = argv[1] -e, o = run_cmd('git ls-remote -h "https://:@gitlab.cern.ch:8443/%s" 2>&1 | grep "refs/heads/" | wc -l' % repo) -if o=='0': - print("Mirror repository not found:", repo) - exit(0) - -TOKEN_FILE="/data/secrets/cmsbuild-gitlab-secret" -if 'GITLAB_SECRET_TOKEN' in environ: TOKEN_FILE=environ['GITLAB_SECRET_TOKEN'] -url='https://gitlab.cern.ch/api/v4/projects/%s/mirror/pull' % quote_plus(repo) -headers= {"PRIVATE-TOKEN": open(TOKEN_FILE).read().strip()} -request = Request(url, headers=headers) -request.get_method = lambda: "POST" -response = urlopen(request) -print(response.read()) diff --git a/jenkins-jobs/git/git-mirror-repository b/jenkins-jobs/git/git-mirror-repository new file mode 120000 index 000000000000..c120bd39e2f8 --- /dev/null +++ b/jenkins-jobs/git/git-mirror-repository @@ -0,0 +1 @@ +git-mirror-repository.py \ No newline at end of file diff --git a/jenkins-jobs/git/git-mirror-repository.py b/jenkins-jobs/git/git-mirror-repository.py new file mode 100755 index 000000000000..c09f4442e23f --- /dev/null +++ b/jenkins-jobs/git/git-mirror-repository.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +from __future__ import print_function +from os.path import abspath, dirname +from sys import argv, exit, path +from os import environ + +path.append(dirname(dirname(dirname(abspath(__file__))))) # in order to import top level modules +from _py2with3compatibility import run_cmd, Request, urlopen, quote_plus + +repo = argv[1] +e, o = run_cmd( + 'git ls-remote -h "https://:@gitlab.cern.ch:8443/%s" 2>&1 | grep "refs/heads/" | wc -l' % repo +) +if o == "0": + print("Mirror repository not found:", repo) + exit(0) + +TOKEN_FILE = "/data/secrets/cmsbuild-gitlab-secret" +if "GITLAB_SECRET_TOKEN" in environ: + TOKEN_FILE = environ["GITLAB_SECRET_TOKEN"] +url = "https://gitlab.cern.ch/api/v4/projects/%s/mirror/pull" % quote_plus(repo) +headers = {"PRIVATE-TOKEN": open(TOKEN_FILE).read().strip()} +request = Request(url, headers=headers) +request.get_method = lambda: "POST" +response = urlopen(request) +print(response.read()) diff --git a/jenkins-jobs/git/git-notify-ib-updates b/jenkins-jobs/git/git-notify-ib-updates deleted file mode 100755 index b157188ce14a..000000000000 --- a/jenkins-jobs/git/git-notify-ib-updates +++ /dev/null @@ -1,220 +0,0 @@ -#!/usr/bin/env python3 - -import datetime -import json -import logging -import os -import sys -import urllib.error - -from github_utils import github_api -from urllib.error import HTTPError -# noinspection PyUnresolvedReferences -import libib - -# noinspection PyUnresolvedReferences -from libib import PackageInfo, ErrorInfo - -try: - current_shifter = libib.fetch("/SDT/shifter.txt", content_type=libib.ContentType.TEXT) - exit_code = 0 -except (urllib.error.URLError, urllib.error.HTTPError) as e: - print("WARNING: failed to get current_shifter!") - print(e) - current_shifter = "all" - exit_code = 1 - -cache_root = 
os.path.join(os.getenv("JENKINS_HOME"), "workspace/cache/cms-ib-notifier") - -header = f"@{current_shifter} New IB failures found, please check:" - -table_header = """# {rel}-{ib_date} for {arch} -| Error | Additional data | -| --- | --- |""" - -max_report_lines = 19 - - -def get_commit_info(repo, commit): - return github_api( - uri="/repos/{}/commits/{}".format(repo, commit), method="GET", all_pages=False - ) - - -def isoparse(strDate): - return datetime.datetime.strptime(strDate, "%Y-%m-%dT%H:%M:%SZ") - - -def main(): - workspace = os.getenv("WORKSPACE", os.getcwd()) - os.makedirs(cache_root, exist_ok=True) - - libib.setup_logging(logging.DEBUG) - libib.get_exitcodes() - - ib_dates = libib.get_ib_dates("default") - - commit_dates = {} - - changed_rels = set() - for commit_id in sys.argv[1:]: - print("Processing commit {}".format(commit_id)) - commit_info = get_commit_info("cms-sw/cms-sw.github.io", commit_id) - if "sha" not in commit_info: - print("Invalid or missing commit-id {}".format(commit_id)) - continue - try: - commit_author = commit_info["commit"]["author"] - except KeyError: - print("Commit {} has no author!".format(commit_id)) - continue - if commit_author["email"] != "cmsbuild@cern.ch": - print( - "Commit {} not from cmsbuild: {} <{}>".format( - commit_id, commit_author["name"], commit_author["email"] - ) - ) - continue - - commit_date = libib.date_fromisoformat(commit_author["date"]) - commit_dates[commit_id] = commit_date - - for change in commit_info["files"]: - if not change["filename"].startswith("_data/CMSSW"): - continue - relname = change["filename"].replace("_data/", "").replace(".json", "") - print("Release {} changed".format(relname)) - changed_rels.add(relname) - - if len(changed_rels) == 0: - print("No releases changed") - exit(0) - - newest_commit_date = max(commit_dates.values()) - newest_commit_id = [k for k, v in commit_dates.items() if v == newest_commit_date] - newest_commit_id = newest_commit_id[0] - - first_report = True - payload_index = 0 - - for ib_date in ib_dates: - for rel in changed_rels: - old_data_file = os.path.join(cache_root, "{}.json".format(rel)) - old_result = None - if os.path.exists(old_data_file): - print(f"Loading cached release {rel}") - old_data = json.load(open(old_data_file, "r")) - old_comparision = libib.get_ib_results(ib_date, None, old_data) - if old_comparision: - _, old_result = libib.check_ib(old_comparision) - else: - print(f"No cache for release {rel}") - - try: - new_data = libib.fetch( - f"https://github.com/cms-sw/cms-sw.github.io/raw/{newest_commit_id}/data%2F{rel}.json" - ) - except HTTPError as e: - if e.code == 404: - print(f"Release {rel} not found on github!") - continue - else: - raise - new_comparision = libib.get_ib_results(ib_date, None, new_data) - if new_comparision is None: - continue - - _, new_result = libib.check_ib(new_comparision) - - for arch in new_result: - arch_report = [] - for error in new_result[arch]["build"]: - if ( - old_result - and arch in old_result - and error in old_result[arch]["build"] - ): - continue - - arch_report.append( - f"| [{error.name}]({error.url}) | {error.data[1]}x {error.data[0]} | " - ) - - for error in new_result[arch]["utest"]: - if ( - old_result - and arch in old_result - and error in old_result[arch]["utest"] - ): - continue - - arch_report.append( - f"| [{error.name}]({error.url}) | TBD | " - ) - - for error in new_result[arch]["relval"]: - if ( - old_result - and arch in old_result - and error in old_result[arch]["relval"] - ): - continue - - arch_report.append( - 
f"| [{error.name}]({error.url}) | {error.data} | " - ) - - if len(arch_report) > (max_report_lines + 1): - arch_report_l = len(arch_report) - arch_report = arch_report[:max_report_lines] - arch_report.append( - f"| :warning: {arch_report_l-max_report_lines} error(s) more | Check IB status page | " - ) - - if arch_report: - arch_report.insert( - 0, table_header.format(rel=rel, ib_date=ib_date.rsplit("-", 1)[0], arch=arch) - ) - if first_report: - arch_report.insert(0, header) - first_report = False - - payload = {"text": "\n".join(arch_report)} - jsondata = json.dumps(payload).encode("utf-8") - with open( - f"{workspace}/report_{payload_index:03d}.json", "wb" - ) as f: - f.write(jsondata) - - payload_index += 1 - - if payload_index > 0: - with open(f"{workspace}/submit.sh", "w") as f: - print("#!/bin/bash", file=f) - for i in range(payload_index): - print( - f'curl -H "Content-Type: application/json" --data-binary "@report_{i:03d}.json" $MM_WEBHOOK_URL', file=f - ) - print(f"rm -f report_{i:03d}.json", file=f) - else: - if os.path.exists(f"{workspace}/submit.sh"): - os.unlink(f"{workspace}/submit.sh") - - # Save new json files - for rel in changed_rels: - url_ = f"https://github.com/cms-sw/cms-sw.github.io/raw/{newest_commit_id}/_data%2F{rel}.json" - try: - data = libib.fetch(url_, libib.ContentType.TEXT) - except urllib.error.HTTPError as e: - if e.code != 404: - raise - else: - pass - else: - with open(os.path.join(cache_root, "{}.json".format(rel)), "w") as f: - f.write(data) - - -if __name__ == "__main__": - main() - exit(exit_code) diff --git a/jenkins-jobs/git/git-notify-ib-updates b/jenkins-jobs/git/git-notify-ib-updates new file mode 120000 index 000000000000..dd0bfc1a72ed --- /dev/null +++ b/jenkins-jobs/git/git-notify-ib-updates @@ -0,0 +1 @@ +git-notify-ib-updates.py \ No newline at end of file diff --git a/jenkins-jobs/git/git-notify-ib-updates.py b/jenkins-jobs/git/git-notify-ib-updates.py new file mode 100755 index 000000000000..b894f9abf21e --- /dev/null +++ b/jenkins-jobs/git/git-notify-ib-updates.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 + +import datetime +import json +import logging +import os +import sys +import urllib.error + +from github_utils import github_api +from urllib.error import HTTPError + +# noinspection PyUnresolvedReferences +import libib + +# noinspection PyUnresolvedReferences +from libib import PackageInfo, ErrorInfo + +try: + current_shifter = libib.fetch("/SDT/shifter.txt", content_type=libib.ContentType.TEXT) + exit_code = 0 +except (urllib.error.URLError, urllib.error.HTTPError) as e: + print("WARNING: failed to get current_shifter!") + print(e) + current_shifter = "all" + exit_code = 1 + +cache_root = os.path.join(os.getenv("JENKINS_HOME"), "workspace/cache/cms-ib-notifier") + +header = f"@{current_shifter} New IB failures found, please check:" + +table_header = """# {rel}-{ib_date} for {arch} +| Error | Additional data | +| --- | --- |""" + +max_report_lines = 19 + + +def get_commit_info(repo, commit): + return github_api( + uri="/repos/{}/commits/{}".format(repo, commit), method="GET", all_pages=False + ) + + +def isoparse(strDate): + return datetime.datetime.strptime(strDate, "%Y-%m-%dT%H:%M:%SZ") + + +def main(): + workspace = os.getenv("WORKSPACE", os.getcwd()) + os.makedirs(cache_root, exist_ok=True) + + libib.setup_logging(logging.DEBUG) + libib.get_exitcodes() + + ib_dates = libib.get_ib_dates("default") + + commit_dates = {} + + changed_rels = set() + for commit_id in sys.argv[1:]: + print("Processing commit {}".format(commit_id)) + 
commit_info = get_commit_info("cms-sw/cms-sw.github.io", commit_id) + if "sha" not in commit_info: + print("Invalid or missing commit-id {}".format(commit_id)) + continue + try: + commit_author = commit_info["commit"]["author"] + except KeyError: + print("Commit {} has no author!".format(commit_id)) + continue + if commit_author["email"] != "cmsbuild@cern.ch": + print( + "Commit {} not from cmsbuild: {} <{}>".format( + commit_id, commit_author["name"], commit_author["email"] + ) + ) + continue + + commit_date = libib.date_fromisoformat(commit_author["date"]) + commit_dates[commit_id] = commit_date + + for change in commit_info["files"]: + if not change["filename"].startswith("_data/CMSSW"): + continue + relname = change["filename"].replace("_data/", "").replace(".json", "") + print("Release {} changed".format(relname)) + changed_rels.add(relname) + + if len(changed_rels) == 0: + print("No releases changed") + exit(0) + + newest_commit_date = max(commit_dates.values()) + newest_commit_id = [k for k, v in commit_dates.items() if v == newest_commit_date] + newest_commit_id = newest_commit_id[0] + + first_report = True + payload_index = 0 + + for ib_date in ib_dates: + for rel in changed_rels: + old_data_file = os.path.join(cache_root, "{}.json".format(rel)) + old_result = None + if os.path.exists(old_data_file): + print(f"Loading cached release {rel}") + old_data = json.load(open(old_data_file, "r")) + old_comparision = libib.get_ib_results(ib_date, None, old_data) + if old_comparision: + _, old_result = libib.check_ib(old_comparision) + else: + print(f"No cache for release {rel}") + + try: + new_data = libib.fetch( + f"https://github.com/cms-sw/cms-sw.github.io/raw/{newest_commit_id}/data%2F{rel}.json" + ) + except HTTPError as e: + if e.code == 404: + print(f"Release {rel} not found on github!") + continue + else: + raise + new_comparision = libib.get_ib_results(ib_date, None, new_data) + if new_comparision is None: + continue + + _, new_result = libib.check_ib(new_comparision) + + for arch in new_result: + arch_report = [] + for error in new_result[arch]["build"]: + if old_result and arch in old_result and error in old_result[arch]["build"]: + continue + + arch_report.append( + f"| [{error.name}]({error.url}) | {error.data[1]}x {error.data[0]} | " + ) + + for error in new_result[arch]["utest"]: + if old_result and arch in old_result and error in old_result[arch]["utest"]: + continue + + arch_report.append(f"| [{error.name}]({error.url}) | TBD | ") + + for error in new_result[arch]["relval"]: + if old_result and arch in old_result and error in old_result[arch]["relval"]: + continue + + arch_report.append(f"| [{error.name}]({error.url}) | {error.data} | ") + + if len(arch_report) > (max_report_lines + 1): + arch_report_l = len(arch_report) + arch_report = arch_report[:max_report_lines] + arch_report.append( + f"| :warning: {arch_report_l-max_report_lines} error(s) more | Check IB status page | " + ) + + if arch_report: + arch_report.insert( + 0, + table_header.format(rel=rel, ib_date=ib_date.rsplit("-", 1)[0], arch=arch), + ) + if first_report: + arch_report.insert(0, header) + first_report = False + + payload = {"text": "\n".join(arch_report)} + jsondata = json.dumps(payload).encode("utf-8") + with open(f"{workspace}/report_{payload_index:03d}.json", "wb") as f: + f.write(jsondata) + + payload_index += 1 + + if payload_index > 0: + with open(f"{workspace}/submit.sh", "w") as f: + print("#!/bin/bash", file=f) + for i in range(payload_index): + print( + f'curl -H "Content-Type: 
application/json" --data-binary "@report_{i:03d}.json" $MM_WEBHOOK_URL', + file=f, + ) + print(f"rm -f report_{i:03d}.json", file=f) + else: + if os.path.exists(f"{workspace}/submit.sh"): + os.unlink(f"{workspace}/submit.sh") + + # Save new json files + for rel in changed_rels: + url_ = ( + f"https://github.com/cms-sw/cms-sw.github.io/raw/{newest_commit_id}/_data%2F{rel}.json" + ) + try: + data = libib.fetch(url_, libib.ContentType.TEXT) + except urllib.error.HTTPError as e: + if e.code != 404: + raise + else: + pass + else: + with open(os.path.join(cache_root, "{}.json".format(rel)), "w") as f: + f.write(data) + + +if __name__ == "__main__": + main() + exit(exit_code) diff --git a/jenkins/jenkins-kill-placeholder-job.py b/jenkins/jenkins-kill-placeholder-job.py index 0cf160f1a6b1..71a35e008a57 100755 --- a/jenkins/jenkins-kill-placeholder-job.py +++ b/jenkins/jenkins-kill-placeholder-job.py @@ -7,24 +7,28 @@ from pprint import pprint import re, sys from os.path import dirname, abspath, join, exists + SCRIPT_DIR = dirname(abspath(sys.argv[0])) CMS_BOT_DIR = dirname(SCRIPT_DIR) -sys.path.insert(0,CMS_BOT_DIR) +sys.path.insert(0, CMS_BOT_DIR) from xml.etree import cElementTree as ET import requests from collections import defaultdict from os import environ from _py2with3compatibility import run_cmd -RX_Project = re.compile('.+\/job\/(.+)\/(\d+)\/') -RX_Queue_why = re.compile(u'^Waiting for next available executor.*\u2018(.*)\u2019') -RX_Queue_nolabel = re.compile(u'^There are no nodes with the label.*\u2018(.*)\u2019') -JENKINS_URL = environ['LOCAL_JENKINS_URL'] -WORKSPACE = environ['WORKSPACE'] -running_job_xml = JENKINS_URL + '/api/xml?&tree=jobs[builds[url,building]]&xpath=/hudson/job/build[building="true"]&wrapper=jobs' -job_que_json = JENKINS_URL + '/queue/api/json?tree=items[url,why]' +RX_Project = re.compile(".+\/job\/(.+)\/(\d+)\/") +RX_Queue_why = re.compile("^Waiting for next available executor.*\u2018(.*)\u2019") +RX_Queue_nolabel = re.compile("^There are no nodes with the label.*\u2018(.*)\u2019") +JENKINS_URL = environ["LOCAL_JENKINS_URL"] +WORKSPACE = environ["WORKSPACE"] +running_job_xml = ( + JENKINS_URL + + '/api/xml?&tree=jobs[builds[url,building]]&xpath=/hudson/job/build[building="true"]&wrapper=jobs' +) +job_que_json = JENKINS_URL + "/queue/api/json?tree=items[url,why]" node_labels = {} -JENKNS_REQ_HEADER = {"OIDC_CLAIM_CERN_UPN":"cmssdt"} +JENKNS_REQ_HEADER = {"OIDC_CLAIM_CERN_UPN": "cmssdt"} def etree_to_dict(t): @@ -35,54 +39,60 @@ def etree_to_dict(t): for dc in map(etree_to_dict, children): for k, v in dc.items(): dd[k].append(v) - d = {t.tag: {k: v[0] if len(v) == 1 else v - for k, v in dd.items()}} + d = {t.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}} if t.attrib: - d[t.tag].update(('@' + k, v) - for k, v in t.attrib.items()) + d[t.tag].update(("@" + k, v) for k, v in t.attrib.items()) if t.text: text = t.text.strip() if children or t.attrib: if text: - d[t.tag]['#text'] = text + d[t.tag]["#text"] = text else: d[t.tag] = text return d + def read_auto_nodes(): - nodes_jobs = {} - auto_nodes = join(SCRIPT_DIR, 'auto-nodes.txt') - if exists(auto_nodes): - for line in open(auto_nodes).readlines(): - if '=' not in line: continue - reg, job = line.strip().split('=',1) - nodes_jobs[re.compile(reg.strip())] = job.strip() - return nodes_jobs + nodes_jobs = {} + auto_nodes = join(SCRIPT_DIR, "auto-nodes.txt") + if exists(auto_nodes): + for line in open(auto_nodes).readlines(): + if "=" not in line: + continue + reg, job = line.strip().split("=", 1) + 
nodes_jobs[re.compile(reg.strip())] = job.strip() + return nodes_jobs + def auto_node_schedule(auto_jobs): - count=0 + count = 0 for job in auto_jobs: jid = auto_jobs[job] - err, out = run_cmd("cat %s/jenkins/find-jenkins-job.groovy | %s groovy = '%s' 'JENKINS_DYNAMIC_JOB_ID=%s'" % (CMS_BOT_DIR, environ['JENKINS_CLI_CMD'],job,jid)) + err, out = run_cmd( + "cat %s/jenkins/find-jenkins-job.groovy | %s groovy = '%s' 'JENKINS_DYNAMIC_JOB_ID=%s'" + % (CMS_BOT_DIR, environ["JENKINS_CLI_CMD"], job, jid) + ) if err: - count+=1 - prop_file = "jenkins-trigger-dynamic-job-%s.txt" % count - jpram = join(SCRIPT_DIR, 'auto-nodes', job) - run_cmd("echo 'JENKINS_DYNAMIC_JOB_NAME=%s' > %s" % (job, prop_file)) - run_cmd("echo 'JENKINS_DYNAMIC_JOB_ID=%s' >> %s" % (jid, prop_file)) - if exists (jpram): - run_cmd("cat %s >> %s" % (jpram, prop_file)) + count += 1 + prop_file = "jenkins-trigger-dynamic-job-%s.txt" % count + jpram = join(SCRIPT_DIR, "auto-nodes", job) + run_cmd("echo 'JENKINS_DYNAMIC_JOB_NAME=%s' > %s" % (job, prop_file)) + run_cmd("echo 'JENKINS_DYNAMIC_JOB_ID=%s' >> %s" % (jid, prop_file)) + if exists(jpram): + run_cmd("cat %s >> %s" % (jpram, prop_file)) else: print(out) return + def get_nodes(label): - if label not in node_labels: - url = "%s/label/%s/api/json?pretty=true" % (JENKINS_URL, label) - r_json = requests.get(url, headers=JENKNS_REQ_HEADER) - node_labels[label] = r_json.json() - print("Nodes to match label ",node_labels[label]['nodes']) - return node_labels[label]['nodes'] + if label not in node_labels: + url = "%s/label/%s/api/json?pretty=true" % (JENKINS_URL, label) + r_json = requests.get(url, headers=JENKNS_REQ_HEADER) + node_labels[label] = r_json.json() + print("Nodes to match label ", node_labels[label]["nodes"]) + return node_labels[label]["nodes"] + def main(): auto_nodes = read_auto_nodes() @@ -94,29 +104,30 @@ def main(): print("Queued jobs:", job_que_json) pprint(r_json.json()) print("----") - que_job_list = r_json.json()['items'] + que_job_list = r_json.json()["items"] auto_jobs = {} for j in que_job_list: label = "" found = False - m = RX_Queue_why.match(j['why']) + m = RX_Queue_why.match(j["why"]) if m: - label = m.group(1) - print("Waiting for",label) - for node in get_nodes(label): - if re.match('^grid[1-9][0-9]*$', node['nodeName']): - print(" Matched ",node) - found = True - break - m1 = RX_Queue_nolabel.match(j['why']) - if not label and m1 : label = m1.group(1) + label = m.group(1) + print("Waiting for", label) + for node in get_nodes(label): + if re.match("^grid[1-9][0-9]*$", node["nodeName"]): + print(" Matched ", node) + found = True + break + m1 = RX_Queue_nolabel.match(j["why"]) + if not label and m1: + label = m1.group(1) if label: print("Checking label:", label) if found: que_to_free += 1 for reg in auto_nodes: if reg.search(label): - auto_jobs[auto_nodes[reg]] = j['url'] + auto_jobs[auto_nodes[reg]] = j["url"] break print("Number jobs needed to free") @@ -130,16 +141,16 @@ def main(): # get running placeholder job xml = ET.XML(r_xml.text) parsed_dict = etree_to_dict(xml) - print("Running jobs", running_job_xml ) + print("Running jobs", running_job_xml) pprint(parsed_dict) jobs_to_kill = [] - if not isinstance(parsed_dict['jobs']['build'],list): - parsed_dict['jobs']['build']=[parsed_dict['jobs']['build']] - for el in parsed_dict['jobs']['build']: - match = RX_Project.match(el['url']) + if not isinstance(parsed_dict["jobs"]["build"], list): + parsed_dict["jobs"]["build"] = [parsed_dict["jobs"]["build"]] + for el in parsed_dict["jobs"]["build"]: + match = 
RX_Project.match(el["url"]) project = match.group(1) j_number = match.group(2) - if 'grid-keep-node-busy' != project: + if "grid-keep-node-busy" != project: continue jobs_to_kill.append([project, j_number]) print("Jobs to kill:") @@ -148,9 +159,9 @@ def main(): # create property file for each job to be killed for i in range(0, min(que_to_free, len(jobs_to_kill))): - with open("{0}/job-to-kill-{1}.txt".format(WORKSPACE, i), 'w') as f: + with open("{0}/job-to-kill-{1}.txt".format(WORKSPACE, i), "w") as f: f.write("JENKINS_PROJECT_TO_KILL={0}\nBUILD_NR={1}\n".format(*jobs_to_kill[i])) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/jenkins/jenkins-project-report-to-markdown.py b/jenkins/jenkins-project-report-to-markdown.py index 50f36375e25e..1dd0e219f0a5 100755 --- a/jenkins/jenkins-project-report-to-markdown.py +++ b/jenkins/jenkins-project-report-to-markdown.py @@ -13,8 +13,8 @@ parents = defaultdict(list) # global parameters -jenkins_home = 'https://cmssdt.cern.ch/jenkins/' -split_pat = '\*' # to match bullet list in markdown +jenkins_home = "https://cmssdt.cern.ch/jenkins/" +split_pat = "\*" # to match bullet list in markdown # markdown_output_dir = '/tmp/jenkins_reports/' markdown_output_dir_name = "jenkins_reports" @@ -50,51 +50,54 @@ def link_to_project(value): def write_markdown_file(view_data_dict, all_project_dict, markdown_output_dir): - view_name = view_data_dict['name'] - - with open(markdown_output_dir + '/' + view_name.replace(' ', '-') + ".md", 'w') as output_f: + view_name = view_data_dict["name"] + with open(markdown_output_dir + "/" + view_name.replace(" ", "-") + ".md", "w") as output_f: # write view description output_f.write("# [{0}]({1})\n\n".format(view_name, link_to_view(view_name))) - output_f.write("**View description:** {0}\n\n".format(view_data_dict['description'])) - output_f.write("**View type:** {0}\n\n".format(view_data_dict['view_type'])) + output_f.write("**View description:** {0}\n\n".format(view_data_dict["description"])) + output_f.write("**View type:** {0}\n\n".format(view_data_dict["view_type"])) output_f.write("---\n\n") output_f.write("# Projects:\n\n") # write project description - for project in view_data_dict['project_names']: + for project in view_data_dict["project_names"]: project_data = all_project_dict[project] output_f.write("## [{0}]({1})\n\n".format(project, link_to_project(project))) - output_f.write("**Description:** {0}\n\n".format( - project_data['project_desc'] if project_data['project_desc'] else None - )) + output_f.write( + "**Description:** {0}\n\n".format( + project_data["project_desc"] if project_data["project_desc"] else None + ) + ) # is project disabled - status = 'disabled' if project_data['is_disabled'] else 'enabled' + status = "disabled" if project_data["is_disabled"] else "enabled" output_f.write("**Project is `{0}`.**\n\n".format(status)) output_f.write("**Upstream projects:**\n") - for pr in project_data['upstream']: + for pr in project_data["upstream"]: output_f.write("* [{0}](#{0}):\n".format(pr)) output_f.write("\n") output_f.write("**Downstream projects:**\n") - for pr in project_data['downstream']: + for pr in project_data["downstream"]: output_f.write("* [{0}](#{0}):\n".format(pr)) output_f.write("\n") output_f.write("**Sub-projects:**\n") - for pr in project_data['subprojects']: + for pr in project_data["subprojects"]: output_f.write("* [{0}](#{0}):\n".format(pr)) output_f.write("\n") # TODO look what for trigger is - output_f.write("**Triggers from:** 
{0}\n\n".format(project_data['triggers_from'])) + output_f.write("**Triggers from:** {0}\n\n".format(project_data["triggers_from"])) - cron_tabs_list = project_data['scheduled_triggers'] - cron_message = cron_tabs_list[0][1] if len(cron_tabs_list) > 0 else "Not periodically build" + cron_tabs_list = project_data["scheduled_triggers"] + cron_message = ( + cron_tabs_list[0][1] if len(cron_tabs_list) > 0 else "Not periodically build" + ) periodic_builds_message = """ **Periodic builds:** ```bash @@ -103,7 +106,9 @@ def write_markdown_file(view_data_dict, all_project_dict, markdown_output_dir): --- -""".format(cron_message) +""".format( + cron_message + ) output_f.write(periodic_builds_message) @@ -114,7 +119,9 @@ def write_readme(markdown_output_dir): This is automatically generated documentation of Jenkins jobs. **All changes in this directory will be overwritten by scheduled job.** In oder to update the documentation, edit project description in Jenkins instead. -""".format(project_report_section_name) +""".format( + project_report_section_name + ) with open(markdown_output_dir + "/README.md", "w") as output_f: output_f.write(readme_message) @@ -125,19 +132,19 @@ def create_uncategorized_view(view_dict, all_project_dict): for _, project in all_project_dict.items(): is_uncategorized = True for _, view_data_dict in view_dict.items(): - if view_data_dict['name'] == "All": + if view_data_dict["name"] == "All": continue - if project['project_name'] in view_data_dict['project_names']: + if project["project_name"] in view_data_dict["project_names"]: is_uncategorized = False break if is_uncategorized: - uncategorized_p_list.append(project['project_name']) + uncategorized_p_list.append(project["project_name"]) return { - 'name': "Uncategorized", - 'view_type': "Custom", - 'description': "This view contains all projects that were not categorized.", - 'project_names': uncategorized_p_list + "name": "Uncategorized", + "view_type": "Custom", + "description": "This view contains all projects that were not categorized.", + "project_names": uncategorized_p_list, } @@ -168,18 +175,20 @@ def main(args): # loads data to dictionary data_dict = json.loads(txt) - data_dict['views']['uncategorized'] = create_uncategorized_view(data_dict['views'], data_dict['projects']) + data_dict["views"]["uncategorized"] = create_uncategorized_view( + data_dict["views"], data_dict["projects"] + ) # create README.md for folder write_readme(markdown_output_dir) # create markdown files - for view_key, view_data_dict in data_dict['views'].items(): - write_markdown_file(view_data_dict, data_dict['projects'], markdown_output_dir) - views_names_list.append(view_data_dict['name']) + for view_key, view_data_dict in data_dict["views"].items(): + write_markdown_file(view_data_dict, data_dict["projects"], markdown_output_dir) + views_names_list.append(view_data_dict["name"]) # edit summary.md in wiki dir to include generated report try: - sum_f = open(wiki_dir + '/SUMMARY.md', "r+") + sum_f = open(wiki_dir + "/SUMMARY.md", "r+") summary_lines = sum_f.readlines() except Exception as e: print("Error reading the SUMMARY.md file" + e) @@ -193,21 +202,27 @@ def main(args): # write new summary indent_size = len(re.split(split_pat, line, 1)[0]) config_dict = { - 'indentation': ' ' * indent_size, - 'view_name': project_report_section_name, - 'file_name': "README.md", - 'report_dir': markdown_output_dir_name + "indentation": " " * indent_size, + "view_name": project_report_section_name, + "file_name": "README.md", + "report_dir": 
markdown_output_dir_name, } - sum_f.write("{indentation}* [{view_name}]({report_dir}/{file_name})\n".format(**config_dict)) + sum_f.write( + "{indentation}* [{view_name}]({report_dir}/{file_name})\n".format(**config_dict) + ) for name in views_names_list: config_dict = { - 'indentation': ' ' * (indent_size + 2), - 'view_name': name, - 'file_name': name.replace(' ', '-') + ".md", - 'report_dir': markdown_output_dir_name + "indentation": " " * (indent_size + 2), + "view_name": name, + "file_name": name.replace(" ", "-") + ".md", + "report_dir": markdown_output_dir_name, } - sum_f.write("{indentation}* [{view_name}]({report_dir}/{file_name})\n".format(**config_dict)) + sum_f.write( + "{indentation}* [{view_name}]({report_dir}/{file_name})\n".format( + **config_dict + ) + ) # discard old entries of Jenkins projects is_old_line = True @@ -228,5 +243,5 @@ def main(args): sum_f.close() -if __name__ == '__main__': +if __name__ == "__main__": main(sys.argv[1:]) diff --git a/jenkins/parser/actions.py b/jenkins/parser/actions.py index c09d03e6fa00..dbd585561d48 100644 --- a/jenkins/parser/actions.py +++ b/jenkins/parser/actions.py @@ -9,21 +9,15 @@ email_addresses = "cms-sdt-logs@cern.ch" -html_file_path = ( - os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-web-info.json" -) +html_file_path = os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-web-info.json" retry_url_file = ( os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-retry-info.json" ) -retry_queue_path = ( - os.environ.get("HOME") + "/builds/jenkins-test-parser/retry_queue.json" -) +retry_queue_path = os.environ.get("HOME") + "/builds/jenkins-test-parser/retry_queue.json" def send_email(email_msg, email_subject, email_addresses): - email_cmd = ( - 'echo "' + email_msg + '" | mail -s "' + email_subject + '" ' + email_addresses - ) + email_cmd = 'echo "' + email_msg + '" | mail -s "' + email_subject + '" ' + email_addresses print(email_cmd) os.system(email_cmd) @@ -32,9 +26,7 @@ def trigger_create_gridnode_action(node_name): node_config_path = os.environ.get("HOME") + "/nodes/" + node_name + "/config.xml" if helpers.grep(node_config_path, "auto-recreate", True): print("Recreating grid node ...") - trigger_create_gridnode = ( - os.environ.get("JENKINS_CLI_CMD") + " build grid-create-node" - ) + trigger_create_gridnode = os.environ.get("JENKINS_CLI_CMD") + " build grid-create-node" print(trigger_create_gridnode) os.system(trigger_create_gridnode) else: @@ -54,13 +46,9 @@ def trigger_retry_action( ): # Skip autoretry if Jenkins already retries, unless connection issue. if regex not in force_retry_regex: - if helpers.grep( - os.path.join(build_dir_path, "build.xml"), "", True - ): + if helpers.grep(os.path.join(build_dir_path, "build.xml"), "", True): print("... Jenkins already takes care of retrying. 
Skipping ...") - if helpers.grep( - os.path.join(build_dir_path, "build.xml"), "", True - ): + if helpers.grep(os.path.join(build_dir_path, "build.xml"), "", True): return # Update description of the failed job update_label = ( @@ -95,13 +83,11 @@ def trigger_retry_action( ) elif action == "retryLate": # Store retry command into a file - print( - "This failure will be retried with a delay of " + str(delay_time) + " min" - ) + print("This failure will be retried with a delay of " + str(delay_time) + " min") retry_entry = job_to_retry + "#" + build_to_retry - retry_time = datetime.datetime.now().replace( - microsecond=0 - ) + datetime.timedelta(minutes=delay_time) + retry_time = datetime.datetime.now().replace(microsecond=0) + datetime.timedelta( + minutes=delay_time + ) retry_object["retryQueue"][retry_entry] = {} retry_object["retryQueue"][retry_entry]["retryTime"] = str(retry_time) retry_object["retryQueue"][retry_entry]["retryCommand"] = trigger_retry @@ -128,11 +114,7 @@ def trigger_retry_action( def trigger_nodeoff_action(job_to_retry, build_to_retry, job_url, node_name): nodeoff_msg = "'Node\ marked\ as\ offline\ beacuse\ of\ " + job_url + "'" take_nodeoff = ( - os.environ.get("JENKINS_CLI_CMD") - + " offline-node " - + node_name - + " -m " - + nodeoff_msg + os.environ.get("JENKINS_CLI_CMD") + " offline-node " + node_name + " -m " + nodeoff_msg ) print(take_nodeoff) os.system(take_nodeoff) @@ -153,15 +135,9 @@ def trigger_nodeoff_action(job_to_retry, build_to_retry, job_url, node_name): def trigger_reconnect_action(job_to_retry, build_to_retry, job_url, node_name): nodeoff_msg = "'Node\ reconnected\ by\ " + job_url + "'" disconnect_node = ( - os.environ.get("JENKINS_CLI_CMD") - + " disconnect-node " - + node_name - + " -m " - + nodeoff_msg - ) - connect_node = ( - os.environ.get("JENKINS_CLI_CMD") + " connect-node " + node_name + " -f" + os.environ.get("JENKINS_CLI_CMD") + " disconnect-node " + node_name + " -m " + nodeoff_msg ) + connect_node = os.environ.get("JENKINS_CLI_CMD") + " connect-node " + node_name + " -f" print(disconnect_node) os.system(disconnect_node) time.sleep(10) @@ -181,9 +157,7 @@ def trigger_reconnect_action(job_to_retry, build_to_retry, job_url, node_name): os.system(update_label) -def notify_nodeoff( - node_name, regex, job_to_retry, build_to_retry, job_url, node_url, parser_url -): +def notify_nodeoff(node_name, regex, job_to_retry, build_to_retry, job_url, node_url, parser_url): email_msg = ( "Node " + node_name @@ -227,9 +201,7 @@ def notify_nodereconnect( send_email(email_msg, email_subject, email_addresses) -def notify_pendingbuild( - display_name, build_to_retry, job_to_retry, duration, job_url, parser_url -): +def notify_pendingbuild(display_name, build_to_retry, job_to_retry, duration, job_url, parser_url): email_msg = ( "Build" + display_name @@ -246,17 +218,10 @@ def notify_pendingbuild( ) email_subject = ( - "Pending build " - + display_name - + " (#" - + build_to_retry - + ") from job " - + job_to_retry + "Pending build " + display_name + " (#" + build_to_retry + ") from job " + job_to_retry ) - email_cmd = ( - 'echo "' + email_msg + '" | mail -s "' + email_subject + '" ' + email_addresses - ) + email_cmd = 'echo "' + email_msg + '" | mail -s "' + email_subject + '" ' + email_addresses send_email(email_msg, email_subject, email_addresses) @@ -310,7 +275,6 @@ def notify_noaction(display_name, job_to_retry, build_to_retry, job_url): def update_cmssdt_page( html_file, job, build, error, job_url, retry_url, action, refresh_only=False ): - try: with 
open(html_file, "r") as openfile: json_object = json.load(openfile) @@ -323,7 +287,6 @@ def update_cmssdt_page( json.dump(json_object, json_file) if refresh_only == False: - id = str(job + "#" + build) retry_time = datetime.datetime.now().replace(microsecond=0) @@ -341,18 +304,14 @@ def update_cmssdt_page( with open(html_file, "w") as openfile: json.dump(json_object, openfile, indent=2) - trigger_web_update = ( - os.environ.get("JENKINS_CLI_CMD") + " build jenkins-test-parser-monitor" - ) + trigger_web_update = os.environ.get("JENKINS_CLI_CMD") + " build jenkins-test-parser-monitor" if refresh_only == False or cleanup_flag == 1: - print(trigger_web_update) os.system(trigger_web_update) def cleanup_cmssdt_page(json_object): - builds_dir = os.environ.get("HOME") + "/builds" cleanup_flag = 0 @@ -410,7 +369,6 @@ def cleanup_cmssdt_page(json_object): def update_retry_link_cmssdt_page(retry_url_file, job, build, retry_url): - with open(retry_url_file, "r") as openfile: json_object = json.load(openfile) diff --git a/jenkins/parser/helpers.py b/jenkins/parser/helpers.py index 537417121201..884bec54ffa1 100644 --- a/jenkins/parser/helpers.py +++ b/jenkins/parser/helpers.py @@ -37,15 +37,11 @@ def get_errors_list(jobs_object, job_id): # Check if forceRetry field has been set if jobs_object["jobsConfig"]["errorMsg"][ii].get("forceRetry") == "true": - force_retry_regex.extend( - jobs_object["jobsConfig"]["errorMsg"][ii]["errorStr"] - ) + force_retry_regex.extend(jobs_object["jobsConfig"]["errorMsg"][ii]["errorStr"]) else: # If not, check value from defaultConfig section if jobs_object["defaultConfig"]["forceRetry"] == "true": - force_retry_regex.extend( - jobs_object["jobsConfig"]["errorMsg"][ii]["errorStr"] - ) + force_retry_regex.extend(jobs_object["jobsConfig"]["errorMsg"][ii]["errorStr"]) # Get the error keys of the concrete job ii error_keys = jobs_object["jobsConfig"]["jenkinsJobs"][job_id]["errorType"][:] @@ -58,7 +54,7 @@ def get_errors_list(jobs_object, job_id): def append_actions(error_keys, jenkins_errors): - """ Match error regex with the action to perform.""" + """Match error regex with the action to perform.""" # Get the error messages of the error keys error_list = [] # We append the action to perform to the error message @@ -84,7 +80,8 @@ def get_finished_builds(job_dir, running_builds): build for build in running_builds if grep( - functools.reduce(os.path.join, [job_dir, build, "build.xml"]), "", + functools.reduce(os.path.join, [job_dir, build, "build.xml"]), + "", ) ] @@ -96,12 +93,8 @@ def get_running_builds(job_dir, last_processed_log): for dir in os.scandir(job_dir) if dir.name.isdigit() and int(dir.name) > int(last_processed_log) - and os.path.isfile( - functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]) - ) - and not grep( - functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]), "" - ) + and os.path.isfile(functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"])) + and not grep(functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]), "") ] @@ -113,10 +106,6 @@ def get_missing_builds(job_dir, total_running_builds, last_processed_log): if dir.name.isdigit() and int(dir.name) > int(last_processed_log) and dir.name not in total_running_builds - and os.path.isfile( - functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]) - ) - and grep( - functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]), "" - ) + and os.path.isfile(functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"])) + and 
grep(functools.reduce(os.path.join, [job_dir, dir.name, "build.xml"]), "") ] diff --git a/jenkins/parser/jenkins-parser-job.py b/jenkins/parser/jenkins-parser-job.py index 1b0f0cff699d..dd7dc523e039 100755 --- a/jenkins/parser/jenkins-parser-job.py +++ b/jenkins/parser/jenkins-parser-job.py @@ -24,7 +24,7 @@ def process_build(build, job_dir, job_to_retry, error_list, retry_object, retry_ else: # Mark as retried actions.mark_build_as_retried(job_dir, job_to_retry, build) - print("[" + job_to_retry + "] ... #" + str(build)+ " OK") + print("[" + job_to_retry + "] ... #" + str(build) + " OK") def check_and_trigger_action( @@ -42,9 +42,7 @@ def check_and_trigger_action( lines = text_log.readlines() text_log.close() - job_url = ( - os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry - ) + job_url = os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry print("Parsing build #" + build_to_retry + " (" + job_url + ") ...") @@ -56,11 +54,7 @@ def check_and_trigger_action( for line in reversed(lines): if re.search(regex, line): print( - "... Found message " - + regex - + " in " - + log_file_path - + ". Taking action ..." + "... Found message " + regex + " in " + log_file_path + ". Taking action ..." ) if "retry" in action: actions.trigger_retry_action( @@ -77,7 +71,7 @@ def check_and_trigger_action( else: # Take action on the nodes node_name = helpers.grep(envvars_file_path, "NODE_NAME=", True) or "" - node_name = (node_name.split("=")[1].replace("\n", "")) + node_name = node_name.split("=")[1].replace("\n", "") job_url = ( os.environ.get("JENKINS_URL") + "job/" @@ -172,34 +166,28 @@ def check_and_trigger_action( build_file_path = os.path.join(build_dir_path, "build.xml") display_name = helpers.grep(build_file_path, "", True) or "" - display_name = display_name.replace("", "").replace("", "").replace("\n", "") + display_name = ( + display_name.replace("", "") + .replace("", "") + .replace("\n", "") + ) actions.notify_noaction(display_name, job_to_retry, build_to_retry, job_url) def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=18): """Check builds running time and notify in case it exceeds the maximum time defined (default max time = 18h).""" - job_url = ( - os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry - ) - parser_url = ( - os.environ.get("JENKINS_URL") + "job/jenkins-test-parser/" + parser_build_id - ) + job_url = os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry + parser_url = os.environ.get("JENKINS_URL") + "job/jenkins-test-parser/" + parser_build_id - build_file_path = functools.reduce( - os.path.join, [job_dir, build_to_check, "build.xml"] - ) + build_file_path = functools.reduce(os.path.join, [job_dir, build_to_check, "build.xml"]) if not os.path.exists(build_file_path): print("[DEBUG] No time check for ", job_url) - processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop( - build_to_retry - ) + processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop(build_to_retry) return if helpers.grep(build_file_path, ""): print("[DEBUG] No time check for ", job_url) - processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop( - build_to_retry - ) + processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop(build_to_retry) return if ( @@ -219,14 +207,15 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 start_timestamp = start_timestamp.replace("", "").replace("", "") display_name = 
helpers.grep(build_file_path, "", True) or "" - display_name = display_name.replace("", "").replace("", "").replace("\n", "") - + display_name = ( + display_name.replace("", "").replace("", "").replace("\n", "") + ) + start_datetime = datetime.datetime.fromtimestamp(int(start_timestamp) / 1000) now = datetime.datetime.now() duration = now - start_datetime if duration > datetime.timedelta(hours=max_running_time): - print( "Build #" + build_to_retry @@ -237,13 +226,16 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 + " hours!" ) - processed_object["parserInfo"]["runningBuilds"][job_to_retry][ - build_to_retry - ] = "emailSent" + processed_object["parserInfo"]["runningBuilds"][job_to_retry][build_to_retry] = "emailSent" # Mark as notified actions.notify_pendingbuild( - display_name, build_to_retry, job_to_retry, duration, job_url, parser_url, + display_name, + build_to_retry, + job_to_retry, + duration, + job_url, + parser_url, ) else: @@ -260,15 +252,12 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 if __name__ == "__main__": - # Set start time start_time = datetime.datetime.now() # Parse the build id of the current job parser = argparse.ArgumentParser() - parser.add_argument( - "parser_build_id", help="Input current build id from Jenkins env vars" - ) + parser.add_argument("parser_build_id", help="Input current build id from Jenkins env vars") args = parser.parse_args() parser_build_id = args.parser_build_id @@ -279,12 +268,9 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 os.environ.get("HOME") + "/builds/jenkins-test-parser/parser-info.json" ) # This file keeps track of the last log processed and the pending builds html_file_path = ( - os.environ.get("HOME") - + "/builds/jenkins-test-parser-monitor/json-web-info.json" - ) - retry_queue_path = ( - os.environ.get("HOME") + "/builds/jenkins-test-parser/retry_queue.json" + os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-web-info.json" ) + retry_queue_path = os.environ.get("HOME") + "/builds/jenkins-test-parser/retry_queue.json" # Get job-config info - always present (cloned from github) with open(jobs_config_path, "r") as jobs_file: @@ -296,11 +282,11 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 with open(parser_info_path, "r") as processed_file: # Get last parsed object just once processed_object = json.load(processed_file) except (FileNotFoundError, json.decoder.JSONDecodeError) as e: - print(f"Error occurred: {str(e)}") - print("Restoring parser-info.json file...") - with open(parser_info_path, "w") as json_file: - processed_object = {"parserInfo":{"lastRevision":{},"runningBuilds":{}}} - json.dump(processed_object, json_file, indent=2) + print(f"Error occurred: {str(e)}") + print("Restoring parser-info.json file...") + with open(parser_info_path, "w") as json_file: + processed_object = {"parserInfo": {"lastRevision": {}, "runningBuilds": {}}} + json.dump(processed_object, json_file, indent=2) # Get retry queue try: @@ -308,12 +294,12 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 retry_object = json.load(retry_file) retry_entries = retry_object["retryQueue"] except (FileNotFoundError, json.decoder.JSONDecodeError) as e: - print(f"Error occurred: {str(e)}") - print("Restoring retry_queue.json file...") - with open(retry_queue_path, "w") as json_file: - retry_object = {"retryQueue": {}} - json.dump(retry_object, json_file, indent=2) - 
retry_entries = retry_object["retryQueue"] + print(f"Error occurred: {str(e)}") + print("Restoring retry_queue.json file...") + with open(retry_queue_path, "w") as json_file: + retry_object = {"retryQueue": {}} + json.dump(retry_object, json_file, indent=2) + retry_entries = retry_object["retryQueue"] T = 1 time_check = True @@ -344,9 +330,7 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 # Get revision number try: - latest_revision = processed_object["parserInfo"]["lastRevision"][ - job_to_retry - ] + latest_revision = processed_object["parserInfo"]["lastRevision"][job_to_retry] except KeyError: latest_revision = 0 processed_object["parserInfo"]["lastRevision"][job_to_retry] = "0" @@ -381,11 +365,13 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 retry_object, retry_delay, ) - + # Update last processed log only if greater than current revision number max_latest_revision = max([int(build_id) for build_id in missing_builds]) if max_latest_revision > int(latest_revision): - processed_object["parserInfo"]["lastRevision"][job_to_retry] = max_latest_revision + processed_object["parserInfo"]["lastRevision"][ + job_to_retry + ] = max_latest_revision # Update running builds checking > last revision number new_running_builds = helpers.get_running_builds(job_dir, latest_revision) @@ -398,13 +384,9 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 for build in sorted(total_running_builds): if ( build - not in processed_object["parserInfo"]["runningBuilds"][ - job_to_retry - ].keys() + not in processed_object["parserInfo"]["runningBuilds"][job_to_retry].keys() ): - processed_object["parserInfo"]["runningBuilds"][job_to_retry][ - build - ] = "" + processed_object["parserInfo"]["runningBuilds"][job_to_retry][build] = "" finished_builds = helpers.get_finished_builds(job_dir, total_running_builds) @@ -426,13 +408,13 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 retry_object, retry_delay, ) - processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop( - build - ) + processed_object["parserInfo"]["runningBuilds"][job_to_retry].pop(build) # Update last processed log only if greater than current revision number max_latest_revision = max([int(build_id) for build_id in finished_builds]) if max_latest_revision > int(latest_revision): - processed_object["parserInfo"]["lastRevision"][job_to_retry] = max_latest_revision + processed_object["parserInfo"]["lastRevision"][ + job_to_retry + ] = max_latest_revision # Get updated value for total_running_builds total_running_builds = list( @@ -449,9 +431,7 @@ def check_running_time(job_dir, build_to_retry, job_to_retry, max_running_time=1 + job_to_retry ) for build_to_check in sorted(total_running_builds): - check_running_time( - job_dir, build_to_check, job_to_retry, max_running_time - ) + check_running_time(job_dir, build_to_check, job_to_retry, max_running_time) # print("[" + job_to_retry + "] ... 
Done") diff --git a/jenkins/parser/jenkins-parser-monitor-job.py b/jenkins/parser/jenkins-parser-monitor-job.py index 02a3d2cacf25..8f50fe3ef019 100755 --- a/jenkins/parser/jenkins-parser-monitor-job.py +++ b/jenkins/parser/jenkins-parser-monitor-job.py @@ -3,7 +3,9 @@ import json import os -webinfo_file = os.path.join(os.environ.get("HOME"), "builds/jenkins-test-parser-monitor/json-web-info.json") +webinfo_file = os.path.join( + os.environ.get("HOME"), "builds/jenkins-test-parser-monitor/json-web-info.json" +) try: with open(webinfo_file, "r") as json_file: # Keeps track of the actions taken by parser job @@ -15,7 +17,9 @@ json_object = {"parserActions": {}} json.dump(json_object, json_file, indent=2) -retryinfo_file = os.path.join(os.environ.get("HOME"), "builds/jenkins-test-parser-monitor/json-retry-info.json") +retryinfo_file = os.path.join( + os.environ.get("HOME"), "builds/jenkins-test-parser-monitor/json-retry-info.json" +) try: with open(retryinfo_file, "r") as json_file: # Keeps track of the links to the retry job @@ -29,11 +33,9 @@ json.dump(retryinfo_template, json_file, indent=2) with open( - os.environ.get("HOME") - + "/builds/jenkins-test-parser-monitor/test-parser-web-info.html", + os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/test-parser-web-info.html", "w", ) as html_file: # Static web page - head = '\n\ \n\ \n\ @@ -152,9 +154,7 @@ ) else: html_file.writelines( - " " - + json_object["parserActions"][id][item] - + "\n" + " " + json_object["parserActions"][id][item] + "\n" ) elif action == "Retry": @@ -178,9 +178,7 @@ ) else: html_file.writelines( - " " - + json_object["parserActions"][id][item] - + "\n" + " " + json_object["parserActions"][id][item] + "\n" ) html_file.writelines(" \n") diff --git a/jenkins/parser/jenkins-retry-job.py b/jenkins/parser/jenkins-retry-job.py index 5d39d9acf818..51e4811ddb33 100755 --- a/jenkins/parser/jenkins-retry-job.py +++ b/jenkins/parser/jenkins-retry-job.py @@ -44,7 +44,7 @@ def findParametersAction(root): - """ It finds Jenkins parameters under section ParametersAction in xml file.""" + """It finds Jenkins parameters under section ParametersAction in xml file.""" if root.tag == "parameters": return root for x in root: @@ -55,7 +55,7 @@ def findParametersAction(root): def getParameters(root, payload): - """ Append Jenkins parameters of the form parameter=value (n.text=v.text) elements to a list.""" + """Append Jenkins parameters of the form parameter=value (n.text=v.text) elements to a list.""" n = root.find("name") if n is not None: if n.text is None: @@ -123,20 +123,14 @@ def getParameters(root, payload): # Update static webpage -tracker_path = ( - os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/parser-web-info.html" -) +tracker_path = os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/parser-web-info.html" job_url = os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry -retry_url = ( - os.environ.get("JENKINS_URL") + "job/jenkins-test-retry/" + current_build_number -) +retry_url = os.environ.get("JENKINS_URL") + "job/jenkins-test-retry/" + current_build_number retry_url_file_path = ( os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-retry-info.json" ) -actions.update_retry_link_cmssdt_page( - retry_url_file_path, job_to_retry, build_to_retry, retry_url -) +actions.update_retry_link_cmssdt_page(retry_url_file_path, job_to_retry, build_to_retry, retry_url) # Format retry label depending on parser action times = "time" if retry_counter_update == 1 else 
"times" diff --git a/jenkins/parser/jobs-config.json b/jenkins/parser/jobs-config.json index 372348843c74..cdd85af3c10a 100644 --- a/jenkins/parser/jobs-config.json +++ b/jenkins/parser/jobs-config.json @@ -184,7 +184,8 @@ "errorMsg": { "timedOut": { "errorStr": [ - "Build timed out" + "Build timed out", + "Waiting for IB since 3600 secs" ], "action": "retryNow" }, diff --git a/jenkins/parser/paser-config-unittest.py b/jenkins/parser/paser-config-unittest.py index 5acdb1231723..117e84a9f975 100644 --- a/jenkins/parser/paser-config-unittest.py +++ b/jenkins/parser/paser-config-unittest.py @@ -20,9 +20,7 @@ _, output = getstatusoutput( 'curl -s https://raw.githubusercontent.com/cms-sw/cmssdt-wiki/master/jenkins_reports/All.md | grep "## \[.*\](.*"' ) -valid_job_names = [ - re.sub("\]\(.*", "", item.replace("## [", "")) for item in output.split("\n") -] +valid_job_names = [re.sub("\]\(.*", "", item.replace("## [", "")) for item in output.split("\n")] # Check that valid_job_names contains all elements of job_names assert all( item in valid_job_names for item in job_names @@ -48,9 +46,7 @@ "nodeOff", "nodeReconnect", ] # TODO: Find a better way to get all valid actions -defined_actions = [ - error_msg[error_category]["action"] for error_category in error_msg.keys() -] +defined_actions = [error_msg[error_category]["action"] for error_category in error_msg.keys()] # Check that valid_actions contains all defined actions assert all( item in valid_actions for item in defined_actions diff --git a/jenkins/report-jenkins-jobs.py b/jenkins/report-jenkins-jobs.py index b793c2fa6a17..288437f39e1c 100755 --- a/jenkins/report-jenkins-jobs.py +++ b/jenkins/report-jenkins-jobs.py @@ -1,82 +1,119 @@ #!/usr/bin/env python3 from __future__ import print_function + print("") -print('') +print("") print('') print('') -print('') -print('') -print('') +print( + '' +) +print("") +print("") print('
') print('
') print('
') print('

CMS Jenkins Projects

') -print('

This page displays a summary of all CMS Jenkins projects , their sub projects , upstream projects
and downstream projects. To see the deatil and confgiuration of a project in Jenkins , click on project name.


') -print('
') -print('
') -print('
') +print( + "

This page displays a summary of all CMS Jenkins projects, their sub projects, upstream projects
and downstream projects. To see the detail and configuration of a project in Jenkins, click on project name.


" +) +print(" ") +print(" ") +print(" ") from collections import defaultdict + parents = defaultdict(list) import json import time try: - fd = open('/tmp/report_gen.txt') - txt = fd.read() + fd = open("/tmp/report_gen.txt") + txt = fd.read() except Exception as e: - print("Error reading the file") + print("Error reading the file") data_uns = json.loads(txt) -data = sorted(list(data_uns.items()),key=lambda s: s[0].lower()) +data = sorted(list(data_uns.items()), key=lambda s: s[0].lower()) for item in data: - name = item[1]['job_name'] - if name.startswith('DMWM'): - continue - print('
') - print('") + print("
") print(' ') -print('') -print('') - - +print(" ") +print(" ") +print("") +print("") diff --git a/jenkins_callback.py b/jenkins_callback.py index 4e412ab659a3..da2de5979dd4 100755 --- a/jenkins_callback.py +++ b/jenkins_callback.py @@ -1,34 +1,43 @@ -from _py2with3compatibility import Request, urlopen, urlencode, build_opener, install_opener, CookieJar, HTTPCookieProcessor, HTTPError +from _py2with3compatibility import ( + Request, + urlopen, + urlencode, + build_opener, + install_opener, + CookieJar, + HTTPCookieProcessor, + HTTPError, +) from json import loads + def update_crumb(jenkins_url, headers): try: - req = Request(url=jenkins_url+'/crumbIssuer/api/json', headers=headers) - crumb = loads(urlopen(req).read()) - headers[crumb['crumbRequestField']] = crumb['crumb'] - print ("OK crumbRequest") + req = Request(url=jenkins_url + "/crumbIssuer/api/json", headers=headers) + crumb = loads(urlopen(req).read()) + headers[crumb["crumbRequestField"]] = crumb["crumb"] + print("OK crumbRequest") except HTTPError as e: - print ("Running without Crumb Issuer: %s" % e) - pass + print("Running without Crumb Issuer: %s" % e) + pass return headers -def build_jobs(jenkins_url, jobs_data, headers = {}, user="cmssdt"): - for rk in ["OIDC_CLAIM_CERN_UPN"]: - if rk not in headers: - headers[rk] = user - install_opener(build_opener(HTTPCookieProcessor(CookieJar()))) - for prams,job in jobs_data: - if not job: continue - headers = update_crumb(jenkins_url, headers) - url = jenkins_url+'/job/'+ job + '/build' - data = { - "json": prams, - "Submit": "Build" - } - try: - data = urlencode(data).encode() - req = Request(url=url,data=data,headers=headers) - content = urlopen(req).read() - print ("ALL_OK") - except Exception as e: - print("Unable to start jenkins job: %s" % e) + +def build_jobs(jenkins_url, jobs_data, headers={}, user="cmssdt"): + for rk in ["OIDC_CLAIM_CERN_UPN"]: + if rk not in headers: + headers[rk] = user + install_opener(build_opener(HTTPCookieProcessor(CookieJar()))) + for prams, job in jobs_data: + if not job: + continue + headers = update_crumb(jenkins_url, headers) + url = jenkins_url + "/job/" + job + "/build" + data = {"json": prams, "Submit": "Build"} + try: + data = urlencode(data).encode() + req = Request(url=url, data=data, headers=headers) + content = urlopen(req).read() + print("ALL_OK") + except Exception as e: + print("Unable to start jenkins job: %s" % e) diff --git a/jenkins_monitor_queue.py b/jenkins_monitor_queue.py index 8a22696106ae..004d8fad9c47 100755 --- a/jenkins_monitor_queue.py +++ b/jenkins_monitor_queue.py @@ -6,81 +6,103 @@ from hashlib import sha1 from es_utils import get_payload_wscroll, send_payload, delete_hit -JENKINS_PREFIX="jenkins" -try: JENKINS_PREFIX=os.environ['JENKINS_URL'].strip("/").split("/")[-1] -except: JENKINS_PREFIX="jenkins" - -query_pending_builds = """{ +JENKINS_PREFIX = "jenkins" +try: + JENKINS_PREFIX = os.environ["JENKINS_URL"].strip("/").split("/")[-1] +except: + JENKINS_PREFIX = "jenkins" + +query_pending_builds = ( + """{ "query": {"bool": {"must": {"query_string": {"query": "_index:cmssdt-jenkins-queue-* AND in_queue:1 AND jenkins_server:%s", "default_operator": "AND"}}}}, "from": 0, "size": 10000 -}""" % JENKINS_PREFIX +}""" + % JENKINS_PREFIX +) -query_offline_nodes = """{ +query_offline_nodes = ( + """{ "query": {"bool": {"must": {"query_string": {"query": "jenkins_server:%s", "default_operator": "AND"}}}}, "from": 0, "size": 10000 -}""" % JENKINS_PREFIX +}""" + % JENKINS_PREFIX +) -queue_index="cmssdt-jenkins-offline-nodes" 
+queue_index = "cmssdt-jenkins-offline-nodes" queue_document = "offline-data" max_time = 60 current_offline_nodes = [] -content_hash = get_payload_wscroll('cmssdt-jenkins-queue-*', query_pending_builds) +content_hash = get_payload_wscroll("cmssdt-jenkins-queue-*", query_pending_builds) if content_hash: - if (not 'hits' in content_hash) or (not 'hits' in content_hash['hits']): - print("ERROR: ", content_hash) - sys.exit(1) - - print("Found " + str(len(content_hash['hits']['hits'])) + " jobs in queue!") - for hit in content_hash['hits']['hits']: - job_name = hit["_source"]["job_name"] - queue_time = int(hit["_source"]["wait_time"])/(60*1000) - print("[" + str(hit["_source"]["node_labels"]) + "] Job " + str(job_name) + " has been in queue " + str(queue_time) + " minutes..." ) - - payload = {} - if "offline" in hit["_source"]["node_labels"]: - offline_time = int(hit["_source"]["wait_time"])/(60*1000) - print("--> Found job in queue due to an offline node: ", hit["_source"]) - print("Offline minutes: ", offline_time) - if int(offline_time) > int(max_time): - node = hit["_source"]["node_labels"].split("-offline")[0] - current_offline_nodes.append(node) - print("[WARNING] Node " + str(node) + " has been offline for more than " + str(max_time) + " minutes!") - payload['jenkins_server'] = JENKINS_PREFIX - payload["node_name"] = node - payload["offline_time"] = offline_time - - # Update data on the same id for each node - unique_id = JENKINS_PREFIX + "-" + node - id = sha1(unique_id.encode('utf-8')).hexdigest() - - # Update timestamp in milliseconds - current_time = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1) - payload['@timestamp'] = round(current_time.total_seconds()*1000) - - send_payload(queue_index,queue_document,id,json.dumps(payload)) - -content_hash = get_payload_wscroll('cmssdt-jenkins-offline-node*', query_offline_nodes) + if (not "hits" in content_hash) or (not "hits" in content_hash["hits"]): + print("ERROR: ", content_hash) + sys.exit(1) + + print("Found " + str(len(content_hash["hits"]["hits"])) + " jobs in queue!") + for hit in content_hash["hits"]["hits"]: + job_name = hit["_source"]["job_name"] + queue_time = int(hit["_source"]["wait_time"]) / (60 * 1000) + print( + "[" + + str(hit["_source"]["node_labels"]) + + "] Job " + + str(job_name) + + " has been in queue " + + str(queue_time) + + " minutes..." + ) + + payload = {} + if "offline" in hit["_source"]["node_labels"]: + offline_time = int(hit["_source"]["wait_time"]) / (60 * 1000) + print("--> Found job in queue due to an offline node: ", hit["_source"]) + print("Offline minutes: ", offline_time) + if int(offline_time) > int(max_time): + node = hit["_source"]["node_labels"].split("-offline")[0] + current_offline_nodes.append(node) + print( + "[WARNING] Node " + + str(node) + + " has been offline for more than " + + str(max_time) + + " minutes!" 
+ ) + payload["jenkins_server"] = JENKINS_PREFIX + payload["node_name"] = node + payload["offline_time"] = offline_time + + # Update data on the same id for each node + unique_id = JENKINS_PREFIX + "-" + node + id = sha1(unique_id.encode("utf-8")).hexdigest() + + # Update timestamp in milliseconds + current_time = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1) + payload["@timestamp"] = round(current_time.total_seconds() * 1000) + + send_payload(queue_index, queue_document, id, json.dumps(payload)) + +content_hash = get_payload_wscroll("cmssdt-jenkins-offline-node*", query_offline_nodes) es_offline_nodes = [] if content_hash: - if (not 'hits' in content_hash) or (not 'hits' in content_hash['hits']): - print("ERROR: ", content_hash) - sys.exit(1) + if (not "hits" in content_hash) or (not "hits" in content_hash["hits"]): + print("ERROR: ", content_hash) + sys.exit(1) - print("Found " + str(len(content_hash['hits']['hits'])) + " nodes already online!") - for hit in content_hash['hits']['hits']: - es_offline_nodes.append(str(hit["_source"]["node_name"])) + print("Found " + str(len(content_hash["hits"]["hits"])) + " nodes already online!") + for hit in content_hash["hits"]["hits"]: + es_offline_nodes.append(str(hit["_source"]["node_name"])) for node in es_offline_nodes: - if node not in current_offline_nodes: - unique_id = JENKINS_PREFIX + "-" + node - id = sha1(unique_id.encode('utf-8')).hexdigest() - hit = {"_index": "cmssdt-jenkins-offline-nodes", "_id": id} + if node not in current_offline_nodes: + unique_id = JENKINS_PREFIX + "-" + node + id = sha1(unique_id.encode("utf-8")).hexdigest() + hit = {"_index": "cmssdt-jenkins-offline-nodes", "_id": id} - print("--> Deleting entry for node " + str(node) + ":" + str(hit) ) - delete_hit(hit) + print("--> Deleting entry for node " + str(node) + ":" + str(hit)) + delete_hit(hit) diff --git a/jobs/create-relval-jobs.py b/jobs/create-relval-jobs.py index 2879e0556dbf..928f7b1eeb5d 100755 --- a/jobs/create-relval-jobs.py +++ b/jobs/create-relval-jobs.py @@ -17,99 +17,128 @@ SCRIPT_DIR = os.path.dirname(os.path.abspath(sys.argv[0])) CMS_BOT_DIR = os.path.dirname(SCRIPT_DIR) -sys.path.insert(0,CMS_BOT_DIR) -sys.path.insert(0,SCRIPT_DIR) +sys.path.insert(0, CMS_BOT_DIR) +sys.path.insert(0, SCRIPT_DIR) from _py2with3compatibility import run_cmd from cmsutils import MachineCPUCount from RelValArgs import GetMatrixOptions, FixWFArgs from es_utils import es_query, format, es_workflow_stats + def createJob(workflow, cmssw_ver, arch): - workflow_args = FixWFArgs(cmssw_ver, arch, workflow, GetMatrixOptions(cmssw_ver, arch)) - cmd = format("rm -rf %(workflow)s %(workflow)s_*; mkdir %(workflow)s; cd %(workflow)s; PATH=%(das_utils)s:$PATH runTheMatrix.py --maxSteps=0 -l %(workflow)s %(workflow_args)s",workflow=workflow,workflow_args=workflow_args, das_utils=CMS_BOT_DIR+"/das-utils") - print("Running ",cmd) - e, o = run_cmd(cmd) - if e: print("ERROR:%s:%s" % (workflow, o)) - try: - workflow_dir = glob.glob(format("%(workflow)s/%(workflow)s_*", workflow=workflow))[0] - run_cmd(format("mv %(workflow)s/runall-report-step123-.log %(workflow_dir)s/workflow.log; touch %(workflow_dir)s/cmdLog; mv %(workflow_dir)s .; rm -rf %(workflow)s", workflow=workflow, workflow_dir=workflow_dir)) - print("Commands for workflow %s generated" % workflow) - except Exception as e: - print("ERROR: Creating workflow job:",workflow,str(e)) - run_cmd("rm -rf %s %s_*" % (workflow,workflow)) + workflow_args = FixWFArgs(cmssw_ver, arch, workflow, GetMatrixOptions(cmssw_ver, arch)) + 
cmd = format( + "rm -rf %(workflow)s %(workflow)s_*; mkdir %(workflow)s; cd %(workflow)s; PATH=%(das_utils)s:$PATH runTheMatrix.py --maxSteps=0 -l %(workflow)s %(workflow_args)s", + workflow=workflow, + workflow_args=workflow_args, + das_utils=CMS_BOT_DIR + "/das-utils", + ) + print("Running ", cmd) + e, o = run_cmd(cmd) + if e: + print("ERROR:%s:%s" % (workflow, o)) + try: + workflow_dir = glob.glob(format("%(workflow)s/%(workflow)s_*", workflow=workflow))[0] + run_cmd( + format( + "mv %(workflow)s/runall-report-step123-.log %(workflow_dir)s/workflow.log; touch %(workflow_dir)s/cmdLog; mv %(workflow_dir)s .; rm -rf %(workflow)s", + workflow=workflow, + workflow_dir=workflow_dir, + ) + ) + print("Commands for workflow %s generated" % workflow) + except Exception as e: + print("ERROR: Creating workflow job:", workflow, str(e)) + run_cmd("rm -rf %s %s_*" % (workflow, workflow)) + -pyRunDir=os.path.join(os.environ["CMSSW_BASE"],"pyRelval") +pyRunDir = os.path.join(os.environ["CMSSW_BASE"], "pyRelval") run_cmd("rm -rf %s; mkdir -p %s" % (pyRunDir, pyRunDir)) os.chdir(pyRunDir) cmssw_ver = os.environ["CMSSW_VERSION"] arch = os.environ["SCRAM_ARCH"] -#Run runTheMatrix with maxStep=0 -thrds=[] -jobs=MachineCPUCount -wf_query="" +# Run runTheMatrix with maxStep=0 +thrds = [] +jobs = MachineCPUCount +wf_query = "" print("Creating jobs (%s) ...." % jobs) for wf in sys.argv[1].split(","): - wf_query+=" OR workflow:"+wf - while len(thrds)>=jobs: - sleep(0.1) - thrds = [ t for t in thrds if t.is_alive() ] - t = threading.Thread(target=createJob, args=(wf, cmssw_ver, arch)) - thrds.append(t) - t.start() -for t in thrds: t.join() + wf_query += " OR workflow:" + wf + while len(thrds) >= jobs: + sleep(0.1) + thrds = [t for t in thrds if t.is_alive()] + t = threading.Thread(target=createJob, args=(wf, cmssw_ver, arch)) + thrds.append(t) + t.start() +for t in thrds: + t.join() -#Get Workflow stats from ES +# Get Workflow stats from ES print("Getting Workflow stats from ES.....") stats = {} -release_cycle=str.lower(cmssw_ver.split("_X_")[0]+"_X") -days_history=10 -#if ('_ppc64le_' in arch) or ('_aarch64_' in arch) or ('cc8_' in arch) or ('gcc10' in arch): +release_cycle = str.lower(cmssw_ver.split("_X_")[0] + "_X") +days_history = 10 +# if ('_ppc64le_' in arch) or ('_aarch64_' in arch) or ('cc8_' in arch) or ('gcc10' in arch): # days_history=3 print("Searching for last %s days data" % days_history) while True: - stats = es_query(index='relvals_stats_*', - query=format('(NOT cpu_max:0) AND exit_code:0 AND release:%(release_cycle)s AND architecture:%(architecture)s AND (%(workflows)s)', - release_cycle=release_cycle+"_*", - architecture=arch, - workflows=wf_query[4:] - ), - start_time=1000*int(time()-(86400*days_history)), - end_time=1000*int(time()), - scroll=True) - if (not 'hits' in stats) or (not 'hits' in stats['hits']) or (not stats['hits']['hits']): - xrelease_cycle = str.lower("_".join(cmssw_ver.split("_",4)[0:3])+"_X") - if xrelease_cycle!=release_cycle: - release_cycle=xrelease_cycle - print("Retry: Setting release cycle to ",release_cycle) - continue - break + stats = es_query( + index="relvals_stats_*", + query=format( + "(NOT cpu_max:0) AND exit_code:0 AND release:%(release_cycle)s AND architecture:%(architecture)s AND (%(workflows)s)", + release_cycle=release_cycle + "_*", + architecture=arch, + workflows=wf_query[4:], + ), + start_time=1000 * int(time() - (86400 * days_history)), + end_time=1000 * int(time()), + scroll=True, + ) + if (not "hits" in stats) or (not "hits" in stats["hits"]) or (not 
stats["hits"]["hits"]): + xrelease_cycle = str.lower("_".join(cmssw_ver.split("_", 4)[0:3]) + "_X") + if xrelease_cycle != release_cycle: + release_cycle = xrelease_cycle + print("Retry: Setting release cycle to ", release_cycle) + continue + break -dump(stats, open("all.json","w"), sort_keys=True,indent=2) +dump(stats, open("all.json", "w"), sort_keys=True, indent=2) wf_stats = es_workflow_stats(stats) -#Create Jobs +# Create Jobs print("Creating jobs.json file ....") jobs = {} jobs["final_job"] = "echo All Done" -jobs["final_per_group"] = {"command": SCRIPT_DIR+"/workflow_final.py %(jobs_results)s", "cpu": 10, "rss": 10*1024*1024, "time" : 30} -jobs["env"]={} -jobs["jobs"]=[] -e , o = run_cmd ("find . -name workflow.log -type f | sed 's|^./||'") +jobs["final_per_group"] = { + "command": SCRIPT_DIR + "/workflow_final.py %(jobs_results)s", + "cpu": 10, + "rss": 10 * 1024 * 1024, + "time": 30, +} +jobs["env"] = {} +jobs["jobs"] = [] +e, o = run_cmd("find . -name workflow.log -type f | sed 's|^./||'") for cmds_log in o.split("\n"): - cmds = os.path.join(os.path.dirname(cmds_log),"wf_steps.txt") - wf = cmds.split("_")[0] - group ={"name": wf, "commands":[]} - if os.path.exists(cmds): - e, o = run_cmd ("cat %s | grep ^step" % cmds) - for c in o.split("\n"): - job = {"cpu" : 300, "rss" : 4.5*1024*1024*1024, "time" : 120, "command" : re.sub("\s*;\s*$","",c.split(":",1)[-1])} - step = c.split(":")[0] - if (wf in wf_stats) and (step in wf_stats[wf]): - job["time"] = wf_stats[wf][step]["time"] - for x in ["cpu", "rss"]: - job[x] = wf_stats[wf][step][x] - for t in [x+"_avg", x+"_max"]: job[t] = wf_stats[wf][step][t] - group["commands"].append(job) - jobs["jobs"].append(group) -dump(jobs, open("jobs.json","w"), sort_keys=True,indent=2) + cmds = os.path.join(os.path.dirname(cmds_log), "wf_steps.txt") + wf = cmds.split("_")[0] + group = {"name": wf, "commands": []} + if os.path.exists(cmds): + e, o = run_cmd("cat %s | grep ^step" % cmds) + for c in o.split("\n"): + job = { + "cpu": 300, + "rss": 4.5 * 1024 * 1024 * 1024, + "time": 120, + "command": re.sub("\s*;\s*$", "", c.split(":", 1)[-1]), + } + step = c.split(":")[0] + if (wf in wf_stats) and (step in wf_stats[wf]): + job["time"] = wf_stats[wf][step]["time"] + for x in ["cpu", "rss"]: + job[x] = wf_stats[wf][step][x] + for t in [x + "_avg", x + "_max"]: + job[t] = wf_stats[wf][step][t] + group["commands"].append(job) + jobs["jobs"].append(group) +dump(jobs, open("jobs.json", "w"), sort_keys=True, indent=2) diff --git a/jobs/jobscheduler.py b/jobs/jobscheduler.py index bb8e5edebec4..4598efe82297 100755 --- a/jobs/jobscheduler.py +++ b/jobs/jobscheduler.py @@ -15,180 +15,344 @@ from subprocess import Popen from os.path import abspath, dirname import sys + sys.path.append(dirname(dirname(abspath(sys.argv[0])))) from cmsutils import MachineCPUCount, MachineMemoryGB global simulation_time global simulation + + def gettime(addtime=0): - if not simulation: return int(time()) - global simulation_time - simulation_time+=addtime - return simulation_time + if not simulation: + return int(time()) + global simulation_time + simulation_time += addtime + return simulation_time + + def simulate_done_job(thrds, resources): - thdtime = 99999999 - print(gettime(),":",len(thrds),":",",".join([ str(n) for n in sorted([thrds[t]["time2finish"] for t in thrds])]),resources["available"]) - for t in thrds: - if thrds[t]["time2finish"]= 9999999: return [] - xthrds = [ t for t in thrds if thrds[t]["time2finish"]==thdtime ] - for t in xthrds: - f = open(thrds[t]["jobid"],"w") 
- f.close() - while [ t for t in xthrds if t.is_alive() ]: sleep(0.001) - for t in thrds: - if not t in xthrds: thrds[t]["time2finish"]=thrds[t]["time2finish"]-thdtime - return xthrds - -def format(s, **kwds): return s % kwds + thdtime = 99999999 + print( + gettime(), + ":", + len(thrds), + ":", + ",".join([str(n) for n in sorted([thrds[t]["time2finish"] for t in thrds])]), + resources["available"], + ) + for t in thrds: + if thrds[t]["time2finish"] < thdtime: + thdtime = thrds[t]["time2finish"] + if thdtime >= 9999999: + return [] + xthrds = [t for t in thrds if thrds[t]["time2finish"] == thdtime] + for t in xthrds: + f = open(thrds[t]["jobid"], "w") + f.close() + while [t for t in xthrds if t.is_alive()]: + sleep(0.001) + for t in thrds: + if not t in xthrds: + thrds[t]["time2finish"] = thrds[t]["time2finish"] - thdtime + return xthrds + + +def format(s, **kwds): + return s % kwds + + def runJob(job): - if simulation: - while not os.path.exists(job["jobid"]): sleep(0.001) - os.remove(job["jobid"]) - job["exit_code"] = 0 - else: - p = Popen(job["command"], shell=True) - job["exit_code"] = os.waitpid(p.pid,0)[1] + if simulation: + while not os.path.exists(job["jobid"]): + sleep(0.001) + os.remove(job["jobid"]) + job["exit_code"] = 0 + else: + p = Popen(job["command"], shell=True) + job["exit_code"] = os.waitpid(p.pid, 0)[1] + def getFinalCommand(group, jobs, resources): - if not "final" in group: group["final"] = deepcopy(jobs["final_per_group"]) - job = group.pop("final") - job["jobid"]=group["name"]+"-final" - group["state"]="Done" - jobs_results = group["name"]+"-results.json" - ref = open(jobs_results, 'w') - ref.write(json.dumps(group, indent=2, sort_keys=True, separators=(',',': '))) - ref.close() - resources["done_groups"]=resources["done_groups"]+1 - job["command"]=format(job["command"],group_name=group["name"],jobs_results=jobs_results) - if simulation: job["time2finish"]=10 - job["origtime"]=60 - return job + if not "final" in group: + group["final"] = deepcopy(jobs["final_per_group"]) + job = group.pop("final") + job["jobid"] = group["name"] + "-final" + group["state"] = "Done" + jobs_results = group["name"] + "-results.json" + ref = open(jobs_results, "w") + ref.write(json.dumps(group, indent=2, sort_keys=True, separators=(",", ": "))) + ref.close() + resources["done_groups"] = resources["done_groups"] + 1 + job["command"] = format(job["command"], group_name=group["name"], jobs_results=jobs_results) + if simulation: + job["time2finish"] = 10 + job["origtime"] = 60 + return job + def getJob(jobs, resources, order): - pending_jobs = [] - pending_groups = [ g for g in jobs["jobs"] if g["state"]=="Pending" ] - for group in pending_groups: - if [ j for j in group["commands"] if j["state"]=="Running" ]: continue - if not [ j for j in group["commands"] if j["state"]=="Pending" ]: return True,getFinalCommand(group, jobs, resources) - for job in group["commands"]: - if job["state"]=="Pending": - if (job["rss"]<=resources["available"]["rss"]) and (job["cpu"]<=resources["available"]["cpu"]): pending_jobs.append(job) - break - if job["exit_code"]!=0: return True,getFinalCommand(group, jobs, resources) - if not pending_jobs: return len(pending_groups)>0,{} - sort_by = order - if order=="dynamic": - rss_v = 100.0*resources["available"]["rss"]/resources["total"]["rss"] - cpu_v = 100.0*resources["available"]["cpu"]/resources["total"]["cpu"] - sort_by = "rss" if rss_v>cpu_v else "cpu" - if not simulation: print("Sort by ",sort_by,rss_v,"vs",cpu_v) - return True, 
sorted(pending_jobs,key=itemgetter(sort_by),reverse=True)[0] + pending_jobs = [] + pending_groups = [g for g in jobs["jobs"] if g["state"] == "Pending"] + for group in pending_groups: + if [j for j in group["commands"] if j["state"] == "Running"]: + continue + if not [j for j in group["commands"] if j["state"] == "Pending"]: + return True, getFinalCommand(group, jobs, resources) + for job in group["commands"]: + if job["state"] == "Pending": + if (job["rss"] <= resources["available"]["rss"]) and ( + job["cpu"] <= resources["available"]["cpu"] + ): + pending_jobs.append(job) + break + if job["exit_code"] != 0: + return True, getFinalCommand(group, jobs, resources) + if not pending_jobs: + return len(pending_groups) > 0, {} + sort_by = order + if order == "dynamic": + rss_v = 100.0 * resources["available"]["rss"] / resources["total"]["rss"] + cpu_v = 100.0 * resources["available"]["cpu"] / resources["total"]["cpu"] + sort_by = "rss" if rss_v > cpu_v else "cpu" + if not simulation: + print("Sort by ", sort_by, rss_v, "vs", cpu_v) + return True, sorted(pending_jobs, key=itemgetter(sort_by), reverse=True)[0] + def startJob(job, resources, thrds): - job["state"]="Running" - job["start_time"]=gettime() - for pram in ["rss", "cpu"]: resources["available"][pram]=resources["available"][pram]-job[pram] - t = threading.Thread(target=runJob, args=(job,)) - thrds[t]=job - if not simulation: print("Run",len(thrds),job["jobid"],job["rss"],job["cpu"],job["time"],resources["available"]) - t.start() + job["state"] = "Running" + job["start_time"] = gettime() + for pram in ["rss", "cpu"]: + resources["available"][pram] = resources["available"][pram] - job[pram] + t = threading.Thread(target=runJob, args=(job,)) + thrds[t] = job + if not simulation: + print( + "Run", + len(thrds), + job["jobid"], + job["rss"], + job["cpu"], + job["time"], + resources["available"], + ) + t.start() + def checkJobs(thrds, resources): - done_thrds = [] - if simulation: done_thrds = simulate_done_job(thrds, resources) - while not done_thrds: sleep(1) ; done_thrds = [ t for t in thrds if not t.is_alive() ] - for t in done_thrds: - job = thrds.pop(t) - job["end_time"]=gettime(0 if not simulation else job["time2finish"]) - job["state"]="Done" - job["exec_time"]=job["end_time"]-job["start_time"] - if not simulation: - dtime = job["exec_time"]-job["origtime"] - if dtime > 60: - print("===> SLOW JOB:",job["exec_time"],"secs vs ",job["origtime"],"secs. Diff:",dtime) - resources["done_jobs"]=resources["done_jobs"]+1 - for pram in ["rss", "cpu"]: resources["available"][pram]=resources["available"][pram]+job[pram] - if not simulation: - print("Done",len(thrds),job["jobid"],job["exec_time"],job["exit_code"],resources["available"],"JOBS:",resources["done_jobs"],"/",resources["total_jobs"],"GROUPS:",resources["done_groups"],"/",resources["total_groups"]) + done_thrds = [] + if simulation: + done_thrds = simulate_done_job(thrds, resources) + while not done_thrds: + sleep(1) + done_thrds = [t for t in thrds if not t.is_alive()] + for t in done_thrds: + job = thrds.pop(t) + job["end_time"] = gettime(0 if not simulation else job["time2finish"]) + job["state"] = "Done" + job["exec_time"] = job["end_time"] - job["start_time"] + if not simulation: + dtime = job["exec_time"] - job["origtime"] + if dtime > 60: + print( + "===> SLOW JOB:", + job["exec_time"], + "secs vs ", + job["origtime"], + "secs. 
Diff:", + dtime, + ) + resources["done_jobs"] = resources["done_jobs"] + 1 + for pram in ["rss", "cpu"]: + resources["available"][pram] = resources["available"][pram] + job[pram] + if not simulation: + print( + "Done", + len(thrds), + job["jobid"], + job["exec_time"], + job["exit_code"], + resources["available"], + "JOBS:", + resources["done_jobs"], + "/", + resources["total_jobs"], + "GROUPS:", + resources["done_groups"], + "/", + resources["total_groups"], + ) + def initJobs(jobs, resources, otype): - if not "final" in jobs: jobs["final"]="true" - if not "final_per_group" in jobs: jobs["final_per_group"]={"command": "true", "cpu": 1, "rss": 1, "time" : 1} - for env,value in jobs["env"].items(): os.putenv(env,value) - total_groups=0 - total_jobs=0 - for group in jobs["jobs"]: - total_groups+=1 - group["state"]="Pending" - cmd_count = len(group["commands"]) - job_time=0 - for i in reversed(list(range(cmd_count))): - total_jobs+=1 - job = group["commands"][i] - job["origtime"] = job["time"] - if simulation: job["time2finish"] = job["time"] - job_time += job["time"] - job["time"] = job_time - if job['cpu']==0: job['cpu']=300 - if job['rss']==0: job['rss']=1024*1024*1024*6 - for x in ["rss","cpu"]: - for y in [x+"_avg", x+"_max"]: - if (not y in job) or (job[y]==0): job[y]=job[x] - if not simulation: - print (">>",group["name"],job) - for x in [ "rss", "cpu" ]: print (" ",x,int(job[x]*100/job[x+"_max"]),int(job[x+"_avg"]*100/job[x+"_max"])) - if otype: - for x in [ "rss", "cpu" ]: job[x] = job[ x + "_" + otype ] - job["state"]="Pending" - job["exit_code"]=-1 - job["jobid"]=group["name"]+"(%s/%s)" % (i+1, cmd_count) - job["jobid"]="%s-%sof%s" % (group["name"], i+1, cmd_count) - for item in ["rss", "cpu"]: - if resources["total"][item]>", group["name"], job) + for x in ["rss", "cpu"]: + print( + " ", + x, + int(job[x] * 100 / job[x + "_max"]), + int(job[x + "_avg"] * 100 / job[x + "_max"]), + ) + if otype: + for x in ["rss", "cpu"]: + job[x] = job[x + "_" + otype] + job["state"] = "Pending" + job["exit_code"] = -1 + job["jobid"] = group["name"] + "(%s/%s)" % (i + 1, cmd_count) + job["jobid"] = "%s-%sof%s" % (group["name"], i + 1, cmd_count) + for item in ["rss", "cpu"]: + if resources["total"][item] < job[item]: + resources["total"][item] = job[item] + 1 + resources["available"] = deepcopy(resources["total"]) + resources["total_groups"] = total_groups + resources["total_jobs"] = total_jobs + total_groups + print("Total Resources:", resources["available"]) + return jobs + if __name__ == "__main__": - parser = OptionParser(usage="%prog [-m|--memory ] [-c|--cpu ] [-j|--jobs ]") - parser.add_option("-x", "--maxmemory", dest="maxmemory", default=0, type="int", help="Override max memory to use. Default is 0 i.e. use the available memory count with -m option.") - parser.add_option("-X", "--maxcpu", dest="maxcpu", default=0, type="int", help="Override max CPU % to use. Default is 0 i.e. use the available cpu count with -c option.") - parser.add_option("-m", "--memory", dest="memory", default=100, type="int", help="Percentage of total memory available for jobs") - parser.add_option("-c", "--cpu", dest="cpu", default=200, type="int", help="Percentage of total cpu available for jobs e.g. on a 8 core machine it can use 1600% cpu.") - parser.add_option("-j", "--jobs", dest="jobs", default="jobs.json", help="Json file path with groups/jobs to run") - parser.add_option("-o", "--order", dest="order", default="dynamic", help="Order the jobs based on selected criteria. Valid values are time|rss|cpu|dynamic. 
Default value dynamic") - parser.add_option("-t", "--type", dest="type", default="", help="Order type. Valid values are avg|max. Default value ''") - parser.add_option("-M", "--max-jobs", dest="maxJobs", default=-1, type="int", help="Maximum jobs to run in parallel. Default is -1 which means no limit. Special value 0 means maximum jobs=CPU counts") - parser.add_option("-s", "--simulate", dest="simulate", action="store_true", help="Do not run the jobs but simulate the timings.", default=False) - opts, args = parser.parse_args() - simulation_time = 0 - simulation = opts.simulate - if opts.memory>200: opts.memory=200 - if opts.cpu>300: opts.cpu=300 - if not opts.type in [ "", "avg", "max" ]: parser.error("Invalid -t|--type value '%s' provided." % opts.type) - if not opts.order in ["dynamic", "time", "rss", "cpu"]: parser.error("Invalid -o|--order value '%s' provided." % opts.order) - if opts.maxJobs<=0: opts.maxJobs=MachineCPUCount - resources={"total": - { - "cpu" : opts.maxcpu if (opts.maxcpu>0) else MachineCPUCount*opts.cpu, - "rss" : opts.maxmemory if (opts.maxmemory>0) else int(MachineMemoryGB*1024*1024*10.24*opts.memory) - }, - "total_groups" : 0, "total_jobs" : 0, "done_groups" : 0, "done_jobs" : 0 - } - print(MachineCPUCount,MachineMemoryGB,resources) - jobs=initJobs(json.load(open(opts.jobs)), resources, opts.type) - thrds={} - wait_for_jobs = False - has_jobs = True - while has_jobs: - while (wait_for_jobs or ((opts.maxJobs>0) and (len(thrds)>=opts.maxJobs))): - wait_for_jobs = False - checkJobs(thrds, resources) - has_jobs, job = getJob(jobs,resources, opts.order) - if job: startJob(job, resources, thrds) - else: wait_for_jobs = True - while len(thrds): checkJobs(thrds, resources) - os.system(jobs["final"]) + parser = OptionParser( + usage="%prog [-m|--memory ] [-c|--cpu ] [-j|--jobs ]" + ) + parser.add_option( + "-x", + "--maxmemory", + dest="maxmemory", + default=0, + type="int", + help="Override max memory to use. Default is 0 i.e. use the available memory count with -m option.", + ) + parser.add_option( + "-X", + "--maxcpu", + dest="maxcpu", + default=0, + type="int", + help="Override max CPU % to use. Default is 0 i.e. use the available cpu count with -c option.", + ) + parser.add_option( + "-m", + "--memory", + dest="memory", + default=100, + type="int", + help="Percentage of total memory available for jobs", + ) + parser.add_option( + "-c", + "--cpu", + dest="cpu", + default=200, + type="int", + help="Percentage of total cpu available for jobs e.g. on a 8 core machine it can use 1600% cpu.", + ) + parser.add_option( + "-j", + "--jobs", + dest="jobs", + default="jobs.json", + help="Json file path with groups/jobs to run", + ) + parser.add_option( + "-o", + "--order", + dest="order", + default="dynamic", + help="Order the jobs based on selected criteria. Valid values are time|rss|cpu|dynamic. Default value dynamic", + ) + parser.add_option( + "-t", + "--type", + dest="type", + default="", + help="Order type. Valid values are avg|max. Default value ''", + ) + parser.add_option( + "-M", + "--max-jobs", + dest="maxJobs", + default=-1, + type="int", + help="Maximum jobs to run in parallel. Default is -1 which means no limit. 
Special value 0 means maximum jobs=CPU counts", + ) + parser.add_option( + "-s", + "--simulate", + dest="simulate", + action="store_true", + help="Do not run the jobs but simulate the timings.", + default=False, + ) + opts, args = parser.parse_args() + simulation_time = 0 + simulation = opts.simulate + if opts.memory > 200: + opts.memory = 200 + if opts.cpu > 300: + opts.cpu = 300 + if not opts.type in ["", "avg", "max"]: + parser.error("Invalid -t|--type value '%s' provided." % opts.type) + if not opts.order in ["dynamic", "time", "rss", "cpu"]: + parser.error("Invalid -o|--order value '%s' provided." % opts.order) + if opts.maxJobs <= 0: + opts.maxJobs = MachineCPUCount + resources = { + "total": { + "cpu": opts.maxcpu if (opts.maxcpu > 0) else MachineCPUCount * opts.cpu, + "rss": opts.maxmemory + if (opts.maxmemory > 0) + else int(MachineMemoryGB * 1024 * 1024 * 10.24 * opts.memory), + }, + "total_groups": 0, + "total_jobs": 0, + "done_groups": 0, + "done_jobs": 0, + } + print(MachineCPUCount, MachineMemoryGB, resources) + jobs = initJobs(json.load(open(opts.jobs)), resources, opts.type) + thrds = {} + wait_for_jobs = False + has_jobs = True + while has_jobs: + while wait_for_jobs or ((opts.maxJobs > 0) and (len(thrds) >= opts.maxJobs)): + wait_for_jobs = False + checkJobs(thrds, resources) + has_jobs, job = getJob(jobs, resources, opts.order) + if job: + startJob(job, resources, thrds) + else: + wait_for_jobs = True + while len(thrds): + checkJobs(thrds, resources) + os.system(jobs["final"]) diff --git a/jobs/stats.py b/jobs/stats.py index e206b6f711a2..02ebf2e8ce38 100644 --- a/jobs/stats.py +++ b/jobs/stats.py @@ -9,28 +9,30 @@ from __future__ import print_function import sys from os.path import getmtime, join, dirname, abspath + sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules from _py2with3compatibility import run_cmd -cache={} -e,o = run_cmd("ls -d wf*of*") +cache = {} +e, o = run_cmd("ls -d wf*of*") for d in o.split("\n"): - s,s1=d.split(".list-",1) - xt = int(getmtime(d)-getmtime(join(d,"jobs.json"))) - if not s in cache:cache[s]={} - if not xt in cache[s]: cache[s][xt]=[] - e, o = run_cmd("find %s -name 'workflow.log' -type f" % d) - tp=0 - tf=0 - for l in o.split("\n"): - e, o = run_cmd("grep 'tests passed' %s" % l) - x = o.replace(" failed","").split(" tests passed, ") - tp=tp+sum([int(i) for i in x[0].split(" ")]) - tf=tf+sum([int(i) for i in x[1].split(" ")]) - cache[s][xt].append({"order": s1, "passed": tp, "failed":tf}) + s, s1 = d.split(".list-", 1) + xt = int(getmtime(d) - getmtime(join(d, "jobs.json"))) + if not s in cache: + cache[s] = {} + if not xt in cache[s]: + cache[s][xt] = [] + e, o = run_cmd("find %s -name 'workflow.log' -type f" % d) + tp = 0 + tf = 0 + for l in o.split("\n"): + e, o = run_cmd("grep 'tests passed' %s" % l) + x = o.replace(" failed", "").split(" tests passed, ") + tp = tp + sum([int(i) for i in x[0].split(" ")]) + tf = tf + sum([int(i) for i in x[1].split(" ")]) + cache[s][xt].append({"order": s1, "passed": tp, "failed": tf}) for s in sorted(cache.keys()): - print(s) - for xt in sorted(cache[s].keys()): - for item in cache[s][xt]: - print(" ",xt," \t",item) - + print(s) + for xt in sorted(cache[s].keys()): + for item in cache[s][xt]: + print(" ", xt, " \t", item) diff --git a/jobs/workflow_final.py b/jobs/workflow_final.py index 1c4a177dc8d9..faea2b995e73 100755 --- a/jobs/workflow_final.py +++ b/jobs/workflow_final.py @@ -8,151 +8,177 @@ from __future__ import print_function import sys, json, 
glob, os, re + SCRIPT_DIR = os.path.dirname(os.path.abspath(sys.argv[0])) CMS_BOT_DIR = os.path.dirname(SCRIPT_DIR) -sys.path.insert(0,CMS_BOT_DIR) -sys.path.insert(0,SCRIPT_DIR) +sys.path.insert(0, CMS_BOT_DIR) +sys.path.insert(0, SCRIPT_DIR) from cmssw_known_errors import get_known_errors from logUpdater import LogUpdater from _py2with3compatibility import run_cmd + def update_cmdlog(workflow_dir, jobs): - if not jobs["commands"]: return - workflow_cmdlog=os.path.join(workflow_dir,"cmdLog") - if not os.path.exists(workflow_cmdlog): return - wfile=open(workflow_cmdlog,"a") - for job in jobs["commands"]: - if job["exit_code"]>=0: - wfile.write("\n# in: /some/build/directory going to execute ") - for cmd in job["command"].split(";"): - if cmd: wfile.write(cmd+"\n") - wfile.close() - return + if not jobs["commands"]: + return + workflow_cmdlog = os.path.join(workflow_dir, "cmdLog") + if not os.path.exists(workflow_cmdlog): + return + wfile = open(workflow_cmdlog, "a") + for job in jobs["commands"]: + if job["exit_code"] >= 0: + wfile.write("\n# in: /some/build/directory going to execute ") + for cmd in job["command"].split(";"): + if cmd: + wfile.write(cmd + "\n") + wfile.close() + return + def fix_lognames(workflow_dir): - workflow_id = os.path.basename(workflow_dir).split("_",1)[1] - for log in glob.glob(os.path.join(workflow_dir,"step*_*.log")): - logname = os.path.basename(log) - step = logname.split("_",1)[0] - deslog = step+".log" - if logname.endswith('_dasquery.log'): deslog = '%s_%s.log' % (step, workflow_id) - run_cmd("ln -s %s %s/%s" % (logname, workflow_dir, deslog)) + workflow_id = os.path.basename(workflow_dir).split("_", 1)[1] + for log in glob.glob(os.path.join(workflow_dir, "step*_*.log")): + logname = os.path.basename(log) + step = logname.split("_", 1)[0] + deslog = step + ".log" + if logname.endswith("_dasquery.log"): + deslog = "%s_%s.log" % (step, workflow_id) + run_cmd("ln -s %s %s/%s" % (logname, workflow_dir, deslog)) + def update_worklog(workflow_dir, jobs): - if not jobs["commands"]: return False - workflow_logfile=os.path.join(workflow_dir,"workflow.log") - if not os.path.exists(workflow_logfile): return False - workflow_time=0 - exit_codes="" - test_passed="" - test_failed="" - steps_res=[] - failed=False - step_num = 0 - for job in jobs["commands"]: - step_num+=1 - try: - m = re.match("^.*\s+step([1-9][0-9]*)\s+.*$",job['command']) - if m: - cmd_step = int(m.group(1)) - else: - m = re.match(".*\s*>\s*step([1-9][0-9]*)_[^\s]+\.log.*$",job['command']) - if m: - cmd_step = int(m.group(1)) - else: - cmd_step = int(job['command'].split(" step",1)[-1].strip().split(" ")[0]) - while cmd_step>step_num: - das_log = os.path.join(workflow_dir,"step%s_dasquery.log" % step_num) - step_num+=1 - if os.path.exists(das_log): - e, o = run_cmd("grep ' tests passed,' %s" % workflow_logfile) - if o=="": return False - ecodes = o.split() - if ecodes[step_num-2]=="0": - exit_codes+=" 1" - test_passed+=" 0" - test_failed+=" 1" - failed=True + if not jobs["commands"]: + return False + workflow_logfile = os.path.join(workflow_dir, "workflow.log") + if not os.path.exists(workflow_logfile): + return False + workflow_time = 0 + exit_codes = "" + test_passed = "" + test_failed = "" + steps_res = [] + failed = False + step_num = 0 + for job in jobs["commands"]: + step_num += 1 + try: + m = re.match("^.*\s+step([1-9][0-9]*)\s+.*$", job["command"]) + if m: + cmd_step = int(m.group(1)) + else: + m = re.match(".*\s*>\s*step([1-9][0-9]*)_[^\s]+\.log.*$", job["command"]) + if m: + cmd_step = 
int(m.group(1)) + else: + cmd_step = int(job["command"].split(" step", 1)[-1].strip().split(" ")[0]) + while cmd_step > step_num: + das_log = os.path.join(workflow_dir, "step%s_dasquery.log" % step_num) + step_num += 1 + if os.path.exists(das_log): + e, o = run_cmd("grep ' tests passed,' %s" % workflow_logfile) + if o == "": + return False + ecodes = o.split() + if ecodes[step_num - 2] == "0": + exit_codes += " 1" + test_passed += " 0" + test_failed += " 1" + failed = True + steps_res.append("FAILED") + continue + exit_codes += " 0" + test_passed += " 1" + test_failed += " 0" + steps_res.append("PASSED") + except Exception as e: + print("ERROR: Unable to find step number:", job["command"]) + pass + if job["exit_code"] == -1: + failed = True + if job["exit_code"] > 0: + exit_codes += " " + str(job["exit_code"]) + test_passed += " 0" + test_failed += " 1" + failed = True steps_res.append("FAILED") - continue - exit_codes+=" 0" - test_passed+=" 1" - test_failed+=" 0" - steps_res.append("PASSED") - except Exception as e: - print("ERROR: Unable to find step number:", job['command']) - pass - if job["exit_code"]==-1: failed=True - if job["exit_code"]>0: - exit_codes+=" "+str(job["exit_code"]) - test_passed+=" 0" - test_failed+=" 1" - failed=True - steps_res.append("FAILED") - else: - exit_codes+=" 0" - test_failed+=" 0" - if failed: test_passed+=" 0" - else: test_passed+=" 1" - steps_res.append("NORUN" if failed else "PASSED") - step_str = "" - for step, res in enumerate(steps_res): step_str = "%s Step%s-%s" % (step_str, step, res) - e, o = run_cmd("grep ' exit: ' %s | sed 's|exit:.*$|exit: %s|'" % (workflow_logfile, exit_codes.strip())) - o = re.sub("\s+Step0-.+\s+-\s+time\s+",step_str+" - time ",o) - wfile = open(workflow_logfile,"w") - wfile.write(o+"\n") - wfile.write("%s tests passed, %s failed\n" % (test_passed.strip(), test_failed.strip())) - wfile.close() - return True + else: + exit_codes += " 0" + test_failed += " 0" + if failed: + test_passed += " 0" + else: + test_passed += " 1" + steps_res.append("NORUN" if failed else "PASSED") + step_str = "" + for step, res in enumerate(steps_res): + step_str = "%s Step%s-%s" % (step_str, step, res) + e, o = run_cmd( + "grep ' exit: ' %s | sed 's|exit:.*$|exit: %s|'" % (workflow_logfile, exit_codes.strip()) + ) + o = re.sub("\s+Step0-.+\s+-\s+time\s+", step_str + " - time ", o) + wfile = open(workflow_logfile, "w") + wfile.write(o + "\n") + wfile.write("%s tests passed, %s failed\n" % (test_passed.strip(), test_failed.strip())) + wfile.close() + return True + def update_timelog(workflow_dir, jobs): - workflow_time=os.path.join(workflow_dir,"time.log") - wf_time=5 - for job in jobs["commands"]: - if job["state"]=="Done": wf_time+=job["exec_time"] - wfile = open(workflow_time,"w") - wfile.write("%s\n" % wf_time) - wfile.close() + workflow_time = os.path.join(workflow_dir, "time.log") + wf_time = 5 + for job in jobs["commands"]: + if job["state"] == "Done": + wf_time += job["exec_time"] + wfile = open(workflow_time, "w") + wfile.write("%s\n" % wf_time) + wfile.close() + + +def update_hostname(workflow_dir): + run_cmd("hostname > %s/hostname" % workflow_dir) -def update_hostname(workflow_dir): run_cmd("hostname > %s/hostname" % workflow_dir) def update_known_error(worflow, workflow_dir): - known_errors = get_known_errors(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], "relvals") - if worflow in known_errors: - json.dump(known_errors[workflow], open("%s/known_error.json" % workflow_dir,"w")) - return - -def upload_logs(workflow, 
workflow_dir,exit_code): - files_to_keep = [ ".txt", ".xml", ".log", ".py", ".json","/cmdLog", "/hostname",".done" ] - basedir = os.path.dirname(workflow_dir) - for wf_file in glob.glob("%s/*" % workflow_dir): - found=False - for ext in files_to_keep: - if wf_file.endswith(ext): - found=True - break - if not found: - print("Removing ",wf_file) - run_cmd("rm -rf %s" % wf_file) - logger=LogUpdater(dirIn=os.environ["CMSSW_BASE"]) - logger.updateRelValMatrixPartialLogs(basedir, os.path.basename(workflow_dir)) + known_errors = get_known_errors( + os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], "relvals" + ) + if worflow in known_errors: + json.dump(known_errors[workflow], open("%s/known_error.json" % workflow_dir, "w")) + return + + +def upload_logs(workflow, workflow_dir, exit_code): + files_to_keep = [".txt", ".xml", ".log", ".py", ".json", "/cmdLog", "/hostname", ".done"] + basedir = os.path.dirname(workflow_dir) + for wf_file in glob.glob("%s/*" % workflow_dir): + found = False + for ext in files_to_keep: + if wf_file.endswith(ext): + found = True + break + if not found: + print("Removing ", wf_file) + run_cmd("rm -rf %s" % wf_file) + logger = LogUpdater(dirIn=os.environ["CMSSW_BASE"]) + logger.updateRelValMatrixPartialLogs(basedir, os.path.basename(workflow_dir)) + if __name__ == "__main__": - jobs=json.load(open(sys.argv[1])) - exit_code = 0 - for cmd in jobs["commands"]: - if cmd["exit_code"]>0: - exit_code=cmd["exit_code"] - break - workflow = jobs["name"] - workflow_dir=os.path.abspath(glob.glob("%s_*" % workflow)[0]) - run_cmd("mv %s %s/job.json" % (sys.argv[1], workflow_dir)) - fix_lognames(workflow_dir) - if update_worklog(workflow_dir, jobs): - update_cmdlog(workflow_dir, jobs) - update_timelog(workflow_dir, jobs) - update_hostname(workflow_dir) - update_known_error(workflow, workflow_dir) - if not 'CMSSW_DRY_RUN' in os.environ: - upload_logs(workflow, workflow_dir, exit_code) - run_cmd("touch %s/workflow_upload_done" % workflow_dir) + jobs = json.load(open(sys.argv[1])) + exit_code = 0 + for cmd in jobs["commands"]: + if cmd["exit_code"] > 0: + exit_code = cmd["exit_code"] + break + workflow = jobs["name"] + workflow_dir = os.path.abspath(glob.glob("%s_*" % workflow)[0]) + run_cmd("mv %s %s/job.json" % (sys.argv[1], workflow_dir)) + fix_lognames(workflow_dir) + if update_worklog(workflow_dir, jobs): + update_cmdlog(workflow_dir, jobs) + update_timelog(workflow_dir, jobs) + update_hostname(workflow_dir) + update_known_error(workflow, workflow_dir) + if not "CMSSW_DRY_RUN" in os.environ: + upload_logs(workflow, workflow_dir, exit_code) + run_cmd("touch %s/workflow_upload_done" % workflow_dir) diff --git a/lizard-processing/src/lizard_to_html.py b/lizard-processing/src/lizard_to_html.py index 0ea68cbc2f60..c17db03a2e24 100755 --- a/lizard-processing/src/lizard_to_html.py +++ b/lizard-processing/src/lizard_to_html.py @@ -5,7 +5,7 @@ import os # constants -html_start = ''' +html_start = """ @@ -25,9 +25,9 @@

{title}


-''' +""" -html_end = ''' +html_end = """
-''' +""" g_total_col_nr = 0 # global value -g_link_root = 'https://test/adress.com' +g_link_root = "https://test/adress.com" g_table_data = [] a_href = '{text}' -table = '''\n\t{0}\n
\n''' -table_start = '''\n''' -table_end = '''\n
\n''' +table = """\n\t{0}\n
\n""" +table_start = """\n""" +table_end = """\n
\n""" -tr = '\n{0}\n\n' # row -th = '{0}' # label column -td = '{0}' # column -h1_bold = '

{0}\n

\n' -h2 = '

{0}\n

\n' +tr = "\n{0}\n\n" # row +th = "{0}" # label column +td = "{0}" # column +h1_bold = "

{0}\n

\n" +h2 = "

{0}\n

\n" # regex -regex_dashes = '^(-|=)*$' -regex_td = '^[ ]*[\d *]+.*\..+$' +regex_dashes = "^(-|=)*$" +regex_td = "^[ ]*[\d *]+.*\..+$" # regex_th = '^[^\d\W]+$' -regex_th = '.*(NLOC)' -regex_th_total = '^Total nloc' -regex_H1_warnings = ' *^!+.*!+ *$' -regex_H1_no_warnings = '^No thresholds exceeded \(' -regex_H1_files = '^\d+ file analyzed' +regex_th = ".*(NLOC)" +regex_th_total = "^Total nloc" +regex_H1_warnings = " *^!+.*!+ *$" +regex_H1_no_warnings = "^No thresholds exceeded \(" +regex_H1_files = "^\d+ file analyzed" regex_split = "[ ]{2,}|[ ]*$]" regex_split_td = "[ ]{1,}|[ ]*$]" regex_line_to_url = "[a-zA-Z]" @@ -84,16 +84,14 @@ def get_args(): """ # Assign description to the help doc - parser = argparse.ArgumentParser( - description='Script converts lizard .txt output to .html') + parser = argparse.ArgumentParser(description="Script converts lizard .txt output to .html") # Add arguments + parser.add_argument("-s", "--source", type=str, help="Source file", required=True) + parser.add_argument("-d", "--dir", type=str, help="Local output directory", required=True) parser.add_argument( - '-s', '--source', type=str, help='Source file', required=True) - parser.add_argument( - '-d', '--dir', type=str, help='Local output directory', required=True) - parser.add_argument( - '-l', '--link_root', type=str, help="Project's repository at Github", required=True) + "-l", "--link_root", type=str, help="Project's repository at Github", required=True + ) # Array for all arguments passed to script args = parser.parse_args() @@ -113,13 +111,11 @@ def text_with_href(url_base, line): line_numbers_group = re.search(regex_has_line_numbers, line) if bool(line_numbers_group): lines_string = line_numbers_group.group(0) - lines_string = lines_string.replace('@', '') - lines = lines_string.split('-') + lines_string = lines_string.replace("@", "") + lines = lines_string.split("-") line_split = re.split(regex_has_line_numbers, line) - url = url_base + line_split[1] \ - + "#" \ - + "L{0}-L{1}".format(lines[0], lines[1]) + url = url_base + line_split[1] + "#" + "L{0}-L{1}".format(lines[0], lines[1]) return a_href.format(url=url, text=line) else: url = url_base + line @@ -134,11 +130,12 @@ def parse(f_out, line): if bool(re.search(regex_dashes, line)): return False - elif bool(re.search(regex_H1_warnings, line) - or (re.search(regex_H1_no_warnings, line)) - or (re.search(regex_th_total, line)) - or re.search(regex_H1_files, line) - ): + elif bool( + re.search(regex_H1_warnings, line) + or (re.search(regex_H1_no_warnings, line)) + or (re.search(regex_th_total, line)) + or re.search(regex_H1_files, line) + ): return True elif bool(re.search(regex_th, line)): @@ -150,9 +147,7 @@ def parse(f_out, line): row_dataset = [] for td_val in table_row_values[:-1]: row_dataset.append(td_val) - row_dataset.append( - text_with_href(g_link_root, table_row_values[-1]) - ) + row_dataset.append(text_with_href(g_link_root, table_row_values[-1])) g_table_data.append(row_dataset) return False @@ -162,12 +157,10 @@ def parse(f_out, line): def write_table_th(f_out, line): global g_total_col_nr table_header_values = re.split(regex_split, line.strip()) - generated_row = '' + generated_row = "" for th_val in table_header_values: generated_row += th.format(th_val) - f_out.write( - '' + tr.format(generated_row) + '\n' - ) + f_out.write("" + tr.format(generated_row) + "\n") g_total_col_nr = len(table_header_values) - 1 @@ -176,13 +169,12 @@ def main(source_f_path, output_d, link_root): global g_link_root, g_table_data g_link_root = link_root - 
with open(source_f_path, 'r') as source_f: - + with open(source_f_path, "r") as source_f: do_split = False # --- { all_functions.html } - html_0 = open(os.path.join(output_d, 'all_functions.html'), 'w') - html_0.write(html_start.format(title='Statistics of all functions')) + html_0 = open(os.path.join(output_d, "all_functions.html"), "w") + html_0.write(html_start.format(title="Statistics of all functions")) html_0.write(table_start) while do_split is False: @@ -191,15 +183,15 @@ def main(source_f_path, output_d, link_root): if not line: break html_0.write(table_end) - html_0.write(html_end.format(data=g_table_data, comment_out_scrollX='')) + html_0.write(html_end.format(data=g_table_data, comment_out_scrollX="")) html_0.close() g_table_data = [] # --- {END all_functions.html } # --- { file_statistics.html } - html_0 = open(os.path.join(output_d, 'file_statistics.html'), 'w') - html_0.write(html_start.format(title='Files statistics')) - html_0.write(h2.format(line, klass='')) + html_0 = open(os.path.join(output_d, "file_statistics.html"), "w") + html_0.write(html_start.format(title="Files statistics")) + html_0.write(h2.format(line, klass="")) html_0.write(table_start) do_split = False while do_split is False: @@ -208,16 +200,16 @@ def main(source_f_path, output_d, link_root): if not line: break html_0.write(table_end) - html_0.write(html_end.format(data=g_table_data, comment_out_scrollX='')) + html_0.write(html_end.format(data=g_table_data, comment_out_scrollX="")) html_0.close() g_table_data = [] # --- {END file_statistics.html } # --- { warnings.html } - html_0 = open(os.path.join(output_d, 'warnings.html'), 'w') - html_0.write(html_start.format(title='Warnings')) + html_0 = open(os.path.join(output_d, "warnings.html"), "w") + html_0.write(html_start.format(title="Warnings")) - h1_class = '' + h1_class = "" if bool(re.search(regex_H1_warnings, line)): h1_class = 'class="alert alert-danger"' @@ -232,14 +224,14 @@ def main(source_f_path, output_d, link_root): break html_0.write(table_end) - html_0.write(html_end.format(data=g_table_data, comment_out_scrollX='')) + html_0.write(html_end.format(data=g_table_data, comment_out_scrollX="")) html_0.close() g_table_data = [] # --- {END warnings.html } # --- { total.html } - html_0 = open(os.path.join(output_d, 'total.html'), 'w') - html_0.write(html_start.format(title='Total scan statistics')) + html_0 = open(os.path.join(output_d, "total.html"), "w") + html_0.write(html_start.format(title="Total scan statistics")) html_0.write(table_start) write_table_th(html_0, line) do_split = False @@ -249,11 +241,11 @@ def main(source_f_path, output_d, link_root): if not line: break html_0.write(table_end) - html_0.write(html_end.format(data=g_table_data, comment_out_scrollX='//')) + html_0.write(html_end.format(data=g_table_data, comment_out_scrollX="//")) html_0.close() g_table_data = [] # --- {END total.html } -if __name__ == '__main__': +if __name__ == "__main__": main(*get_args()) diff --git a/lizard-processing/test/test_lizard_to_html.py b/lizard-processing/test/test_lizard_to_html.py index 0122d0f54d39..1901c1db7806 100755 --- a/lizard-processing/test/test_lizard_to_html.py +++ b/lizard-processing/test/test_lizard_to_html.py @@ -9,21 +9,30 @@ lines_th = [ "NLOC CCN token PARAM length location ", "NLOC Avg.NLOC AvgCCN Avg.token function_cnt file", - "Total nloc Avg.NLOC AvgCCN Avg.token Fun Cnt Warning cnt Fun Rt nloc Rt" + "Total nloc Avg.NLOC AvgCCN Avg.token Fun Cnt Warning cnt Fun Rt nloc Rt", ] -line_td = "6 3 28 0 6 
AlignableDetOrUnitPtr::operator Alignable " \ - "*@20-25@cms-sw-cmssw-630acaf/Alignment/CommonAlignment/src/AlignableDetOrUnitPtr.cc " +line_td = ( + "6 3 28 0 6 AlignableDetOrUnitPtr::operator Alignable " + "*@20-25@cms-sw-cmssw-630acaf/Alignment/CommonAlignment/src/AlignableDetOrUnitPtr.cc " +) -line_warning = '!!!! Warnings (cyclomatic_complexity > 5 or length > 1000 or parameter_count > 100) !!!!' -line_no_warning = 'No thresholds exceeded (cyclomatic_complexity > 15 or length > 1000 or parameter_count > 100)' -line_files = '21 file analyzed.' +line_warning = ( + "!!!! Warnings (cyclomatic_complexity > 5 or length > 1000 or parameter_count > 100) !!!!" +) +line_no_warning = ( + "No thresholds exceeded (cyclomatic_complexity > 15 or length > 1000 or parameter_count > 100)" +) +line_files = "21 file analyzed." class TestSequenceFunctions(unittest.TestCase): def test_main(self): - main(os.path.join(os.path.dirname(__file__), "../", './test-data/lizard-test-output.txt'), '/tmp', - 'https://github.com/cms-sw/cmssw/blob/master/') + main( + os.path.join(os.path.dirname(__file__), "../", "./test-data/lizard-test-output.txt"), + "/tmp", + "https://github.com/cms-sw/cmssw/blob/master/", + ) def test_reg_th(self): for line in lines_th: @@ -59,5 +68,6 @@ def test_split_1(self): self.assertEqual(len(re.split(regex_split, lines_th[1].strip())), 6) self.assertEqual(len(re.split(regex_split, lines_th[2].strip())), 8) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/logRootQA.py b/logRootQA.py index a3c105d54225..e1df780716ce 100644 --- a/logRootQA.py +++ b/logRootQA.py @@ -9,91 +9,109 @@ import subprocess as sub Log_Lines_Filter = [ - ('This TensorFlow binary is optimized with'), - ('[PostMaster', '[Error'), - ('Initiating request to open file', 'root://'), - ('Successfully opened file', 'root://'), - ('Closed file', 'root://') + ("This TensorFlow binary is optimized with"), + ("[PostMaster", "[Error"), + ("Initiating request to open file", "root://"), + ("Successfully opened file", "root://"), + ("Closed file", "root://"), ] + def openfile(filename): - if sys.version_info[0] == 2: - return open(filename) - return open(filename, encoding="utf8", errors='ignore') + if sys.version_info[0] == 2: + return open(filename) + return open(filename, encoding="utf8", errors="ignore") + + +def getFiles(d, pattern): + return [ + os.path.join(dp, f) + for dp, dn, filenames in os.walk(d) + for f in filenames + if fnmatch(f, pattern) + ] + -def getFiles(d,pattern): - return [os.path.join(dp, f) for dp, dn, filenames in os.walk(d) for f in filenames if fnmatch(f, pattern)] # return [ f for f in listdir(d) if isfile(join(d,f)) ] -def getCommonFiles(d1,d2,pattern): - l1=getFiles(d1,pattern) - l2=getFiles(d2,pattern) - common=[] + +def getCommonFiles(d1, d2, pattern): + l1 = getFiles(d1, pattern) + l2 = getFiles(d2, pattern) + common = [] for l in l1: - lT=l[len(d1):] - if 'runall' in lT or 'dasquery' in lT: continue - if d2+lT in l2: + lT = l[len(d1) :] + if "runall" in lT or "dasquery" in lT: + continue + if d2 + lT in l2: common.append(lT) return common + def getWorkflow(f): m = re.search("/\d+\.\d+_", f) - if not m: return "(none)" - return m.group().replace("/","").replace("_", "") + if not m: + return "(none)" + return m.group().replace("/", "").replace("_", "") -def checkLines(l1,l2): +def checkLines(l1, l2): filt1 = filteredLines(l1) filt2 = filteredLines(l2) lines = len(filt2) - len(filt1) - if lines>0: - print("You added "+str(lines)+" to "+l2) - if lines<0: - print("You 
removed "+str(-1*lines)+" from "+l2) - + if lines > 0: + print("You added " + str(lines) + " to " + l2) + if lines < 0: + print("You removed " + str(-1 * lines) + " from " + l2) + return (lines, filt1, filt2) + def filteredLines(f): - retval={} + retval = {} for l in openfile(f): - sl=l.strip() - skip=False + sl = l.strip() + skip = False for data in Log_Lines_Filter: - skip = True - for s in data: - if not s in sl: - skip = False - break - if not skip: continue - break - if skip: continue - if 'P Y T H H III A A' in l:continue + skip = True + for s in data: + if not s in sl: + skip = False + break + if not skip: + continue + break + if skip: + continue + if "P Y T H H III A A" in l: + continue # look for and remove timestamps - if '-' in l and ':' in l: - sp=l.strip().split() - - ds=[] - for i in range(0,len(sp)-1): - if sp[i].count('-')==2 and sp[i+1].count(':')==2 and '-20' in sp[i]: - ds.append(sp[i]) #its a date - ds.append(sp[i+1]) #its a date - if len(ds)!=0: - sp2=l.strip().split(' ') - sp3=[] - for i in range(0,len(sp2)): + if "-" in l and ":" in l: + sp = l.strip().split() + + ds = [] + for i in range(0, len(sp) - 1): + if sp[i].count("-") == 2 and sp[i + 1].count(":") == 2 and "-20" in sp[i]: + ds.append(sp[i]) # its a date + ds.append(sp[i + 1]) # its a date + if len(ds) != 0: + sp2 = l.strip().split(" ") + sp3 = [] + for i in range(0, len(sp2)): if sp2[i] not in ds: sp3.append(sp2[i]) - sl=' '.join(sp3) - retval[sl]=1 + sl = " ".join(sp3) + retval[sl] = 1 return retval -def getRelevantDiff(filt1, filt2, l1, l2 ,maxInFile=20): - nPrintTot=0 - keys1=filt1.keys() - keys2=filt2.keys() - newIn1=[] - newIn2=[] +def getRelevantDiff(filt1, filt2, l1, l2, maxInFile=20): + nPrintTot = 0 + + keys1 = filt1.keys() + keys2 = filt2.keys() + newIn1 = [] + newIn2 = [] for k in keys1: if k not in filt2: newIn1.append(k) @@ -101,95 +119,116 @@ def getRelevantDiff(filt1, filt2, l1, l2 ,maxInFile=20): if k not in filt1: newIn2.append(k) - if len(newIn1)>0 or len(newIn2)>0: - print('') - print(len(newIn1),'Lines only in',l1) - nPrint=0 - for l in newIn1: - nPrint=nPrint+1 - if nPrint>maxInFile: break - print(' ',l) - nPrintTot=nPrint - print(len(newIn2),'Lines only in',l2) - nPrint=0 - for l in newIn2: - nPrint=nPrint+1 - if nPrint>maxInFile: break - print(' ',l) - nPrintTot=nPrintTot+nPrint + if len(newIn1) > 0 or len(newIn2) > 0: + print("") + print(len(newIn1), "Lines only in", l1) + nPrint = 0 + for l in newIn1: + nPrint = nPrint + 1 + if nPrint > maxInFile: + break + print(" ", l) + nPrintTot = nPrint + print(len(newIn2), "Lines only in", l2) + nPrint = 0 + for l in newIn2: + nPrint = nPrint + 1 + if nPrint > maxInFile: + break + print(" ", l) + nPrintTot = nPrintTot + nPrint return nPrintTot - def runCommand(c): - p=sub.Popen(c,stdout=sub.PIPE,stderr=sub.PIPE,universal_newlines=True) - output=p.communicate() + p = sub.Popen(c, stdout=sub.PIPE, stderr=sub.PIPE, universal_newlines=True) + output = p.communicate() return output -def checkEventContent(r1,r2): - retVal=True - - output1=runCommand(['ls','-l',r1]) - output2=runCommand(['ls','-l',r2]) - s1=output1[0].split()[4] - s2=output2[0].split()[4] - if abs(float(s2)-float(s1))>0.1*float(s1): - print("Big output file size change? 
in ",r1,s1,s2) - retVal=False - - cmd1 = ['edmEventSize','-v',r1] - cmd2 = ['edmEventSize','-v',r2] - if os.path.exists(r1+'.edmEventSize'): - cmd1 = ['cat',r1+'.edmEventSize'] - if os.path.exists(r2+'.edmEventSize'): - cmd2 = ['cat',r2+'.edmEventSize'] - output1=runCommand(cmd1) - output2=runCommand(cmd2) - - if 'contains no' in output1[1] and 'contains no' in output2[1]: - w=1 + +def checkEventContent(r1, r2): + retVal = True + + output1 = runCommand(["ls", "-l", r1]) + output2 = runCommand(["ls", "-l", r2]) + s1 = output1[0].split()[4] + s2 = output2[0].split()[4] + if abs(float(s2) - float(s1)) > 0.1 * float(s1): + print("Big output file size change? in ", r1, s1, s2) + retVal = False + + cmd1 = ["edmEventSize", "-v", r1] + cmd2 = ["edmEventSize", "-v", r2] + if os.path.exists(r1 + ".edmEventSize"): + cmd1 = ["cat", r1 + ".edmEventSize"] + if os.path.exists(r2 + ".edmEventSize"): + cmd2 = ["cat", r2 + ".edmEventSize"] + output1 = runCommand(cmd1) + output2 = runCommand(cmd2) + + if "contains no" in output1[1] and "contains no" in output2[1]: + w = 1 else: - sp=output1[0].split('\n') - p1=[] + sp = output1[0].split("\n") + p1 = [] for p in sp: - if len(p.split())>0: + if len(p.split()) > 0: p1.append(p.split()[0]) - sp=output2[0].split('\n') - p2=[] + sp = output2[0].split("\n") + p2 = [] for p in sp: - if len(p.split())>0: + if len(p.split()) > 0: p2.append(p.split()[0]) - common=[] + common = [] for p in p1: - if p in p2: common.append(p) - if len(common)!=len(p1) or len(common)!=len(p2): - print('Change in products found in',r1) + if p in p2: + common.append(p) + if len(common) != len(p1) or len(common) != len(p2): + print("Change in products found in", r1) for p in p1: - if p not in common: print(' Product missing '+p) + if p not in common: + print(" Product missing " + p) for p in p2: - if p not in common: print(' Product added '+p) - retVal=False + if p not in common: + print(" Product added " + p) + retVal = False return retVal -def checkDQMSize(r1,r2,diff, wfs): - haveDQMChecker=False + +def checkDQMSize(r1, r2, diff, wfs): + haveDQMChecker = False for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') -# print(path) - exe_file = os.path.join(path, 'dqmMemoryStats.py') + # print(path) + exe_file = os.path.join(path, "dqmMemoryStats.py") if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK): - haveDQMChecker=True + haveDQMChecker = True break - if not haveDQMChecker: - print('Missing dqmMemoryStats in this release') + if not haveDQMChecker: + print("Missing dqmMemoryStats in this release") return -1 - output,error=runCommand(['dqmMemoryStats.py','-x','-u','KiB','-p3','-c0','-d2','--summary','-r',r1,'-i',r2]) + output, error = runCommand( + [ + "dqmMemoryStats.py", + "-x", + "-u", + "KiB", + "-p3", + "-c0", + "-d2", + "--summary", + "-r", + r1, + "-i", + r2, + ] + ) lines = output.splitlines() total = re.search("-?\d+\.\d+", lines[-1]) if not total: - print('Weird output',r1) + print("Weird output", r1) print(output) return -2 kib = float(total.group()) @@ -197,80 +236,90 @@ def checkDQMSize(r1,r2,diff, wfs): print(lines, diff) maxdiff = 10 for line in lines: - if re.match("\s*-?\d+.*", line): # normal output line + if re.match("\s*-?\d+.*", line): # normal output line if line not in diff: if len(diff) == maxdiff: - diff.append(" ... "); + diff.append(" ... 
") wfs.append(getWorkflow(r1)) - if len(diff) >= maxdiff: continue # limit amount of output + if len(diff) >= maxdiff: + continue # limit amount of output diff.append(line) wfs.append(getWorkflow(r1)) else: idx = diff.index(line) if not wfs[idx].endswith(",..."): wfs[idx] += ",..." - + return kib def summaryJR(jrDir): - nDiff=0 + nDiff = 0 print(jrDir) - dirs=[] - #find directories at top level + dirs = [] + # find directories at top level for root, dirs, _ in os.walk(jrDir): break - nAll=0 - nOK=0 + nAll = 0 + nOK = 0 for d, subdir, files in os.walk(jrDir): - if not d.split('/')[-1].startswith('all_'): continue - if not '_' in d: continue - relative_d = d.replace(root,'') - diffs=[file for file in files if file.endswith('.png')] - if len(diffs)>0: - print('JR results differ',len(diffs),relative_d) - nDiff=nDiff+len(diffs) - logs=[file for file in files if file.endswith('.log')] - nAll+=len(logs) + if not d.split("/")[-1].startswith("all_"): + continue + if not "_" in d: + continue + relative_d = d.replace(root, "") + diffs = [file for file in files if file.endswith(".png")] + if len(diffs) > 0: + print("JR results differ", len(diffs), relative_d) + nDiff = nDiff + len(diffs) + logs = [file for file in files if file.endswith(".log")] + nAll += len(logs) for log in logs: - log = os.path.join(d,log) - output=runCommand(['grep','DONE calling validate',log]) - if len(output[0])>0: - nOK+=1 + log = os.path.join(d, log) + output = runCommand(["grep", "DONE calling validate", log]) + if len(output[0]) > 0: + nOK += 1 else: - print('JR results failed',relative_d) - return nDiff,nAll,nOK + print("JR results failed", relative_d) + return nDiff, nAll, nOK + def parseNum(s): - return int(s[1:-1].split('/')[0]) - + return int(s[1:-1].split("/")[0]) + def summaryComp(compDir): print(compDir) - files=[] + files = [] for root, dirs, files in os.walk(compDir): break - comps=[] + comps = [] for f in files: - if 'log' in f[-3:]: - comps.append(root+'/'+f) + if "log" in f[-3:]: + comps.append(root + "/" + f) - results=[0,0,0,0,0,0,0] + results = [0, 0, 0, 0, 0, 0, 0] for comp in comps: - loc=[0,0,0,0,0,0] + loc = [0, 0, 0, 0, 0, 0] for l in open(comp): - if '- summary of' in l: loc[0]=int(l.split()[3]) - if 'o Failiures:' in l: loc[1]=parseNum(l.split()[3]) - if 'o Nulls:' in l: loc[2]=parseNum(l.split()[3]) - if 'o Successes:' in l: loc[3]=parseNum(l.split()[3]) - if 'o Skipped:' in l: loc[4]=parseNum(l.split()[3]) - if 'o Missing objects:' in l: loc[5]=int(l.split()[3]) - print('Histogram comparison details',comp,loc) - for i in range(0,5): - results[i]=results[i]+loc[i] - results[6]=results[6]+1 + if "- summary of" in l: + loc[0] = int(l.split()[3]) + if "o Failiures:" in l: + loc[1] = parseNum(l.split()[3]) + if "o Nulls:" in l: + loc[2] = parseNum(l.split()[3]) + if "o Successes:" in l: + loc[3] = parseNum(l.split()[3]) + if "o Skipped:" in l: + loc[4] = parseNum(l.split()[3]) + if "o Missing objects:" in l: + loc[5] = int(l.split()[3]) + print("Histogram comparison details", comp, loc) + for i in range(0, 5): + results[i] = results[i] + loc[i] + results[6] = results[6] + 1 return results @@ -278,137 +327,172 @@ def summaryComp(compDir): # # # -qaIssues=False +qaIssues = False # one way to set up for local tests.. 
-#login to ssh cmssdt server (see CMSSDT_SERVER in ./cmssdt.sh for server name) -#copy out data from a recent pull request comparison -#cd /data/sdt/SDT/jenkins-artifacts/ib-baseline-tests/CMSSW_10_0_X_2017-11-05-2300/slc6_amd64_gcc630/-GenuineIntel -#scp -r matrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/ -#cd ../../../../pull-request-integration/PR-21181/24200/ -#scp -r runTheMatrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/. -#cd ../../../../baseLineComparions/CMSSW_10_0_X_2017-11-05-2300+21181/ -#scp -r 23485 dlange@cmsdev01:/build/dlange/171103/t1/. - -#https://cmssdt.cern.ch/SDT/jenkins-artifacts/baseLineComparisons/CMSSW_9_0_X_2017-03-22-1100+18042/18957/validateJR/ -baseDir='../t1/runTheMatrix-results' -testDir='../t1/matrix-results' -jrDir='../t1/23485/validateJR' -compDir='../t1/23485' - -run="all" -if len(sys.argv)==6: - run = sys.argv[5] -if len(sys.argv)>=5: - baseDir=sys.argv[1].rstrip("/") - testDir=sys.argv[2].rstrip("/") - jrDir=sys.argv[3].rstrip("/") - compDir=sys.argv[4].rstrip("/") +# login to ssh cmssdt server (see CMSSDT_SERVER in ./cmssdt.sh for server name) +# copy out data from a recent pull request comparison +# cd /data/sdt/SDT/jenkins-artifacts/ib-baseline-tests/CMSSW_10_0_X_2017-11-05-2300/slc6_amd64_gcc630/-GenuineIntel +# scp -r matrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/ +# cd ../../../../pull-request-integration/PR-21181/24200/ +# scp -r runTheMatrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/. +# cd ../../../../baseLineComparions/CMSSW_10_0_X_2017-11-05-2300+21181/ +# scp -r 23485 dlange@cmsdev01:/build/dlange/171103/t1/. + +# https://cmssdt.cern.ch/SDT/jenkins-artifacts/baseLineComparisons/CMSSW_9_0_X_2017-03-22-1100+18042/18957/validateJR/ +baseDir = "../t1/runTheMatrix-results" +testDir = "../t1/matrix-results" +jrDir = "../t1/23485/validateJR" +compDir = "../t1/23485" + +run = "all" +if len(sys.argv) == 6: + run = sys.argv[5] +if len(sys.argv) >= 5: + baseDir = sys.argv[1].rstrip("/") + testDir = sys.argv[2].rstrip("/") + jrDir = sys.argv[3].rstrip("/") + compDir = sys.argv[4].rstrip("/") #### check the printouts -lines=0 -lChanges=False -nLog=0 -nPrintTot=0 -stopPrint=0 -sameEvts=True -nRoot=0 -newDQM=0 -nDQM=0 -diff,wfs=[],[] -if run in ['all', 'events']: - if not os.path.exists('comparison-events.json'): - for l in getCommonFiles(baseDir,testDir,'step*.log'): - lCount, filt1, filt2 = checkLines(baseDir+l,testDir+l) - lines=lines+lCount - if nPrintTot<1000: - nprint=getRelevantDiff(filt1, filt2, baseDir+l, testDir+l) - nPrintTot=nPrintTot+nprint - else: - if stopPrint==0: - print('Skipping further diff comparisons. 
Too many diffs') - stopPrint=1 - nLog=nLog+1 - if lines>0: - lChanges=True - #### compare edmEventSize on each to look for new missing candidates - for r in getCommonFiles(baseDir,testDir,'step*.root'): - if 'inDQM.root' not in r: - checkResult=checkEventContent(baseDir+r,testDir+r) - sameEvts=sameEvts and checkResult - nRoot=nRoot+1 - for r in getCommonFiles(baseDir,testDir,'DQM*.root'): - t=checkDQMSize(baseDir+r,testDir+r,diff,wfs) - print(r,t) - newDQM=newDQM+t - nDQM=nDQM+1 - with open('comparison-events.json', 'w') as f: - json.dump([lines, lChanges, nLog, nPrintTot, stopPrint, sameEvts, nRoot, newDQM, nDQM, diff, wfs], f) - else: - with open('comparison-events.json') as f: - (lines, lChanges, nLog, nPrintTot, stopPrint, sameEvts, nRoot, newDQM, nDQM, diff, wfs) = json.load(f) - - print("Logs:", lines, lChanges, nLog, nPrintTot, stopPrint) - print("Events:", sameEvts, nRoot, newDQM, nDQM, diff, wfs) - if lines>0 : - print("SUMMARY You potentially added "+str(lines)+" lines to the logs") - elif lines<0 : - print("SUMMARY You potentially removed "+str(-1*lines)+" lines from the logs") - else: - print("SUMMARY No significant changes to the logs found") - - if lChanges: - qaIssues=True - - if not sameEvts: - qaIssues=True - print('SUMMARY ROOTFileChecks: Some differences in event products or their sizes found') - print('\n') - if run == "events": - sys.exit(0) +lines = 0 +lChanges = False +nLog = 0 +nPrintTot = 0 +stopPrint = 0 +sameEvts = True +nRoot = 0 +newDQM = 0 +nDQM = 0 +diff, wfs = [], [] +if run in ["all", "events"]: + if not os.path.exists("comparison-events.json"): + for l in getCommonFiles(baseDir, testDir, "step*.log"): + lCount, filt1, filt2 = checkLines(baseDir + l, testDir + l) + lines = lines + lCount + if nPrintTot < 1000: + nprint = getRelevantDiff(filt1, filt2, baseDir + l, testDir + l) + nPrintTot = nPrintTot + nprint + else: + if stopPrint == 0: + print("Skipping further diff comparisons. 
Too many diffs") + stopPrint = 1 + nLog = nLog + 1 + if lines > 0: + lChanges = True + #### compare edmEventSize on each to look for new missing candidates + for r in getCommonFiles(baseDir, testDir, "step*.root"): + if "inDQM.root" not in r: + checkResult = checkEventContent(baseDir + r, testDir + r) + sameEvts = sameEvts and checkResult + nRoot = nRoot + 1 + for r in getCommonFiles(baseDir, testDir, "DQM*.root"): + t = checkDQMSize(baseDir + r, testDir + r, diff, wfs) + print(r, t) + newDQM = newDQM + t + nDQM = nDQM + 1 + with open("comparison-events.json", "w") as f: + json.dump( + [ + lines, + lChanges, + nLog, + nPrintTot, + stopPrint, + sameEvts, + nRoot, + newDQM, + nDQM, + diff, + wfs, + ], + f, + ) + else: + with open("comparison-events.json") as f: + ( + lines, + lChanges, + nLog, + nPrintTot, + stopPrint, + sameEvts, + nRoot, + newDQM, + nDQM, + diff, + wfs, + ) = json.load(f) + + print("Logs:", lines, lChanges, nLog, nPrintTot, stopPrint) + print("Events:", sameEvts, nRoot, newDQM, nDQM, diff, wfs) + if lines > 0: + print("SUMMARY You potentially added " + str(lines) + " lines to the logs") + elif lines < 0: + print("SUMMARY You potentially removed " + str(-1 * lines) + " lines from the logs") + else: + print("SUMMARY No significant changes to the logs found") + + if lChanges: + qaIssues = True + + if not sameEvts: + qaIssues = True + print("SUMMARY ROOTFileChecks: Some differences in event products or their sizes found") + print("\n") + if run == "events": + sys.exit(0) # now check the JR comparisons for differences nDiff = 0 nAll = 0 nOK = 0 -if run in ['all', 'JR']: - if not os.path.exists('comparison-JR.json'): - nDiff,nAll,nOK=summaryJR(jrDir) - with open('comparison-JR.json', 'w') as f: - json.dump([nDiff,nAll,nOK], f) - else: - with open('comparison-JR.json') as f: - (nDiff,nAll,nOK) = json.load(f) - print('SUMMARY Reco comparison results:',nDiff,'differences found in the comparisons') - if nAll!=nOK: - print('SUMMARY Reco comparison had ',nAll-nOK,'failed jobs') - print('\n') - if run == "JR": - sys.exit(0) +if run in ["all", "JR"]: + if not os.path.exists("comparison-JR.json"): + nDiff, nAll, nOK = summaryJR(jrDir) + with open("comparison-JR.json", "w") as f: + json.dump([nDiff, nAll, nOK], f) + else: + with open("comparison-JR.json") as f: + (nDiff, nAll, nOK) = json.load(f) + print("SUMMARY Reco comparison results:", nDiff, "differences found in the comparisons") + if nAll != nOK: + print("SUMMARY Reco comparison had ", nAll - nOK, "failed jobs") + print("\n") + if run == "JR": + sys.exit(0) # not check for default comparison compSummary = [] -if not os.path.exists('comparison-comp.json'): - compSummary=summaryComp(compDir) - with open('comparison-comp.json', 'w') as f: - json.dump(compSummary, f) +if not os.path.exists("comparison-comp.json"): + compSummary = summaryComp(compDir) + with open("comparison-comp.json", "w") as f: + json.dump(compSummary, f) else: - with open('comparison-comp.json') as f: + with open("comparison-comp.json") as f: compSummary = json.load(f) -print('SUMMARY DQMHistoTests: Total files compared:',compSummary[6]) -print('SUMMARY DQMHistoTests: Total histograms compared:',compSummary[0]) -print('SUMMARY DQMHistoTests: Total failures:',compSummary[1]) -print('SUMMARY DQMHistoTests: Total nulls:',compSummary[2]) -print('SUMMARY DQMHistoTests: Total successes:',compSummary[3]) -print('SUMMARY DQMHistoTests: Total skipped:',compSummary[4]) -print('SUMMARY DQMHistoTests: Total Missing objects:',compSummary[5]) +print("SUMMARY DQMHistoTests: Total 
files compared:", compSummary[6]) +print("SUMMARY DQMHistoTests: Total histograms compared:", compSummary[0]) +print("SUMMARY DQMHistoTests: Total failures:", compSummary[1]) +print("SUMMARY DQMHistoTests: Total nulls:", compSummary[2]) +print("SUMMARY DQMHistoTests: Total successes:", compSummary[3]) +print("SUMMARY DQMHistoTests: Total skipped:", compSummary[4]) +print("SUMMARY DQMHistoTests: Total Missing objects:", compSummary[5]) -print('SUMMARY DQMHistoSizes: Histogram memory added:',newDQM,'KiB(',nDQM,'files compared)') -for line, wf in zip(diff,wfs): - print('SUMMARY DQMHistoSizes: changed (',wf,'):',line) +print("SUMMARY DQMHistoSizes: Histogram memory added:", newDQM, "KiB(", nDQM, "files compared)") +for line, wf in zip(diff, wfs): + print("SUMMARY DQMHistoSizes: changed (", wf, "):", line) #### conclude -print("SUMMARY Checked",nLog,"log files,",nRoot,"edm output root files,",compSummary[6],"DQM output files") +print( + "SUMMARY Checked", + nLog, + "log files,", + nRoot, + "edm output root files,", + compSummary[6], + "DQM output files", +) if not qaIssues: print("No potential problems in log/root QA checks!") diff --git a/logUpdater.py b/logUpdater.py index b19f0e66a097..819c03c8f624 100755 --- a/logUpdater.py +++ b/logUpdater.py @@ -11,112 +11,147 @@ from os.path import dirname, abspath, join from cmsutils import doCmd, getIBReleaseInfo from time import sleep -SCRIPT_DIR=dirname(abspath(__file__)) -class LogUpdater(object): +SCRIPT_DIR = dirname(abspath(__file__)) + +class LogUpdater(object): def __init__(self, dirIn=None, dryRun=False, remote=None, webDir="/data/sdt/buildlogs/"): if not remote: - with open(join(SCRIPT_DIR,"cmssdt.sh")) as ref: - remote = "cmsbuild@"+[ line.split("=")[-1].strip() for line in ref.readlines() if "CMSSDT_SERVER=" in line][0] + with open(join(SCRIPT_DIR, "cmssdt.sh")) as ref: + remote = ( + "cmsbuild@" + + [ + line.split("=")[-1].strip() + for line in ref.readlines() + if "CMSSDT_SERVER=" in line + ][0] + ) self.dryRun = dryRun self.remote = remote self.cmsswBuildDir = dirIn rel = os.path.basename(dirIn) self.release = rel rc, day, hour = getIBReleaseInfo(rel) - self.webTargetDir = webDir + "/" + os.environ[ - "SCRAM_ARCH"] + "/www/" + day + "/" + rc + "-" + day + "-" + hour + "/" + self.release + self.webTargetDir = ( + webDir + + "/" + + os.environ["SCRAM_ARCH"] + + "/www/" + + day + + "/" + + rc + + "-" + + day + + "-" + + hour + + "/" + + self.release + ) self.ssh_opt = "-o CheckHostIP=no -o ConnectTimeout=60 -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o BatchMode=yes -o PasswordAuthentication=no" return def updateUnitTestLogs(self, subdir=""): - - print("\n--> going to copy unit test logs to", self.webTargetDir, '... \n') + print("\n--> going to copy unit test logs to", self.webTargetDir, "... \n") # copy back the test and relval logs to the install area # check size first ... sometimes the log _grows_ to tens of GB !! - testLogs = ['unitTestLogs.zip', 'unitTests-summary.log', 'unitTestResults.pkl', 'unitTests1.log'] + testLogs = [ + "unitTestLogs.zip", + "unitTests-summary.log", + "unitTestResults.pkl", + "unitTests1.log", + ] for tl in testLogs: - self.copyLogs(tl, '.', self.webTargetDir + "/" + subdir) + self.copyLogs(tl, ".", self.webTargetDir + "/" + subdir) return def updateGeomTestLogs(self): - print("\n--> going to copy Geom test logs to", self.webTargetDir, '... \n') - testLogs = ['dddreport.log', 'domcount.log'] + print("\n--> going to copy Geom test logs to", self.webTargetDir, "... 
\n") + testLogs = ["dddreport.log", "domcount.log"] for tl in testLogs: - self.copyLogs(tl, '.', self.webTargetDir) - self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs')) + self.copyLogs(tl, ".", self.webTargetDir) + self.copyLogs(tl, ".", os.path.join(self.webTargetDir, "testLogs")) return def updateDupDictTestLogs(self): - print("\n--> going to copy dup dict test logs to", self.webTargetDir, '... \n') - testLogs = ['dupDict-*.log'] + print("\n--> going to copy dup dict test logs to", self.webTargetDir, "... \n") + testLogs = ["dupDict-*.log"] for tl in testLogs: - self.copyLogs(tl, '.', self.webTargetDir) - self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs')) + self.copyLogs(tl, ".", self.webTargetDir) + self.copyLogs(tl, ".", os.path.join(self.webTargetDir, "testLogs")) return def updateLogFile(self, fileIn, subTrgDir=None): desdir = self.webTargetDir - if subTrgDir: desdir = os.path.join(desdir, subTrgDir) - print("\n--> going to copy " + fileIn + " log to ", desdir, '... \n') - self.copyLogs(fileIn, '.', desdir) + if subTrgDir: + desdir = os.path.join(desdir, subTrgDir) + print("\n--> going to copy " + fileIn + " log to ", desdir, "... \n") + self.copyLogs(fileIn, ".", desdir) return def updateCodeRulesCheckerLogs(self): - print("\n--> going to copy cms code rules logs to", self.webTargetDir, '... \n') - self.copyLogs('codeRules', '.', self.webTargetDir) + print("\n--> going to copy cms code rules logs to", self.webTargetDir, "... \n") + self.copyLogs("codeRules", ".", self.webTargetDir) return def updateRelValMatrixPartialLogs(self, partialSubDir, dirToSend): - destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs') - print("\n--> going to copy pyrelval partial matrix logs to", destination, '... \n') + destination = os.path.join(self.webTargetDir, "pyRelValPartialLogs") + print("\n--> going to copy pyrelval partial matrix logs to", destination, "... \n") self.copyLogs(dirToSend, partialSubDir, destination) self.runRemoteCmd("touch " + os.path.join(destination, dirToSend, "wf.done")) return def getDoneRelvals(self): wfDoneFile = "wf.done" - destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', "*", wfDoneFile) + destination = os.path.join(self.webTargetDir, "pyRelValPartialLogs", "*", wfDoneFile) code, out = self.runRemoteCmd("ls " + destination, debug=False) - return [ wf.split("/")[-2].split("_")[0] for wf in out.split("\n") if wf.endswith(wfDoneFile)] + return [ + wf.split("/")[-2].split("_")[0] for wf in out.split("\n") if wf.endswith(wfDoneFile) + ] def relvalAlreadyDone(self, wf): wfDoneFile = "wf.done" - destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', str(wf) + "_*", wfDoneFile) + destination = os.path.join( + self.webTargetDir, "pyRelValPartialLogs", str(wf) + "_*", wfDoneFile + ) code, out = self.runRemoteCmd("ls -d " + destination) - return ((code == 0) and out.endswith(wfDoneFile)) + return (code == 0) and out.endswith(wfDoneFile) def updateAddOnTestsLogs(self): - print("\n--> going to copy addOn logs to", self.webTargetDir, '... \n') - self.copyLogs('addOnTests.log', '.', self.webTargetDir) - self.copyLogs('addOnTests.zip', 'addOnTests/logs', self.webTargetDir) - self.copyLogs('addOnTests.pkl', 'addOnTests/logs', os.path.join(self.webTargetDir, 'addOnTests/logs')) + print("\n--> going to copy addOn logs to", self.webTargetDir, "... 
\n") + self.copyLogs("addOnTests.log", ".", self.webTargetDir) + self.copyLogs("addOnTests.zip", "addOnTests/logs", self.webTargetDir) + self.copyLogs( + "addOnTests.pkl", "addOnTests/logs", os.path.join(self.webTargetDir, "addOnTests/logs") + ) return def updateIgnominyLogs(self): - print("\n--> going to copy ignominy logs to", self.webTargetDir, '... \n') - testLogs = ['dependencies.txt.gz', 'products.txt.gz', 'logwarnings.gz', 'metrics'] + print("\n--> going to copy ignominy logs to", self.webTargetDir, "... \n") + testLogs = ["dependencies.txt.gz", "products.txt.gz", "logwarnings.gz", "metrics"] for tl in testLogs: - self.copyLogs(tl, 'igRun', os.path.join(self.webTargetDir, 'igRun')) + self.copyLogs(tl, "igRun", os.path.join(self.webTargetDir, "igRun")) return def updateProductionRelValLogs(self, workFlows): - print("\n--> going to copy Production RelVals logs to", self.webTargetDir, '... \n') - wwwProdDir = os.path.join(self.webTargetDir, 'prodRelVal') - self.copyLogs('prodRelVal.log', '.', wwwProdDir) + print("\n--> going to copy Production RelVals logs to", self.webTargetDir, "... \n") + wwwProdDir = os.path.join(self.webTargetDir, "prodRelVal") + self.copyLogs("prodRelVal.log", ".", wwwProdDir) for wf in workFlows: - self.copyLogs('timingInfo.txt', 'prodRelVal/wf/' + wf, os.path.join(wwwProdDir, 'wf', wf)) + self.copyLogs( + "timingInfo.txt", "prodRelVal/wf/" + wf, os.path.join(wwwProdDir, "wf", wf) + ) return - def updateBuildSetLogs(self, appType='fwlite'): - print("\n--> going to copy BuildSet logs to", self.webTargetDir, '... \n') - wwwBSDir = os.path.join(self.webTargetDir, 'BuildSet') - self.copyLogs(appType, 'BuildSet', wwwBSDir) + def updateBuildSetLogs(self, appType="fwlite"): + print("\n--> going to copy BuildSet logs to", self.webTargetDir, "... \n") + wwwBSDir = os.path.join(self.webTargetDir, "BuildSet") + self.copyLogs(appType, "BuildSet", wwwBSDir) return def copyLogs(self, what, logSubDir="", tgtDirIn=None): - if not tgtDirIn: tgtDirIn = self.webTargetDir + if not tgtDirIn: + tgtDirIn = self.webTargetDir self.runRemoteCmd("mkdir -p " + tgtDirIn) self.copy2Remote(os.path.join(self.cmsswBuildDir, logSubDir, what), tgtDirIn + "/") @@ -134,9 +169,11 @@ def runRemoteHostCmd(self, cmd, host, debug=True): else: for i in range(10): err, out = doCmd(cmd, debug=debug) - if not err: return (err, out) + if not err: + return (err, out) for l in out.split("\n"): - if "CONNECTION=OK" in l: return (err, out) + if "CONNECTION=OK" in l: + return (err, out) sleep(60) return doCmd(cmd, debug=debug) except Exception as e: @@ -151,7 +188,8 @@ def copy2RemoteHost(self, src, des, host): else: for i in range(10): err, out = doCmd(cmd) - if not err: return (err, out) + if not err: + return (err, out) sleep(60) return doCmd(cmd) except Exception as e: diff --git a/logreaderUtils.py b/logreaderUtils.py index 5eb621ebb071..1653200c31d2 100644 --- a/logreaderUtils.py +++ b/logreaderUtils.py @@ -12,9 +12,7 @@ class ResultTypeEnum(object): # Do not forget to include to list if ResultTypeEnum is updated # Will be same ordering as in Log reader interface -all_controls = [ - ResultTypeEnum.ISSUE, ResultTypeEnum.TEST -] +all_controls = [ResultTypeEnum.ISSUE, ResultTypeEnum.TEST] def add_exception_to_config(line, index, config_list, custom_rule_list=[]): @@ -23,33 +21,33 @@ def add_exception_to_config(line, index, config_list, custom_rule_list=[]): # will ignore " IgnoreCompletely" messages "str_to_match": "Begin(?! 
IgnoreCompletely)(.*Exception)", "name": "{0}", - "control_type": ResultTypeEnum.ISSUE + "control_type": ResultTypeEnum.ISSUE, }, { "str_to_match": "edm::service::InitRootHandlers", "name": "Segmentation fault", - "control_type": ResultTypeEnum.ISSUE + "control_type": ResultTypeEnum.ISSUE, }, { "str_to_match": "sig_dostack_then_abort", "name": "sig_dostack_then_abort", - "control_type": ResultTypeEnum.ISSUE + "control_type": ResultTypeEnum.ISSUE, }, { "str_to_match": ": runtime error:", "name": "Runtime error", - "control_type": ResultTypeEnum.ISSUE + "control_type": ResultTypeEnum.ISSUE, }, { "str_to_match": ": Assertion .* failed", "name": "Assertion failure", - "control_type": ResultTypeEnum.ISSUE + "control_type": ResultTypeEnum.ISSUE, }, { "str_to_match": "==ERROR: AddressSanitizer:", "name": "Address Sanitizer error", - "control_type": ResultTypeEnum.ISSUE - } + "control_type": ResultTypeEnum.ISSUE, + }, ] line_nr = index + 1 @@ -64,7 +62,7 @@ def add_exception_to_config(line, index, config_list, custom_rule_list=[]): "lineStart": line_nr, "lineEnd": line_nr, "name": name + " at line #" + str(line_nr), - "control_type": rule["control_type"] + "control_type": rule["control_type"], } config_list.append(new_exception_config) return config_list diff --git a/logwatch.py b/logwatch.py index 0ccd5b0b932c..66c8cde15c22 100755 --- a/logwatch.py +++ b/logwatch.py @@ -6,70 +6,86 @@ from hashlib import sha256 from time import time -LOGWATCH_APACHE_IGNORE_AGENTS = ["www.google.com/bot.html", "ahrefs.com", "yandex.com", "www.exabot.com", "www.bing.com"] +LOGWATCH_APACHE_IGNORE_AGENTS = [ + "www.google.com/bot.html", + "ahrefs.com", + "yandex.com", + "www.exabot.com", + "www.bing.com", +] -def run_cmd (cmd, exit_on_error=True): - err, out = getstatusoutput(cmd) - if err and exit_on_error: - print(out) - exit (1) - return out -class logwatch (object): - def __init__ (self, service, log_dir="/var/log"): - self.log_dir = join(log_dir,"logwatch_" + service) +def run_cmd(cmd, exit_on_error=True): + err, out = getstatusoutput(cmd) + if err and exit_on_error: + print(out) + exit(1) + return out - def process(self, logs, callback, **kwrds): - if not logs: return True, 0 - info_file = join(self.log_dir, "info") - if not exists ("%s/logs" % self.log_dir): run_cmd ("mkdir -p %s/logs" % self.log_dir) - prev_lnum, prev_hash, count, data = 1, "", 0, [] - if exists(info_file): - prev_hash,ln = run_cmd("head -1 %s" % info_file).strip().split(" ",1) - prev_lnum = int(ln) - if prev_lnum<1: prev_lnum=1 - found = False - for log in reversed(logs): - service_log = join (self.log_dir, "logs", basename(log)) - if (len(data)>0) and ((time()-getmtime(log))<600):return True, 0 - if found: - if exists (service_log): - run_cmd("rm -f %s" % service_log) - continue - else: break - run_cmd ("rsync -a %s %s" % (log, service_log)) - cur_hash = sha256(run_cmd("head -1 %s" % service_log).encode()).hexdigest() - data.insert(0,[log , service_log, 1, cur_hash, False]) - if cur_hash == prev_hash: - found = True - data[0][2] = prev_lnum - data[-1][4] = True - for item in data: - lnum, service_log = item[2], item[1] - get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log) - if lnum<=1: get_lines_cmd = "cat %s" % service_log - print("Processing %s:%s" % (item[0], str(lnum))) - lnum -= 1 - xlines = 0 - for line in run_cmd (get_lines_cmd).split ("\n"): - count += 1 - lnum += 1 - xlines += 1 - try: ok = callback(line, count, **kwrds) - except: ok = False - if not ok: - if (prev_lnum!=lnum) or (prev_hash!=item[3]): - run_cmd("echo '%s %s' 
> %s" % (item[3], str(lnum),info_file)) - return ok, count - if (xlines%1000)==0: - prev_lnum = lnum - prev_hash = item[3] - run_cmd("echo '%s %s' > %s" % (item[3], str(lnum),info_file)) - if (prev_lnum!=lnum) or (prev_hash!=item[3]): - prev_lnum=lnum - prev_hash=item[3] - cmd = "echo '%s %s' > %s" % (item[3], str(lnum),info_file) - if not item[4]: cmd = cmd + " && rm -f %s" % service_log - run_cmd(cmd) - return True, count +class logwatch(object): + def __init__(self, service, log_dir="/var/log"): + self.log_dir = join(log_dir, "logwatch_" + service) + + def process(self, logs, callback, **kwrds): + if not logs: + return True, 0 + info_file = join(self.log_dir, "info") + if not exists("%s/logs" % self.log_dir): + run_cmd("mkdir -p %s/logs" % self.log_dir) + prev_lnum, prev_hash, count, data = 1, "", 0, [] + if exists(info_file): + prev_hash, ln = run_cmd("head -1 %s" % info_file).strip().split(" ", 1) + prev_lnum = int(ln) + if prev_lnum < 1: + prev_lnum = 1 + found = False + for log in reversed(logs): + service_log = join(self.log_dir, "logs", basename(log)) + if (len(data) > 0) and ((time() - getmtime(log)) < 600): + return True, 0 + if found: + if exists(service_log): + run_cmd("rm -f %s" % service_log) + continue + else: + break + run_cmd("rsync -a %s %s" % (log, service_log)) + cur_hash = sha256(run_cmd("head -1 %s" % service_log).encode()).hexdigest() + data.insert(0, [log, service_log, 1, cur_hash, False]) + if cur_hash == prev_hash: + found = True + data[0][2] = prev_lnum + data[-1][4] = True + for item in data: + lnum, service_log = item[2], item[1] + get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log) + if lnum <= 1: + get_lines_cmd = "cat %s" % service_log + print("Processing %s:%s" % (item[0], str(lnum))) + lnum -= 1 + xlines = 0 + for line in run_cmd(get_lines_cmd).split("\n"): + count += 1 + lnum += 1 + xlines += 1 + try: + ok = callback(line, count, **kwrds) + except: + ok = False + if not ok: + if (prev_lnum != lnum) or (prev_hash != item[3]): + run_cmd("echo '%s %s' > %s" % (item[3], str(lnum), info_file)) + return ok, count + if (xlines % 1000) == 0: + prev_lnum = lnum + prev_hash = item[3] + run_cmd("echo '%s %s' > %s" % (item[3], str(lnum), info_file)) + if (prev_lnum != lnum) or (prev_hash != item[3]): + prev_lnum = lnum + prev_hash = item[3] + cmd = "echo '%s %s' > %s" % (item[3], str(lnum), info_file) + if not item[4]: + cmd = cmd + " && rm -f %s" % service_log + run_cmd(cmd) + return True, count diff --git a/lxr/checkout-version.py b/lxr/checkout-version.py index b8947ff9872d..16ae4f0c49e1 100755 --- a/lxr/checkout-version.py +++ b/lxr/checkout-version.py @@ -3,25 +3,28 @@ from sys import exit from os.path import isfile, islink from subprocess import getstatusoutput as cmd -e, total =cmd("find . -type f | grep -v '/.git/' |wc -l") -e, o = cmd ('git log --name-only --pretty=format:"T:%at"') + +e, total = cmd("find . 
-type f | grep -v '/.git/' |wc -l") +e, o = cmd('git log --name-only --pretty=format:"T:%at"') if e: - print (o) - exit(1) + print(o) + exit(1) cache = {} -time=0 -cnt=0 +time = 0 +cnt = 0 for l in o.split("\n"): - if not l: continue - if l[:2]=='T:': - time=int(l[2:]) - continue - if l in cache: continue - if isfile(l) and not islink(l): - cnt += 1 - cache[l]=time - utime(l, (time, time)) - print ("[%s/%s] %s: %s" % (cnt, total, l, time)) - else: - cache[l]=0 + if not l: + continue + if l[:2] == "T:": + time = int(l[2:]) + continue + if l in cache: + continue + if isfile(l) and not islink(l): + cnt += 1 + cache[l] = time + utime(l, (time, time)) + print("[%s/%s] %s: %s" % (cnt, total, l, time)) + else: + cache[l] = 0 diff --git a/mark_commit_status.py b/mark_commit_status.py index 86dfed31b503..22a2dd7507d5 100755 --- a/mark_commit_status.py +++ b/mark_commit_status.py @@ -8,33 +8,92 @@ from __future__ import print_function from optparse import OptionParser -from github_utils import api_rate_limits, mark_commit_status, get_combined_statuses, get_pr_latest_commit +from github_utils import ( + api_rate_limits, + mark_commit_status, + get_combined_statuses, + get_pr_latest_commit, +) from sys import exit if __name__ == "__main__": - parser = OptionParser(usage="%prog") - parser.add_option("-c", "--commit", dest="commit", help="git commit for which set the status", type=str, default=None) - parser.add_option("-p", "--pr", dest="pr", help="github pr for which set the status", type=str, default=None) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-d", "--description", dest="description", help="Description of the status", type=str, default="Test running") - parser.add_option("-C", "--context", dest="context", help="Status context", type=str, default="default") - parser.add_option("-u", "--url", dest="url", help="Status results URL", type=str, default="") - parser.add_option("-s", "--state", dest="state", help="State of the status e.g. pending, failure, error or success", type=str, default='pending') - parser.add_option("-R", "--reset-all", dest="reset_all", help="Reset all matching contexts", action="store_true", default=False) - parser.add_option("-e", "--if-exists", dest="if_exists", help="Only set the status if context already exists", action="store_true", default=False) - opts, args = parser.parse_args() - - if opts.pr: - opts.commit = get_pr_latest_commit(opts.pr, opts.repository) - if opts.if_exists: - statues = get_combined_statuses(opts.commit, opts.repository) - if 'statuses' in statues: - found = False - for s in statues['statuses']: - if s['context'] != opts.context: - continue - found = True - break - if not found: exit(0) - mark_commit_status(opts.commit, opts.repository, opts.context, opts.state, opts.url, opts.description, reset=opts.reset_all) + parser = OptionParser(usage="%prog") + parser.add_option( + "-c", + "--commit", + dest="commit", + help="git commit for which set the status", + type=str, + default=None, + ) + parser.add_option( + "-p", "--pr", dest="pr", help="github pr for which set the status", type=str, default=None + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-d", + "--description", + dest="description", + help="Description of the status", + type=str, + default="Test running", + ) + parser.add_option( + "-C", "--context", dest="context", help="Status context", type=str, default="default" + ) + parser.add_option("-u", "--url", dest="url", help="Status results URL", type=str, default="") + parser.add_option( + "-s", + "--state", + dest="state", + help="State of the status e.g. pending, failure, error or success", + type=str, + default="pending", + ) + parser.add_option( + "-R", + "--reset-all", + dest="reset_all", + help="Reset all matching contexts", + action="store_true", + default=False, + ) + parser.add_option( + "-e", + "--if-exists", + dest="if_exists", + help="Only set the status if context already exists", + action="store_true", + default=False, + ) + opts, args = parser.parse_args() + if opts.pr: + opts.commit = get_pr_latest_commit(opts.pr, opts.repository) + if opts.if_exists: + statues = get_combined_statuses(opts.commit, opts.repository) + if "statuses" in statues: + found = False + for s in statues["statuses"]: + if s["context"] != opts.context: + continue + found = True + break + if not found: + exit(0) + mark_commit_status( + opts.commit, + opts.repository, + opts.context, + opts.state, + opts.url, + opts.description, + reset=opts.reset_all, + ) diff --git a/material_budget_ref.py b/material_budget_ref.py index cc61e1510c0c..02247cf6ce3d 100644 --- a/material_budget_ref.py +++ b/material_budget_ref.py @@ -1,23 +1,26 @@ from __future__ import print_function + MATERIAL_BUDGET_REF = { - "CMSSW_8_1_X" : "CMSSW_8_1_X_2017-03-12-0000", - "CMSSW_9_0_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_9_1_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_9_2_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_9_3_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_9_4_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_0_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_1_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_2_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_3_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_4_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_5_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_10_6_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_11_0_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_11_1_X" : "CMSSW_9_0_X_2017-03-14-1100", - "CMSSW_11_2_X" : "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_8_1_X": "CMSSW_8_1_X_2017-03-12-0000", + "CMSSW_9_0_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_9_1_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_9_2_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_9_3_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_9_4_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_0_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_1_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_2_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_3_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_4_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_5_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_10_6_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_11_0_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_11_1_X": "CMSSW_9_0_X_2017-03-14-1100", + "CMSSW_11_2_X": "CMSSW_9_0_X_2017-03-14-1100", } + def get_ref(): - from os import environ - print(MATERIAL_BUDGET_REF["_".join(environ['CMSSW_VERSION'].split("_")[0:3])+"_X"]) + from os import environ + + print(MATERIAL_BUDGET_REF["_".join(environ["CMSSW_VERSION"].split("_")[0:3]) + "_X"]) diff --git a/merge-pull-request b/merge-pull-request deleted file mode 100755 index 
87d747143ce8..000000000000 --- a/merge-pull-request +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from argparse import ArgumentParser -from github import Github -from os.path import expanduser -from sys import exit -from socket import setdefaulttimeout -setdefaulttimeout(120) - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("pr", type=int) - parser.add_argument("-m", dest="message", type=str, default=None) - args = parser.parse_args() - - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - try: - pr = gh.get_repo( 'cms-sw/cmssw' ).get_pull(args.pr) - except: - print("Could not find pull request. Maybe this is an issue?") - exit(0) - print(pr.number, ":", pr.title) - if pr.is_merged(): - print("Pull request is already merged.") - exit(0) - - if args.message: - pr.merge(commit_message=message) - else: - pr.merge() diff --git a/merge-pull-request b/merge-pull-request new file mode 120000 index 000000000000..35e293fb342c --- /dev/null +++ b/merge-pull-request @@ -0,0 +1 @@ +merge-pull-request.py \ No newline at end of file diff --git a/merge-pull-request.py b/merge-pull-request.py new file mode 100755 index 000000000000..1f58206ce7b8 --- /dev/null +++ b/merge-pull-request.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +from __future__ import print_function +from argparse import ArgumentParser +from github import Github +from os.path import expanduser +from sys import exit +from socket import setdefaulttimeout + +setdefaulttimeout(120) + +if __name__ == "__main__": + parser = ArgumentParser() + parser.add_argument("pr", type=int) + parser.add_argument("-m", dest="message", type=str, default=None) + args = parser.parse_args() + + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + try: + pr = gh.get_repo("cms-sw/cmssw").get_pull(args.pr) + except: + print("Could not find pull request. 
Maybe this is an issue?") + exit(0) + print(pr.number, ":", pr.title) + if pr.is_merged(): + print("Pull request is already merged.") + exit(0) + + if args.message: + pr.merge(commit_message=message) + else: + pr.merge() diff --git a/milestones.py b/milestones.py index be39edf22a7b..3bfbba15034a 100644 --- a/milestones.py +++ b/milestones.py @@ -1,65 +1,65 @@ -#Map of cmssw branch to milestone +# Map of cmssw branch to milestone RELEASE_BRANCH_MILESTONE = { - "CMSSW_9_2_6_patchX": 70, - "CMSSW_9_2_3_patchX": 68, - "CMSSW_9_2_0_patchX": 67, - "CMSSW_8_0_10_patchX": 63, - "CMSSW_8_0_8_patchX": 62, - "CMSSW_7_5_5_patchX": 58, - "CMSSW_8_0_X": 57, - "CMSSW_7_6_X": 55, - "CMSSW_7_5_X": 51, - "CMSSW_7_4_X": 50, - "CMSSW_7_3_X": 49, - "CMSSW_7_0_X": 38, - "CMSSW_7_1_X": 47, - "CMSSW_7_2_X": 48, - "CMSSW_6_2_X": 21, - "CMSSW_6_2_X_SLHC": 9, - "CMSSW_5_3_X": 20, - "CMSSW_4_4_X": 8, - "CMSSW_4_2_X": 35, - "CMSSW_4_1_X": 7, - "CMSSW_6_2_SLHCDEV_X": 52, - "CMSSW_7_1_4_patchX": 53, - "CMSSW_7_4_1_patchX": 54, - "CMSSW_7_4_12_patchX": 56, + "CMSSW_9_2_6_patchX": 70, + "CMSSW_9_2_3_patchX": 68, + "CMSSW_9_2_0_patchX": 67, + "CMSSW_8_0_10_patchX": 63, + "CMSSW_8_0_8_patchX": 62, + "CMSSW_7_5_5_patchX": 58, + "CMSSW_8_0_X": 57, + "CMSSW_7_6_X": 55, + "CMSSW_7_5_X": 51, + "CMSSW_7_4_X": 50, + "CMSSW_7_3_X": 49, + "CMSSW_7_0_X": 38, + "CMSSW_7_1_X": 47, + "CMSSW_7_2_X": 48, + "CMSSW_6_2_X": 21, + "CMSSW_6_2_X_SLHC": 9, + "CMSSW_5_3_X": 20, + "CMSSW_4_4_X": 8, + "CMSSW_4_2_X": 35, + "CMSSW_4_1_X": 7, + "CMSSW_6_2_SLHCDEV_X": 52, + "CMSSW_7_1_4_patchX": 53, + "CMSSW_7_4_1_patchX": 54, + "CMSSW_7_4_12_patchX": 56, } -#PR created for these BRANCHES will be closed by cms-bot +# PR created for these BRANCHES will be closed by cms-bot RELEASE_BRANCH_CLOSED = [ - "CMSSW_4_1_X", - "CMSSW_4_2_X", - "CMSSW_4_4_X", - "CMSSW_6_1_X", - "CMSSW_6_1_X_SLHC", - "CMSSW_6_2_X", - "CMSSW_7_0_X", - "CMSSW_.+_Patatrack_X", + "CMSSW_4_1_X", + "CMSSW_4_2_X", + "CMSSW_4_4_X", + "CMSSW_6_1_X", + "CMSSW_6_1_X_SLHC", + "CMSSW_6_2_X", + "CMSSW_7_0_X", + "CMSSW_.+_Patatrack_X", ] -#All these releases require ORP signicatures +# All these releases require ORP signicatures RELEASE_BRANCH_PRODUCTION = [ - "CMSSW_8_0_X", - "CMSSW_7_6_X", - "CMSSW_7_5_X", - "CMSSW_7_4_X", - "CMSSW_7_3_X", - "CMSSW_7_2_X", - "CMSSW_7_1_X", - "CMSSW_7_0_X", - "CMSSW_6_2_X_SLHC", - "CMSSW_5_3_X", + "CMSSW_8_0_X", + "CMSSW_7_6_X", + "CMSSW_7_5_X", + "CMSSW_7_4_X", + "CMSSW_7_3_X", + "CMSSW_7_2_X", + "CMSSW_7_1_X", + "CMSSW_7_0_X", + "CMSSW_6_2_X_SLHC", + "CMSSW_5_3_X", ] SPECIAL_RELEASE_MANAGERS = [] -RELEASE_MANAGERS={} -RELEASE_MANAGERS["CMSSW_.+_Patatrack_X"]=["fwyzard"] +RELEASE_MANAGERS = {} +RELEASE_MANAGERS["CMSSW_.+_Patatrack_X"] = ["fwyzard"] ###################################################################### # Automatically added by cms-bot for CMSSW_8_1_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_8_1_X"]=59 +RELEASE_BRANCH_MILESTONE["CMSSW_8_1_X"] = 59 RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_ROOT6_X") @@ -67,33 +67,33 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_8_0_0_patchX release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_8_0_0_patchX"]=60 +RELEASE_BRANCH_MILESTONE["CMSSW_8_0_0_patchX"] = 60 -#CMSSW_9_0_X release cycle 
-RELEASE_BRANCH_MILESTONE["CMSSW_9_0_X"]=64 +# CMSSW_9_0_X release cycle +RELEASE_BRANCH_MILESTONE["CMSSW_9_0_X"] = 64 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_0_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_0_ROOT6_X") -#CMSSW_9_1_X release cycle -RELEASE_BRANCH_MILESTONE["CMSSW_9_1_X"]=65 +# CMSSW_9_1_X release cycle +RELEASE_BRANCH_MILESTONE["CMSSW_9_1_X"] = 65 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_DEVEL_X") -#CMSSW_9_2_X release cycle -RELEASE_BRANCH_MILESTONE["CMSSW_9_2_X"]=66 +# CMSSW_9_2_X release cycle +RELEASE_BRANCH_MILESTONE["CMSSW_9_2_X"] = 66 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_DEVEL_X") -#CMSSW_9_3_X release cycle -RELEASE_BRANCH_MILESTONE["CMSSW_9_3_X"]=69 +# CMSSW_9_3_X release cycle +RELEASE_BRANCH_MILESTONE["CMSSW_9_3_X"] = 69 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_DEVEL_X") -#CMSSW_9_4_X release cycle -RELEASE_BRANCH_MILESTONE["CMSSW_9_4_X"]=71 +# CMSSW_9_4_X release cycle +RELEASE_BRANCH_MILESTONE["CMSSW_9_4_X"] = 71 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_DEVEL_X") @@ -101,7 +101,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_10_0_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_0_X"]=72 +RELEASE_BRANCH_MILESTONE["CMSSW_10_0_X"] = 72 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_DEVEL_X") @@ -109,7 +109,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_10_1_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_1_X"]=73 +RELEASE_BRANCH_MILESTONE["CMSSW_10_1_X"] = 73 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_DEVEL_X") @@ -117,20 +117,20 @@ ###################################################################### # Manually added by Shahzad MUZAFFAR for CMSSW_9_4_MAOD_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_9_4_MAOD_X"]=74 +RELEASE_BRANCH_MILESTONE["CMSSW_9_4_MAOD_X"] = 74 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_MAOD_X") ###################################################################### # Manually added by Shahzad MUZAFFAR for CMSSW_9_4_AN_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_9_4_AN_X"]=75 +RELEASE_BRANCH_MILESTONE["CMSSW_9_4_AN_X"] = 75 RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_AN_X") ###################################################################### # Automatically added by cms-bot for CMSSW_10_2_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_2_X"]=76 +RELEASE_BRANCH_MILESTONE["CMSSW_10_2_X"] = 76 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_ROOT6_X") 
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_DEVEL_X") @@ -138,7 +138,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_10_3_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_3_X"]=77 +RELEASE_BRANCH_MILESTONE["CMSSW_10_3_X"] = 77 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_DEVEL_X") @@ -146,7 +146,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_10_4_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_4_X"]=78 +RELEASE_BRANCH_MILESTONE["CMSSW_10_4_X"] = 78 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_DEVEL_X") @@ -155,19 +155,19 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_10_5_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_5_X"]=79 +RELEASE_BRANCH_MILESTONE["CMSSW_10_5_X"] = 79 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_5_X") ###################################################################### # Automatically added by cms-bot for CMSSW_10_6_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_10_6_X"]=80 +RELEASE_BRANCH_MILESTONE["CMSSW_10_6_X"] = 80 RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_6_X") ###################################################################### # Automatically added by cms-bot for CMSSW_11_0_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_11_0_X"]=81 +RELEASE_BRANCH_MILESTONE["CMSSW_11_0_X"] = 81 RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_CXXMODULE_X") @@ -176,14 +176,14 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_11_1_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_11_1_X"]=82 +RELEASE_BRANCH_MILESTONE["CMSSW_11_1_X"] = 82 RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_1_DEVEL_X") ###################################################################### # Automatically added by cms-bot for CMSSW_11_2_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_11_2_X"]=83 +RELEASE_BRANCH_MILESTONE["CMSSW_11_2_X"] = 83 RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_CLANG_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_Patatrack_X") @@ -192,7 +192,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_11_3_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_11_3_X"]=84 +RELEASE_BRANCH_MILESTONE["CMSSW_11_3_X"] = 84 RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_CLANG_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_DEVEL_X") @@ 
-201,7 +201,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_0_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_0_X"]=85 +RELEASE_BRANCH_MILESTONE["CMSSW_12_0_X"] = 85 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_Patatrack_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_GEANT4_X") @@ -210,7 +210,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_1_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_1_X"]=86 +RELEASE_BRANCH_MILESTONE["CMSSW_12_1_X"] = 86 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_GEANT4_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_DEVEL_X") @@ -219,7 +219,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_2_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_2_X"]=87 +RELEASE_BRANCH_MILESTONE["CMSSW_12_2_X"] = 87 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_ROOT6_X") @@ -227,7 +227,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_3_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_3_X"]=88 +RELEASE_BRANCH_MILESTONE["CMSSW_12_3_X"] = 88 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_ROOT6_X") @@ -235,7 +235,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_4_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_4_X"]=89 +RELEASE_BRANCH_MILESTONE["CMSSW_12_4_X"] = 89 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_ROOT6_X") @@ -243,7 +243,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_5_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_5_X"]=90 +RELEASE_BRANCH_MILESTONE["CMSSW_12_5_X"] = 90 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_DEVEL_X") @@ -251,7 +251,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_12_6_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_12_6_X"]=91 +RELEASE_BRANCH_MILESTONE["CMSSW_12_6_X"] = 91 RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_DEVEL_X") @@ -259,7 +259,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_13_0_X release cycle 
###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_13_0_X"]=92 +RELEASE_BRANCH_MILESTONE["CMSSW_13_0_X"] = 92 RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_ROOT6_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_DEVEL_X") @@ -267,7 +267,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_13_1_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_13_1_X"]=93 +RELEASE_BRANCH_MILESTONE["CMSSW_13_1_X"] = 93 RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_ROOT6_X") @@ -275,7 +275,7 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_13_2_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_13_2_X"]=94 +RELEASE_BRANCH_MILESTONE["CMSSW_13_2_X"] = 94 RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_DEVEL_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_ROOT6_X") @@ -283,6 +283,6 @@ ###################################################################### # Automatically added by cms-bot for CMSSW_13_3_X release cycle ###################################################################### -RELEASE_BRANCH_MILESTONE["CMSSW_13_3_X"]=95 +RELEASE_BRANCH_MILESTONE["CMSSW_13_3_X"] = 95 RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_3_X") RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_3_DEVEL_X") diff --git a/modify_comment.py b/modify_comment.py index ee4882ddca35..754e375e01cc 100755 --- a/modify_comment.py +++ b/modify_comment.py @@ -7,38 +7,79 @@ from optparse import OptionParser import sys from socket import setdefaulttimeout + setdefaulttimeout(120) SCRIPT_DIR = dirname(abspath(sys.argv[0])) valid_types = {} -valid_types['JENKINS_TEST_URL']=[ "", None ] -valid_types['JENKINS_STYLE_URL']=[ "", None ] +valid_types["JENKINS_TEST_URL"] = ["", None] +valid_types["JENKINS_STYLE_URL"] = ["", None] all_types = "|".join(valid_types) if __name__ == "__main__": - parser = OptionParser(usage="%prog [-n|--dry-run] [-r|--repository ] -t|--type "+all_types+" -m|--message ") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-t", "--type", dest="msgtype", help="Message type e.g. JENKINS_TEST_URL", type=str, default=None) - parser.add_option("-m", "--message", dest="message", help="Message to be appened to the existing comment e.g. url of jenkins test job.", type=str, default=None) + parser = OptionParser( + usage="%prog [-n|--dry-run] [-r|--repository ] -t|--type " + + all_types + + " -m|--message " + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-t", + "--type", + dest="msgtype", + help="Message type e.g. 
JENKINS_TEST_URL", + type=str, + default=None, + ) + parser.add_option( + "-m", + "--message", + dest="message", + help="Message to be appened to the existing comment e.g. url of jenkins test job.", + type=str, + default=None, + ) + + opts, args = parser.parse_args() + if len(args) != 1: + parser.error("Too many/few arguments") + if not opts.message: + parser.error("Missing message to append") + if not opts.msgtype: + parser.error("Missing message type") + if not opts.msgtype in valid_types: + parser.error("Invalid message type " + opts.msgtype) + + repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import repo_config + from process_pr import modify_comment, find_last_comment + from process_pr import TRIGERING_TESTS_MSG, TRIGERING_STYLE_TEST_MSG - opts, args = parser.parse_args() - if len(args) != 1: parser.error("Too many/few arguments") - if not opts.message: parser.error("Missing message to append") - if not opts.msgtype: parser.error("Missing message type") - if not opts.msgtype in valid_types: parser.error("Invalid message type "+opts.msgtype) - - repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config - from process_pr import modify_comment, find_last_comment - from process_pr import TRIGERING_TESTS_MSG, TRIGERING_STYLE_TEST_MSG - valid_types['JENKINS_TEST_URL']=[ "^\s*"+TRIGERING_TESTS_MSG+".*$", None ] - valid_types['JENKINS_STYLE_URL']=[ "^\s*"+TRIGERING_STYLE_TEST_MSG+".*$", None ] - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - issue = gh.get_repo(opts.repository).get_issue(int(args[0])) - last_comment = find_last_comment(issue, repo_config.CMSBUILD_USER ,valid_types[opts.msgtype][0]) - if not last_comment: - print("Warning: Not comment matched") - sys.exit(1) - print(last_comment.body) - sys.exit(modify_comment(last_comment,valid_types[opts.msgtype][1],opts.message,opts.dryRun)) + valid_types["JENKINS_TEST_URL"] = ["^\s*" + TRIGERING_TESTS_MSG + ".*$", None] + valid_types["JENKINS_STYLE_URL"] = ["^\s*" + TRIGERING_STYLE_TEST_MSG + ".*$", None] + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + issue = gh.get_repo(opts.repository).get_issue(int(args[0])) + last_comment = find_last_comment( + issue, repo_config.CMSBUILD_USER, valid_types[opts.msgtype][0] + ) + if not last_comment: + print("Warning: Not comment matched") + sys.exit(1) + print(last_comment.body) + sys.exit(modify_comment(last_comment, valid_types[opts.msgtype][1], opts.message, opts.dryRun)) diff --git a/monitor_workflow.py b/monitor_workflow.py index b03c7eab9196..27a482346b49 100755 --- a/monitor_workflow.py +++ b/monitor_workflow.py @@ -6,65 +6,100 @@ from threading import Thread import subprocess -job = {'exit_code':0, 'command':'true'} -def run_job(job): job['exit_code']=subprocess.call(job['command']) +job = {"exit_code": 0, "command": "true"} + + +def run_job(job): + job["exit_code"] = subprocess.call(job["command"]) + def update_stats(proc): - stats = {"rss":0, "vms":0, "shared":0, "data":0, "uss":0, "pss":0,"num_fds":0,"num_threads":0, "processes":0, "cpu": 0} - children = proc.children(recursive=True) - clds = len(children) - if clds==0: return stats - stats['processes'] = clds - for cld in children: - try: - cld.cpu_percent(interval=None) - sleep(0.1) - stats['cpu'] += int(cld.cpu_percent(interval=None)) - stats['num_fds'] += 
cld.num_fds() - stats['num_threads'] += cld.num_threads() - mem = None - try: - mem = cld.memory_full_info() - for a in ["uss", "pss"]: stats[a]+=getattr(mem,a) - except: - try: mem = cld.memory_info() - except: mem = cld.memory_info_ex() - for a in ["rss", "vms", "shared", "data"]: stats[a]+=getattr(mem,a) - except: pass - return stats + stats = { + "rss": 0, + "vms": 0, + "shared": 0, + "data": 0, + "uss": 0, + "pss": 0, + "num_fds": 0, + "num_threads": 0, + "processes": 0, + "cpu": 0, + } + children = proc.children(recursive=True) + clds = len(children) + if clds == 0: + return stats + stats["processes"] = clds + for cld in children: + try: + cld.cpu_percent(interval=None) + sleep(0.1) + stats["cpu"] += int(cld.cpu_percent(interval=None)) + stats["num_fds"] += cld.num_fds() + stats["num_threads"] += cld.num_threads() + mem = None + try: + mem = cld.memory_full_info() + for a in ["uss", "pss"]: + stats[a] += getattr(mem, a) + except: + try: + mem = cld.memory_info() + except: + mem = cld.memory_info_ex() + for a in ["rss", "vms", "shared", "data"]: + stats[a] += getattr(mem, a) + except: + pass + return stats + def monitor(stop): - stime = int(time()) - p = psutil.Process(getpid()) - cmdline = " ".join(p.parent().cmdline()) - if "cmsDriver.py " in cmdline: - cmdargs=cmdline.split("cmsDriver.py ",1)[1].strip() - step=None - if cmdargs.startswith("step"): - step=cmdargs.split(" ")[0] - elif ' --fileout ' in cmdargs: - step =cmdargs.split(' --fileout ',1)[1].strip().split(" ")[0].replace("file:","").replace(".root","") - if not "step" in step: step="step1" - else: step=stime - data = [] - sleep_time = 1 - while not stop(): - try: - stats = update_stats(p) - if stats['processes']==0: break - sleep_time = 1.0-stats['processes']*0.1 - stats['time'] = int(time()-stime) - data.append(stats) - except: pass - if sleep_time>0.1: sleep(sleep_time) - from json import dump - stat_file =open("wf_stats-%s.json" % step,"w") - dump(data, stat_file) - stat_file.close() - return + stime = int(time()) + p = psutil.Process(getpid()) + cmdline = " ".join(p.parent().cmdline()) + if "cmsDriver.py " in cmdline: + cmdargs = cmdline.split("cmsDriver.py ", 1)[1].strip() + step = None + if cmdargs.startswith("step"): + step = cmdargs.split(" ")[0] + elif " --fileout " in cmdargs: + step = ( + cmdargs.split(" --fileout ", 1)[1] + .strip() + .split(" ")[0] + .replace("file:", "") + .replace(".root", "") + ) + if not "step" in step: + step = "step1" + else: + step = stime + data = [] + sleep_time = 1 + while not stop(): + try: + stats = update_stats(p) + if stats["processes"] == 0: + break + sleep_time = 1.0 - stats["processes"] * 0.1 + stats["time"] = int(time() - stime) + data.append(stats) + except: + pass + if sleep_time > 0.1: + sleep(sleep_time) + from json import dump + + stat_file = open("wf_stats-%s.json" % step, "w") + dump(data, stat_file) + stat_file.close() + return + stop_monitoring = False -job['command']=argv[1:] +job["command"] = argv[1:] job_thd = Thread(target=run_job, args=(job,)) mon_thd = Thread(target=monitor, args=(lambda: stop_monitoring,)) job_thd.start() @@ -73,5 +108,4 @@ def monitor(stop): job_thd.join() stop_monitoring = True mon_thd.join() -exit(job['exit_code']) - +exit(job["exit_code"]) diff --git a/new-release-cycle b/new-release-cycle deleted file mode 100755 index b89490cfa920..000000000000 --- a/new-release-cycle +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from github import Github, GithubException -from os.path import expanduser, 
dirname, abspath, join -from optparse import OptionParser -from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, GH_CMSDIST_REPO -from sys import exit, argv -from _py2with3compatibility import run_cmd -from socket import setdefaulttimeout -from releases import CMSSW_DEVEL_BRANCH -setdefaulttimeout(120) - -# python2 compatibility -try: - input = raw_input -except NameError: - pass - -try: - scriptPath = dirname(abspath(__file__)) -except Exception as e : - scriptPath = dirname(abspath(argv[0])) - -############################################################### -def create_branch(repo, base_branch, new_branch, dryRun=False): - while True: - print("Creating new branch '%s' based on '%s'" % (new_branch, base_branch)) - res = input("OK to create this branch [Y/N/Q]: ") - if res=="Y": break - if res=="N": return - if res=="Q": exit(1) - base_ref = repo.get_branch(base_branch) - print("Base branch %s has sha %s" % (base_branch, base_ref.commit.sha)) - try: - repo.get_branch(new_branch) - print("Branch already exists: ",new_branch) - return - except GithubException as e: - if not "Branch not found" in e.data['message']: raise e - if not dryRun: - repo.create_git_ref ("refs/heads/"+new_branch, base_ref.commit.sha) - print("Created new branch ",new_branch," based on ",base_ref.commit.sha) - else: - print("DryRun: Creating new branch ",new_branch," based on ",base_ref.commit.sha) - return - -def get_milestone(repo, milestone_name): - milestones = repo.get_milestones() - for item in repo.get_milestones(): - if milestone_name in item.title: return item - return None - -def create_milestone(repo, milestone_name, dryRun=False): - print("Creating new milestone '%s'" % milestone_name) - milestone = get_milestone (repo, milestone_name) - if milestone: return milestone - if not dryRun: - milestone = repo.create_milestone (milestone_name) - print("Created milestone %s with number %s" % (milestone_name, str(milestone.number))) - else: - print("DryRun: Creating new milestone %s" % milestone_name) - return milestone - -def update_milestone(repo, source, srcMilestone, desMilestone, dryRun=False): - pulls = repo.get_pulls(base=source, state="open", sort="created", direction="asc") - for pr in pulls: - print("Wroking on PR ",pr.number,"with milestone",pr.milestone.number) - if (pr.milestone.number == desMilestone.number): - print(" Milestone already updated for PR:",pr.number) - elif pr.milestone.number == srcMilestone.number: - if not dryRun: - issue = repo.get_issue(pr.number) - issue.edit(milestone=desMilestone) - print(" Updated milestone for PR:",pr.number) - else: - print(" Invalid Source Milestone:",pr.milestone.number) - return - -def add_milestone_in_cmsbot(new_br, cmssw_brs, milestone, dryRun=False): - print("Updating milestones.py") - from releases import RELEASE_BRANCH_MILESTONE - if new_br in RELEASE_BRANCH_MILESTONE: - print("Warning: Not updating milestones.py as it already have changes for new release cycle %s" % new_br) - return - with open(join(scriptPath,"milestones.py"), "a") as relFile: - relFile.write('\n######################################################################\n') - relFile.write('# Automatically added by cms-bot for %s release cycle\n' % (new_br)) - relFile.write('######################################################################\n') - relFile.write('RELEASE_BRANCH_MILESTONE["%s"]=%s\n' % (new_br, 0 if dryRun else milestone.number)) - relFile.write('RELEASE_BRANCH_PRODUCTION.append("%s")\n' % (new_br)) - for br in cmssw_brs: - if new_br!=br: 
relFile.write('RELEASE_BRANCH_PRODUCTION.append("%s")\n' % (br)) - return - -def update_dev_branch(new_br, dryRun=False): - print("Updating releases.py") - err, out = run_cmd("sed -i -e 's|^ *CMSSW_DEVEL_BRANCH *=.*$|CMSSW_DEVEL_BRANCH = \"%s\"|' %s" % (new_br,join(scriptPath,"releases.py"))) - return - -def config_map_branches(new_br, dev_br, config_file): - cmssw_brs = {} - cmsdist_brs = {} - new_cyc = new_br[:-1] - dev_cyc = dev_br[:-1] - new_ibs = [] - new_config = [] - for l in [ l.strip() for l in open(config_file).readlines()]: - if "RELEASE_QUEUE="+new_cyc in l: continue - l = l.replace("RELEASE_BRANCH=master;","RELEASE_BRANCH=%s;" % dev_br) - new_config.append(l) - if ("RELEASE_BRANCH="+dev_cyc in l) and (not 'DISABLED=' in l): - cmssw_br = l.split("RELEASE_BRANCH=")[1].split(";")[0] - cmssw_brs[cmssw_br]=cmssw_br.replace(dev_cyc, new_cyc) - cmsdist_br = l.split("CMSDIST_TAG=")[1].split(";")[0] - cmsdist_brs[cmsdist_br]=cmsdist_br.replace(dev_br, new_br) - new_ibs.append(l.replace(dev_cyc, new_cyc).replace("RELEASE_BRANCH="+new_br+";","RELEASE_BRANCH=master;")) - return (cmssw_brs, cmsdist_brs, new_ibs+new_config) - -def update_config_map(new_br, dryRun=False): - print("Updating config.map") - config_file = join(scriptPath,"config.map") - e , o = run_cmd("grep 'RELEASE_QUEUE=%s;' %s | grep 'PROD_ARCH=1'" % (new_br, config_file)) - if new_br in o: - print("Warning: Not updating config.map as it already have changes for new release cycle %s" % new_br) - cmssw_brs, cmsdist_brs, new_config = config_map_branches(new_br, CMSSW_DEVEL_BRANCH, config_file) - return (cmssw_brs, cmsdist_brs) - e, dev_br = run_cmd("grep 'RELEASE_BRANCH=master;' %s | grep 'PROD_ARCH=1' | sed 's|.*RELEASE_QUEUE=||;s|;.*||' | sort -u" % config_file) - if e: - print("ERROR: unable to find current dev release") - exit(1) - if len(dev_br.split("\n"))!=1: - print("ERROR: None or more than one dev release cycles found. 
%s" % dev_br) - exit(1) - if dev_br != CMSSW_DEVEL_BRANCH: - print("ERROR: current dev branch '%s' found in config.map does not match the one set in release.py '%s'" % (dev_br, CMSSW_DEVEL_BRANCH)) - exit(1) - cmssw_brs, cmsdist_brs, new_config = config_map_branches(new_br, dev_br, config_file) - new_config_ref = open(config_file,"w") - for l in new_config: new_config_ref.write(l+"\n") - new_config_ref.close() - return (cmssw_brs, cmsdist_brs) - -def update_forward_port(new_br, dryRun=False): - print("Updating forward_ports_map.py") - from forward_ports_map import GIT_REPO_FWPORTS - if new_br in GIT_REPO_FWPORTS["cmssw"]: - print("Warning: Not updating forward_ports_map.py as it already have changes for new release cycle %s" % new_br) - return - fwdport_file = join(scriptPath,"forward_ports_map.py") - new_fwd = [] - e, o = run_cmd("grep GIT_REPO_FWPORTS %s | grep '%s'" % (fwdport_file, CMSSW_DEVEL_BRANCH)) - if e: - print("ERROR: Unable to find forward ports for existsing development release %s" % CMSSW_DEVEL_BRANCH) - exit (1) - new_cyc = new_br[:-1] - dev_cyc = CMSSW_DEVEL_BRANCH[:-1] - with open(fwdport_file, "a") as ref: - ref.write('\n#Automatically added\n') - for l in o.split("\n"): - ref.write('%s\n' % l.replace(dev_cyc, new_cyc)) - return - -def update_release_map(new_br, dryRun=False): - print("Updating releases.map") - relmap_file = join(scriptPath,"releases.map") - e, o = run_cmd("grep 'type=Development;state=IB;prodarch=1;' %s | grep 'label=%s;'" % (relmap_file, new_br)) - if new_br in o: - print("Warning: Not updating releases.map as it already have changes for new release cycle %s" % new_br) - return - e, map_line = run_cmd("grep 'type=Development;state=IB;prodarch=1;' %s | grep 'label=%s;'" % (relmap_file, CMSSW_DEVEL_BRANCH)) - if e: - print("ERROR: Unable to find current development release '%s' in releases.map" % CMSSW_DEVEL_BRANCH) - exit(1) - if len(map_line.split("\n"))>1: - print("ERROR: Found multiple entrie for '%s' in releases.map" % CMSSW_DEVEL_BRANCH) - print(map_line) - exit(1) - run_cmd("echo '%s' > %s.new" % (map_line.replace('label=%s;' % CMSSW_DEVEL_BRANCH, 'label=%s;' % new_br),relmap_file)) - run_cmd("cat %s >> %s.new" % (relmap_file,relmap_file)) - run_cmd("mv %s.new %s" % (relmap_file,relmap_file)) - -def process(cycle, dryRun): - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - cmssw_repo = gh.get_repo(GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO) - srcMileStone = get_milestone(cmssw_repo, CMSSW_DEVEL_BRANCH) - - if not srcMileStone: - print("ERROR: Unable to get milestone for %s" % CMSSW_DEVEL_BRANCH) - exit(1) - - #make sure that existing dev IB use the dev branch instead of master branch - cmssw_brs, cmsdist_brs = update_config_map(cycle, dryRun) - - #update forward port map - update_forward_port(cycle, dryRun) - - #update forward port map - update_release_map(cycle, dryRun) - - #Create milestone - desMileStone = create_milestone(cmssw_repo, cycle, dryRun) - - #Add milestone on - add_milestone_in_cmsbot (cycle, list(cmssw_brs.values()), desMileStone, dryRun) - - #Add devel branch - update_dev_branch(cycle, dryRun) - - #Create cmssw branches - create_branch (cmssw_repo, "master", cycle, dryRun) - - #Update milestone for existing Open PRs - if dryRun: desMileStone = srcMileStone - update_milestone(cmssw_repo, "master", srcMileStone, desMileStone, dryRun) - - #create cmssw branches - for dev_br in list(cmssw_brs.keys()): - new_br = cmssw_brs[dev_br] - if new_br==cycle: continue - create_branch (cmssw_repo, dev_br, new_br, 
dryRun) - - #create cmsdist branches - cmsdist_repo = gh.get_repo(GH_CMSSW_ORGANIZATION+"/"+GH_CMSDIST_REPO) - for dev_br in list(cmsdist_brs.keys()): - new_br = cmsdist_brs[dev_br] - create_branch (cmsdist_repo, dev_br, new_br, dryRun) - - err, out = run_cmd("cd %s; git diff origin" % scriptPath) - print("GIT DIFF:\n",out) - print("\nIf the changes above looks good then please commit and push these to github") - return True - -############################################################### -if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-c", "--cycle", dest="cycle", help="Release cycle name e.g CMSSW_10_1_X", type=str, default='None') - parser.add_option("-o", "--old-cycle", dest="old_cycle", help="Existing development release cycle e.g CMSSW_10_0_X. Default is "+CMSSW_DEVEL_BRANCH+" obtained from releases.py", type=str, default=CMSSW_DEVEL_BRANCH) - opts, args = parser.parse_args() - - if len(args) > 0: parser.error("Too many arguments") - if not opts.cycle or not opts.cycle.endswith("_X"): parser.error("Invalid cycle '"+str(opts.cycle)+"' it must end with _X") - if opts.old_cycle != CMSSW_DEVEL_BRANCH: CMSSW_DEVEL_BRANCH=opts.old_cycle - process (opts.cycle, opts.dryRun) diff --git a/new-release-cycle b/new-release-cycle new file mode 120000 index 000000000000..4a6380778761 --- /dev/null +++ b/new-release-cycle @@ -0,0 +1 @@ +new-release-cycle.py \ No newline at end of file diff --git a/new-release-cycle.py b/new-release-cycle.py new file mode 100755 index 000000000000..a9828641004d --- /dev/null +++ b/new-release-cycle.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python +from __future__ import print_function +from github import Github, GithubException +from os.path import expanduser, dirname, abspath, join +from optparse import OptionParser +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, GH_CMSDIST_REPO +from sys import exit, argv +from _py2with3compatibility import run_cmd +from socket import setdefaulttimeout +from releases import CMSSW_DEVEL_BRANCH + +setdefaulttimeout(120) + +# python2 compatibility +try: + input = raw_input +except NameError: + pass + +try: + scriptPath = dirname(abspath(__file__)) +except Exception as e: + scriptPath = dirname(abspath(argv[0])) + + +############################################################### +def create_branch(repo, base_branch, new_branch, dryRun=False): + while True: + print("Creating new branch '%s' based on '%s'" % (new_branch, base_branch)) + res = input("OK to create this branch [Y/N/Q]: ") + if res == "Y": + break + if res == "N": + return + if res == "Q": + exit(1) + base_ref = repo.get_branch(base_branch) + print("Base branch %s has sha %s" % (base_branch, base_ref.commit.sha)) + try: + repo.get_branch(new_branch) + print("Branch already exists: ", new_branch) + return + except GithubException as e: + if not "Branch not found" in e.data["message"]: + raise e + if not dryRun: + repo.create_git_ref("refs/heads/" + new_branch, base_ref.commit.sha) + print("Created new branch ", new_branch, " based on ", base_ref.commit.sha) + else: + print("DryRun: Creating new branch ", new_branch, " based on ", base_ref.commit.sha) + return + + +def get_milestone(repo, milestone_name): + milestones = repo.get_milestones() + for item in repo.get_milestones(): + if milestone_name in item.title: + return item + return None + + +def create_milestone(repo, milestone_name, dryRun=False): + 
print("Creating new milestone '%s'" % milestone_name) + milestone = get_milestone(repo, milestone_name) + if milestone: + return milestone + if not dryRun: + milestone = repo.create_milestone(milestone_name) + print("Created milestone %s with number %s" % (milestone_name, str(milestone.number))) + else: + print("DryRun: Creating new milestone %s" % milestone_name) + return milestone + + +def update_milestone(repo, source, srcMilestone, desMilestone, dryRun=False): + pulls = repo.get_pulls(base=source, state="open", sort="created", direction="asc") + for pr in pulls: + print("Wroking on PR ", pr.number, "with milestone", pr.milestone.number) + if pr.milestone.number == desMilestone.number: + print(" Milestone already updated for PR:", pr.number) + elif pr.milestone.number == srcMilestone.number: + if not dryRun: + issue = repo.get_issue(pr.number) + issue.edit(milestone=desMilestone) + print(" Updated milestone for PR:", pr.number) + else: + print(" Invalid Source Milestone:", pr.milestone.number) + return + + +def add_milestone_in_cmsbot(new_br, cmssw_brs, milestone, dryRun=False): + print("Updating milestones.py") + from releases import RELEASE_BRANCH_MILESTONE + + if new_br in RELEASE_BRANCH_MILESTONE: + print( + "Warning: Not updating milestones.py as it already have changes for new release cycle %s" + % new_br + ) + return + with open(join(scriptPath, "milestones.py"), "a") as relFile: + relFile.write("\n######################################################################\n") + relFile.write("# Automatically added by cms-bot for %s release cycle\n" % (new_br)) + relFile.write("######################################################################\n") + relFile.write( + 'RELEASE_BRANCH_MILESTONE["%s"]=%s\n' % (new_br, 0 if dryRun else milestone.number) + ) + relFile.write('RELEASE_BRANCH_PRODUCTION.append("%s")\n' % (new_br)) + for br in cmssw_brs: + if new_br != br: + relFile.write('RELEASE_BRANCH_PRODUCTION.append("%s")\n' % (br)) + return + + +def update_dev_branch(new_br, dryRun=False): + print("Updating releases.py") + err, out = run_cmd( + "sed -i -e 's|^ *CMSSW_DEVEL_BRANCH *=.*$|CMSSW_DEVEL_BRANCH = \"%s\"|' %s" + % (new_br, join(scriptPath, "releases.py")) + ) + return + + +def config_map_branches(new_br, dev_br, config_file): + cmssw_brs = {} + cmsdist_brs = {} + new_cyc = new_br[:-1] + dev_cyc = dev_br[:-1] + new_ibs = [] + new_config = [] + for l in [l.strip() for l in open(config_file).readlines()]: + if "RELEASE_QUEUE=" + new_cyc in l: + continue + l = l.replace("RELEASE_BRANCH=master;", "RELEASE_BRANCH=%s;" % dev_br) + new_config.append(l) + if ("RELEASE_BRANCH=" + dev_cyc in l) and (not "DISABLED=" in l): + cmssw_br = l.split("RELEASE_BRANCH=")[1].split(";")[0] + cmssw_brs[cmssw_br] = cmssw_br.replace(dev_cyc, new_cyc) + cmsdist_br = l.split("CMSDIST_TAG=")[1].split(";")[0] + cmsdist_brs[cmsdist_br] = cmsdist_br.replace(dev_br, new_br) + new_ibs.append( + l.replace(dev_cyc, new_cyc).replace( + "RELEASE_BRANCH=" + new_br + ";", "RELEASE_BRANCH=master;" + ) + ) + return (cmssw_brs, cmsdist_brs, new_ibs + new_config) + + +def update_config_map(new_br, dryRun=False): + print("Updating config.map") + config_file = join(scriptPath, "config.map") + e, o = run_cmd("grep 'RELEASE_QUEUE=%s;' %s | grep 'PROD_ARCH=1'" % (new_br, config_file)) + if new_br in o: + print( + "Warning: Not updating config.map as it already have changes for new release cycle %s" + % new_br + ) + cmssw_brs, cmsdist_brs, new_config = config_map_branches( + new_br, CMSSW_DEVEL_BRANCH, config_file + ) + 
return (cmssw_brs, cmsdist_brs) + e, dev_br = run_cmd( + "grep 'RELEASE_BRANCH=master;' %s | grep 'PROD_ARCH=1' | sed 's|.*RELEASE_QUEUE=||;s|;.*||' | sort -u" + % config_file + ) + if e: + print("ERROR: unable to find current dev release") + exit(1) + if len(dev_br.split("\n")) != 1: + print("ERROR: None or more than one dev release cycles found. %s" % dev_br) + exit(1) + if dev_br != CMSSW_DEVEL_BRANCH: + print( + "ERROR: current dev branch '%s' found in config.map does not match the one set in release.py '%s'" + % (dev_br, CMSSW_DEVEL_BRANCH) + ) + exit(1) + cmssw_brs, cmsdist_brs, new_config = config_map_branches(new_br, dev_br, config_file) + new_config_ref = open(config_file, "w") + for l in new_config: + new_config_ref.write(l + "\n") + new_config_ref.close() + return (cmssw_brs, cmsdist_brs) + + +def update_forward_port(new_br, dryRun=False): + print("Updating forward_ports_map.py") + from forward_ports_map import GIT_REPO_FWPORTS + + if new_br in GIT_REPO_FWPORTS["cmssw"]: + print( + "Warning: Not updating forward_ports_map.py as it already have changes for new release cycle %s" + % new_br + ) + return + fwdport_file = join(scriptPath, "forward_ports_map.py") + new_fwd = [] + e, o = run_cmd("grep GIT_REPO_FWPORTS %s | grep '%s'" % (fwdport_file, CMSSW_DEVEL_BRANCH)) + if e: + print( + "ERROR: Unable to find forward ports for existsing development release %s" + % CMSSW_DEVEL_BRANCH + ) + exit(1) + new_cyc = new_br[:-1] + dev_cyc = CMSSW_DEVEL_BRANCH[:-1] + with open(fwdport_file, "a") as ref: + ref.write("\n#Automatically added\n") + for l in o.split("\n"): + ref.write("%s\n" % l.replace(dev_cyc, new_cyc)) + return + + +def update_release_map(new_br, dryRun=False): + print("Updating releases.map") + relmap_file = join(scriptPath, "releases.map") + e, o = run_cmd( + "grep 'type=Development;state=IB;prodarch=1;' %s | grep 'label=%s;'" + % (relmap_file, new_br) + ) + if new_br in o: + print( + "Warning: Not updating releases.map as it already have changes for new release cycle %s" + % new_br + ) + return + e, map_line = run_cmd( + "grep 'type=Development;state=IB;prodarch=1;' %s | grep 'label=%s;'" + % (relmap_file, CMSSW_DEVEL_BRANCH) + ) + if e: + print( + "ERROR: Unable to find current development release '%s' in releases.map" + % CMSSW_DEVEL_BRANCH + ) + exit(1) + if len(map_line.split("\n")) > 1: + print("ERROR: Found multiple entrie for '%s' in releases.map" % CMSSW_DEVEL_BRANCH) + print(map_line) + exit(1) + run_cmd( + "echo '%s' > %s.new" + % (map_line.replace("label=%s;" % CMSSW_DEVEL_BRANCH, "label=%s;" % new_br), relmap_file) + ) + run_cmd("cat %s >> %s.new" % (relmap_file, relmap_file)) + run_cmd("mv %s.new %s" % (relmap_file, relmap_file)) + + +def process(cycle, dryRun): + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + cmssw_repo = gh.get_repo(GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO) + srcMileStone = get_milestone(cmssw_repo, CMSSW_DEVEL_BRANCH) + + if not srcMileStone: + print("ERROR: Unable to get milestone for %s" % CMSSW_DEVEL_BRANCH) + exit(1) + + # make sure that existing dev IB use the dev branch instead of master branch + cmssw_brs, cmsdist_brs = update_config_map(cycle, dryRun) + + # update forward port map + update_forward_port(cycle, dryRun) + + # update forward port map + update_release_map(cycle, dryRun) + + # Create milestone + desMileStone = create_milestone(cmssw_repo, cycle, dryRun) + + # Add milestone on + add_milestone_in_cmsbot(cycle, list(cmssw_brs.values()), desMileStone, dryRun) + + # Add devel branch + 
update_dev_branch(cycle, dryRun) + + # Create cmssw branches + create_branch(cmssw_repo, "master", cycle, dryRun) + + # Update milestone for existing Open PRs + if dryRun: + desMileStone = srcMileStone + update_milestone(cmssw_repo, "master", srcMileStone, desMileStone, dryRun) + + # create cmssw branches + for dev_br in list(cmssw_brs.keys()): + new_br = cmssw_brs[dev_br] + if new_br == cycle: + continue + create_branch(cmssw_repo, dev_br, new_br, dryRun) + + # create cmsdist branches + cmsdist_repo = gh.get_repo(GH_CMSSW_ORGANIZATION + "/" + GH_CMSDIST_REPO) + for dev_br in list(cmsdist_brs.keys()): + new_br = cmsdist_brs[dev_br] + create_branch(cmsdist_repo, dev_br, new_br, dryRun) + + err, out = run_cmd("cd %s; git diff origin" % scriptPath) + print("GIT DIFF:\n", out) + print("\nIf the changes above looks good then please commit and push these to github") + return True + + +############################################################### +if __name__ == "__main__": + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-c", + "--cycle", + dest="cycle", + help="Release cycle name e.g CMSSW_10_1_X", + type=str, + default="None", + ) + parser.add_option( + "-o", + "--old-cycle", + dest="old_cycle", + help="Existing development release cycle e.g CMSSW_10_0_X. Default is " + + CMSSW_DEVEL_BRANCH + + " obtained from releases.py", + type=str, + default=CMSSW_DEVEL_BRANCH, + ) + opts, args = parser.parse_args() + + if len(args) > 0: + parser.error("Too many arguments") + if not opts.cycle or not opts.cycle.endswith("_X"): + parser.error("Invalid cycle '" + str(opts.cycle) + "' it must end with _X") + if opts.old_cycle != CMSSW_DEVEL_BRANCH: + CMSSW_DEVEL_BRANCH = opts.old_cycle + process(opts.cycle, opts.dryRun) diff --git a/package2category.py b/package2category.py index f1702d153c9e..1200f503a02d 100755 --- a/package2category.py +++ b/package2category.py @@ -3,14 +3,19 @@ from categories_map import CMSSW_CATEGORIES import sys + def package2category(filename): - if not filename: return - file_pack = '/'.join(filename.split('/')[:2]) - cat = 'unknown' - if file_pack in pack2cat: cat = '-'.join(sorted(pack2cat[file_pack])) - if not cat in cats: cats[cat] = {} + if not filename: + return + file_pack = "/".join(filename.split("/")[:2]) + cat = "unknown" + if file_pack in pack2cat: + cat = "-".join(sorted(pack2cat[file_pack])) + if not cat in cats: + cats[cat] = {} cats[cat][file_pack] = 1 + pack2cat = {} for cat in CMSSW_CATEGORIES: for pack in CMSSW_CATEGORIES[cat]: @@ -26,4 +31,4 @@ def package2category(filename): package2category(line.strip()) for cat in cats: - print ("%s %s" % (cat, " ".join(cats[cat].keys()))) + print("%s %s" % (cat, " ".join(cats[cat].keys()))) diff --git a/parse_iwyu_logs.py b/parse_iwyu_logs.py index dc09eb457999..7d3103938a38 100755 --- a/parse_iwyu_logs.py +++ b/parse_iwyu_logs.py @@ -1,14 +1,16 @@ #!/bin/env python from __future__ import print_function -import sys , json -fd=open(sys.argv[1],'r') +import sys, json + +fd = open(sys.argv[1], "r") info = {} -includes=0 -excludes=0 -pkg_name = '/'.join(sys.argv[1].split('/')[-3:-1]) -files=0 -splitline = sys.argv[2] + '/src/' -print(""" +includes = 0 +excludes = 0 +pkg_name = "/".join(sys.argv[1].split("/")[-3:-1]) +files = 0 +splitline = sys.argv[2] + "/src/" +print( + """ -""") -print('' + 'Access BuildLog' + '
') +""" +) +print("" + "Access BuildLog" + "
") print('') lines_seen = set() for l in fd: - if 'remove these lines' in l and l not in lines_seen: - lines_seen.add(l) - sec=iter(fd) - line=next(sec) - line=line.rstrip() - if len(line): - files += 1 - items = l.split(splitline)[-1].split(" ",1) - print('') - - elif 'add these lines' in l and l not in lines_seen: - lines_seen.add(l) - sec=iter(fd) - line=next(sec) - line=line.rstrip() - if len(line): - files += 1 - items = l.split(splitline)[-1].split(" ",1) - print('') -print('
' + items[0] + ' '+items[1]+'') - while len(line): - excludes +=1 - line=line.replace('<','&lt;') - line=line.replace('>','&gt;') - print('
'+line) - line=next(sec) - line=line.rstrip() - print('
' + items[0] + ' '+items[1]+'') - while len(line): - includes += 1 - line=line.replace('<','&lt;') - line=line.replace('>','&gt;') - print('
'+line) - line=next(sec) - line=line.rstrip() - print('
') -stat = [ files , includes , excludes ] + if "remove these lines" in l and l not in lines_seen: + lines_seen.add(l) + sec = iter(fd) + line = next(sec) + line = line.rstrip() + if len(line): + files += 1 + items = l.split(splitline)[-1].split(" ", 1) + print( + '" + + items[0] + + " " + + items[1] + + "" + ) + while len(line): + excludes += 1 + line = line.replace("<", "&lt;") + line = line.replace(">", "&gt;") + print("
" + line) + line = next(sec) + line = line.rstrip() + print("") + + elif "add these lines" in l and l not in lines_seen: + lines_seen.add(l) + sec = iter(fd) + line = next(sec) + line = line.rstrip() + if len(line): + files += 1 + items = l.split(splitline)[-1].split(" ", 1) + print( + '" + + items[0] + + " " + + items[1] + + "" + ) + while len(line): + includes += 1 + line = line.replace("<", "&lt;") + line = line.replace(">", "&gt;") + print("
" + line) + line = next(sec) + line = line.rstrip() + print("") +print("") +stat = [files, includes, excludes] info[pkg_name] = stat -output_file = open('stats.json', 'a') +output_file = open("stats.json", "a") output_file.write(json.dumps(info)) output_file.close() diff --git a/parse_jenkins_builds.py b/parse_jenkins_builds.py index 31eed14bc51d..d8c0a49afab7 100755 --- a/parse_jenkins_builds.py +++ b/parse_jenkins_builds.py @@ -1,69 +1,90 @@ #!/usr/bin/env python3 from __future__ import print_function from hashlib import sha1 -import os , re , sys , json, datetime, time, functools +import os, re, sys, json, datetime, time, functools import xml.etree.ElementTree as ET import subprocess -from es_utils import send_payload,get_payload,resend_payload,get_payload_wscroll +from es_utils import send_payload, get_payload, resend_payload, get_payload_wscroll + +JENKINS_PREFIX = "jenkins" +try: + JENKINS_PREFIX = os.environ["JENKINS_URL"].strip("/").split("/")[-1] +except: + JENKINS_PREFIX = "jenkins" +LOCAL_JENKINS_URL = os.environ["LOCAL_JENKINS_URL"] -JENKINS_PREFIX="jenkins" -try: JENKINS_PREFIX=os.environ['JENKINS_URL'].strip("/").split("/")[-1] -except: JENKINS_PREFIX="jenkins" -LOCAL_JENKINS_URL = os.environ['LOCAL_JENKINS_URL'] def findParametersAction(root): - if root.tag=='parameters': return root - for x in root: - p=findParametersAction(x) - if p is not None: return p - return None + if root.tag == "parameters": + return root + for x in root: + p = findParametersAction(x) + if p is not None: + return p + return None + def getParameters(root, payload): - n=root.find('name') - if n is not None: - if n.text is None: return - v=root.find('value') - vv = "None" - if v is not None: vv = str(v.text) - payload['parameter_'+n.text]=vv - else: - for x in root: getParameters(x, payload) + n = root.find("name") + if n is not None: + if n.text is None: + return + v = root.find("value") + vv = "None" + if v is not None: + vv = str(v.text) + payload["parameter_" + n.text] = vv + else: + for x in root: + getParameters(x, payload) + def get_current_time(): - """Returns current time in milliseconds. """ + """Returns current time in milliseconds.""" current_time = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1) - current_time = round(current_time.total_seconds()*1000) + current_time = round(current_time.total_seconds() * 1000) return current_time + def display_build_info(build_id, build_payload): - """Display id, job name, build number and waiting time for a conrete build in queue. """ - print("==> ", str(build_id) + " " + str(build_payload["job_name"]) + " #" + str(build_payload["queue_id"])) - wait_time = build_payload["wait_time"]/1000 - print("Time in queue (minutes): ", str(wait_time/60)) + """Display id, job name, build number and waiting time for a conrete build in queue.""" + print( + "==> ", + str(build_id) + + " " + + str(build_payload["job_name"]) + + " #" + + str(build_payload["queue_id"]), + ) + wait_time = build_payload["wait_time"] / 1000 + print("Time in queue (minutes): ", str(wait_time / 60)) + def update_payload_timestamp(build_id, queue): - """Updates timestamp for a given payload. 
""" + """Updates timestamp for a given payload.""" id = build_id payload = queue[id] current_time = get_current_time() - payload['@timestamp'] = current_time + payload["@timestamp"] = current_time return id, payload + def process_queue_reason(labels): if "already in progress" in labels: reason = "concurrent builds not allowed" elif "Waiting for next available executor on" in labels: - node = labels.split(" on ")[1].encode('ascii', errors='ignore').decode("ascii", "ignore") + node = labels.split(" on ")[1].encode("ascii", errors="ignore").decode("ascii", "ignore") reason = node + "-busy" elif "is offline;" in labels: reason = "multiple-offline" elif "is offline" in labels: - node = labels.split(" is ")[0].encode('ascii', errors='ignore').decode("ascii", "ignore") + node = labels.split(" is ")[0].encode("ascii", errors="ignore").decode("ascii", "ignore") reason = node + "-offline" else: reason = "other" return reason + def grep(filename, pattern, verbose=False): """Bash-like grep function. Set verbose=True to print the line match.""" if not os.path.exists(filename): @@ -76,18 +97,25 @@ def grep(filename, pattern, verbose=False): else: return True -query_running_builds = """{ + +query_running_builds = ( + """{ "query": {"bool": {"must": {"query_string": {"query": "job_status:Running AND jenkins_server:%s", "default_operator": "AND"}}}}, "from": 0, "size": 10000 -}""" % JENKINS_PREFIX +}""" + % JENKINS_PREFIX +) # Query job with in_queue=1 -query_inqueue1 = """{ +query_inqueue1 = ( + """{ "query": {"bool": {"must": {"query_string": {"query": "in_queue: 1 AND start_time: 0 AND jenkins_server: %s", "default_operator": "AND"}}}}, "from": 0, "size": 10000 -}""" % JENKINS_PREFIX +}""" + % JENKINS_PREFIX +) # Query jobs with in_queue=0 query_inqueue0 = """{ @@ -97,7 +125,7 @@ def grep(filename, pattern, verbose=False): }""" # Get jobs in queue from elastic search -queue_index = 'cmssdt-jenkins-queue*' +queue_index = "cmssdt-jenkins-queue*" try: elements_inqueue = get_payload_wscroll(queue_index, query_inqueue1) except ValueError: @@ -106,15 +134,19 @@ def grep(filename, pattern, verbose=False): es_queue = dict() es_indexes = dict() if elements_inqueue: - if (not 'hits' in elements_inqueue) or (not 'hits' in elements_inqueue['hits']): - print("ERROR: ", elements_inqueue) - for entry in elements_inqueue['hits']['hits']: - es_indexes[entry['_id']] = entry['_index'] - es_queue[entry['_id']] = entry['_source'] + if (not "hits" in elements_inqueue) or (not "hits" in elements_inqueue["hits"]): + print("ERROR: ", elements_inqueue) + for entry in elements_inqueue["hits"]["hits"]: + es_indexes[entry["_id"]] = entry["_index"] + es_queue[entry["_id"]] = entry["_source"] # Get jenkins queue and construct payload to be send to elastic search -que_cmd='curl -s -H "OIDC_CLAIM_CERN_UPN: cmssdt; charset=UTF-8" "' + LOCAL_JENKINS_URL + '/queue/api/json?pretty=true"' -jque_res = subprocess.run(que_cmd,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +que_cmd = ( + 'curl -s -H "OIDC_CLAIM_CERN_UPN: cmssdt; charset=UTF-8" "' + + LOCAL_JENKINS_URL + + '/queue/api/json?pretty=true"' +) +jque_res = subprocess.run(que_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) queue_json = json.loads(jque_res.stdout) jenkins_queue = dict() @@ -128,7 +160,7 @@ def grep(filename, pattern, verbose=False): labels = element["why"].encode("ascii", "ignore").decode("ascii", "ignore") reason = process_queue_reason(labels) - payload['jenkins_server'] = JENKINS_PREFIX + payload["jenkins_server"] = JENKINS_PREFIX 
payload["in_queue_since"] = queue_time payload["queue_id"] = queue_id payload["job_name"] = job_name @@ -137,11 +169,13 @@ def grep(filename, pattern, verbose=False): payload["wait_time"] = current_time - queue_time payload["start_time"] = 0 - unique_id = JENKINS_PREFIX + ":/build/builds/" + job_name + "/" + str(queue_id) # Not a real path + unique_id = ( + JENKINS_PREFIX + ":/build/builds/" + job_name + "/" + str(queue_id) + ) # Not a real path id = sha1(unique_id.encode()).hexdigest() jenkins_queue[id] = payload -queue_index="cmssdt-jenkins-queue-"+str(int(((current_time/86400000)+4)/7)) +queue_index = "cmssdt-jenkins-queue-" + str(int(((current_time / 86400000) + 4) / 7)) queue_document = "queue-data" # Update information in elastic search @@ -150,7 +184,7 @@ def grep(filename, pattern, verbose=False): for build_id in new_inqueue: id, payload = update_payload_timestamp(build_id, jenkins_queue) display_build_info(id, payload) - send_payload(queue_index,queue_document,id,json.dumps(payload)) + send_payload(queue_index, queue_document, id, json.dumps(payload)) still_inqueue = [x for x in jenkins_queue.keys() if x in es_queue.keys()] print("[INFO] Updating waiting time for build that are still in queue ...") @@ -158,15 +192,20 @@ def grep(filename, pattern, verbose=False): id, payload = update_payload_timestamp(build_id, jenkins_queue) payload["wait_time"] = current_time - payload["in_queue_since"] display_build_info(id, payload) - send_payload(es_indexes[id],queue_document,id,json.dumps(payload)) + send_payload(es_indexes[id], queue_document, id, json.dumps(payload)) no_inqueue = [str(y) for y in es_queue.keys() if y not in jenkins_queue.keys()] print("[INFO] Updating builds that are no longer in queue ...") for build_id in no_inqueue: id, payload = update_payload_timestamp(build_id, es_queue) - payload['in_queue'] = 0 - print("==> Cleaning up ",es_indexes[id],"/", str(id) + " " + str(payload["job_name"]) + " #" + str(payload["queue_id"])) - send_payload(es_indexes[id],queue_document,id,json.dumps(payload)) + payload["in_queue"] = 0 + print( + "==> Cleaning up ", + es_indexes[id], + "/", + str(id) + " " + str(payload["job_name"]) + " #" + str(payload["queue_id"]), + ) + send_payload(es_indexes[id], queue_document, id, json.dumps(payload)) time.sleep(10) @@ -174,71 +213,88 @@ def grep(filename, pattern, verbose=False): queue_content_hash = get_payload_wscroll("cmssdt-jenkins-queue*", query_inqueue0) es_queue = dict() es_indexes = dict() -for entry in queue_content_hash['hits']['hits']: - if not 'queue_id' in entry['_source']: continue - queue_id = entry['_source']['queue_id'] - entry['_source']['queue_hash'] = entry['_id'] - es_indexes[queue_id] = entry['_index'] - es_queue[queue_id] = entry['_source'] +for entry in queue_content_hash["hits"]["hits"]: + if not "queue_id" in entry["_source"]: + continue + queue_id = entry["_source"]["queue_id"] + entry["_source"]["queue_hash"] = entry["_id"] + es_indexes[queue_id] = entry["_index"] + es_queue[queue_id] = entry["_source"] print("[INFO] Checking status of running/finished builds ...") all_local = [] -path = '/build/builds' +path = "/build/builds" document = "builds-data" rematch = re.compile(".*/\d+$") for root, dirs, files in os.walk(path): - if rematch.match(root): - logFile = root + '/build.xml' - flagFile = root + '/check.done' - if os.path.exists(logFile) and not os.path.exists(flagFile): - payload = {} - job_info = root.split('/') - payload['job_name'] = '/'.join(job_info[3:-1]) - payload['build_number'] = job_info[-1] - payload['url'] = 
"https://cmssdt.cern.ch/"+JENKINS_PREFIX+"/job/" + '/job/'.join(job_info[3:-1]) + "/" + job_info[-1] + "/" - id = sha1((JENKINS_PREFIX+":"+root).encode()).hexdigest() - try: - tree = ET.parse(logFile) - root = tree.getroot() - pa=findParametersAction(root) - if pa is not None: getParameters(pa, payload) - jstime = root.find('startTime').text - payload['@timestamp'] = int(jstime) - try: - payload['slave_node'] = root.find('builtOn').text - except: - payload['slave_node'] = 'unknown' - try: - payload['queue_id'] = root.find('queueId').text - except: - payload['queue_id'] = 'unknown' - payload['jenkins_server'] = JENKINS_PREFIX - build_result = root.find('result') - if build_result is not None: - payload['build_result'] = build_result.text - payload['build_duration'] = int(int(root.find('duration').text)/1000) - payload['job_status'] = 'Finished' - os.system('touch "' + flagFile + '"') - else: - payload['job_status'] = 'Running' - - # Check if job has been in queue, and update queue waiting time - queue_id = int(payload['queue_id']) - if queue_id in es_queue.keys(): - queue_payload = es_queue[queue_id] - queue_payload['start_time'] = int(jstime) # start time in millisec - queue_payload['wait_time'] = int(jstime) - queue_payload["in_queue_since"] - queue_payload['build_number'] = payload['build_number'] - - print("==> Sending payload for ", queue_payload['queue_hash']) - send_payload(es_indexes[queue_id], queue_document, queue_payload['queue_hash'], json.dumps(queue_payload)) - - all_local.append(id) - weekindex="jenkins-jobs-"+str(int((((int(jstime)/1000)/86400)+4)/7)) - print("==>",id,payload['job_name'],payload['build_number'],payload['job_status']) - send_payload(weekindex,document,id,json.dumps(payload)) - except Exception as e: - print("Xml parsing error",logFile , e) + if rematch.match(root): + logFile = root + "/build.xml" + flagFile = root + "/check.done" + if os.path.exists(logFile) and not os.path.exists(flagFile): + payload = {} + job_info = root.split("/") + payload["job_name"] = "/".join(job_info[3:-1]) + payload["build_number"] = job_info[-1] + payload["url"] = ( + "https://cmssdt.cern.ch/" + + JENKINS_PREFIX + + "/job/" + + "/job/".join(job_info[3:-1]) + + "/" + + job_info[-1] + + "/" + ) + id = sha1((JENKINS_PREFIX + ":" + root).encode()).hexdigest() + try: + tree = ET.parse(logFile) + root = tree.getroot() + pa = findParametersAction(root) + if pa is not None: + getParameters(pa, payload) + jstime = root.find("startTime").text + payload["@timestamp"] = int(jstime) + try: + payload["slave_node"] = root.find("builtOn").text + except: + payload["slave_node"] = "unknown" + try: + payload["queue_id"] = root.find("queueId").text + except: + payload["queue_id"] = "unknown" + payload["jenkins_server"] = JENKINS_PREFIX + build_result = root.find("result") + if build_result is not None: + payload["build_result"] = build_result.text + payload["build_duration"] = int(int(root.find("duration").text) / 1000) + payload["job_status"] = "Finished" + os.system('touch "' + flagFile + '"') + else: + payload["job_status"] = "Running" + + # Check if job has been in queue, and update queue waiting time + queue_id = int(payload["queue_id"]) + if queue_id in es_queue.keys(): + queue_payload = es_queue[queue_id] + queue_payload["start_time"] = int(jstime) # start time in millisec + queue_payload["wait_time"] = int(jstime) - queue_payload["in_queue_since"] + queue_payload["build_number"] = payload["build_number"] + + print("==> Sending payload for ", queue_payload["queue_hash"]) + send_payload( + 
es_indexes[queue_id], + queue_document, + queue_payload["queue_hash"], + json.dumps(queue_payload), + ) + + all_local.append(id) + weekindex = "jenkins-jobs-" + str(int((((int(jstime) / 1000) / 86400) + 4) / 7)) + print( + "==>", id, payload["job_name"], payload["build_number"], payload["job_status"] + ) + send_payload(weekindex, document, id, json.dumps(payload)) + except Exception as e: + print("Xml parsing error", logFile, e) # Check remaining elements in the queue (to catch jobs that enter the queue and finish on the same iter) print("[INFO] Checking remaining elements in queue ...") @@ -252,32 +308,53 @@ def grep(filename, pattern, verbose=False): queue_id = grep(file_path, str(es_queue[entry]["queue_id"]), True) if queue_id != None: queue_id.replace("", "").replace("", "").replace("\n", "") - jstime = grep(file_path, str(""), True).replace("", "").replace("", "").replace("\n", "") + jstime = ( + grep(file_path, str(""), True) + .replace("", "") + .replace("", "") + .replace("\n", "") + ) es_queue[entry]["start_time"] = int(jstime) es_queue[entry]["wait_time"] = int(jstime) - es_queue[entry]["in_queue_since"] - print("==> Sending payload for ", es_queue[entry]['queue_hash']) - send_payload(es_indexes[entry], queue_document, es_queue[entry]['queue_hash'], json.dumps(es_queue[entry])) + print("==> Sending payload for ", es_queue[entry]["queue_hash"]) + send_payload( + es_indexes[entry], + queue_document, + es_queue[entry]["queue_hash"], + json.dumps(es_queue[entry]), + ) -running_builds_elastic={} -content_hash = get_payload_wscroll('jenkins-*',query_running_builds) +running_builds_elastic = {} +content_hash = get_payload_wscroll("jenkins-*", query_running_builds) if not content_hash: - running_builds_elastic = {} + running_builds_elastic = {} else: - if (not 'hits' in content_hash) or (not 'hits' in content_hash['hits']): - print("ERROR: ",content_hash) - sys.exit(1) - print("Found:", len(content_hash['hits']['hits'])) - for hit in content_hash['hits']['hits']: - if hit["_index"].startswith("cmssdt-jenkins-jobs-"): - if not "jenkins_server" in hit["_source"]: hit["_source"]["jenkins_server"] = JENKINS_PREFIX - if hit["_source"]["jenkins_server"]!=JENKINS_PREFIX: continue - try:print("Running:",hit["_source"]["jenkins_server"],":",hit["_source"]['job_name'],hit["_source"]['build_number'],hit["_index"],hit['_id']) - except Exception as e: print("Error:", e) - running_builds_elastic[hit['_id']]=hit + if (not "hits" in content_hash) or (not "hits" in content_hash["hits"]): + print("ERROR: ", content_hash) + sys.exit(1) + print("Found:", len(content_hash["hits"]["hits"])) + for hit in content_hash["hits"]["hits"]: + if hit["_index"].startswith("cmssdt-jenkins-jobs-"): + if not "jenkins_server" in hit["_source"]: + hit["_source"]["jenkins_server"] = JENKINS_PREFIX + if hit["_source"]["jenkins_server"] != JENKINS_PREFIX: + continue + try: + print( + "Running:", + hit["_source"]["jenkins_server"], + ":", + hit["_source"]["job_name"], + hit["_source"]["build_number"], + hit["_index"], + hit["_id"], + ) + except Exception as e: + print("Error:", e) + running_builds_elastic[hit["_id"]] = hit for build in running_builds_elastic: - if build not in all_local: - hit = running_builds_elastic[build] - hit["_source"]["job_status"]="Failed" - resend_payload(hit) - print("job status marked as Failed") - + if build not in all_local: + hit = running_builds_elastic[build] + hit["_source"]["job_status"] = "Failed" + resend_payload(hit) + print("job status marked as Failed") diff --git a/parse_workflow_time.py 
b/parse_workflow_time.py index 37febe88e126..ce446b284528 100755 --- a/parse_workflow_time.py +++ b/parse_workflow_time.py @@ -1,27 +1,28 @@ #!/bin/env python from datetime import datetime -import re , json +import re, json from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument("-i", "--input") parser.add_argument("-o", "--output") args = parser.parse_args() -fd_read = open(args.input,'r') +fd_read = open(args.input, "r") dict_store = {} for line in fd_read: - workflow = line.split('_')[0] - match_date = re.findall(r'[A-Z]{3}\s+[\d]{2}\s+[\d]{2}:[\d]{2}:[\d]{2}\s+[\d]{4}', line, re.IGNORECASE) - if len(match_date)!=2: - continue - - t1 = datetime.strptime(match_date[1],'%b %d %H:%M:%S %Y') - t2 = datetime.strptime(match_date[0],'%b %d %H:%M:%S %Y') - delta = t2-t1 - dict_store[workflow] = delta.seconds + workflow = line.split("_")[0] + match_date = re.findall( + r"[A-Z]{3}\s+[\d]{2}\s+[\d]{2}:[\d]{2}:[\d]{2}\s+[\d]{4}", line, re.IGNORECASE + ) + if len(match_date) != 2: + continue -fd_read.close -with open(args.output, 'w') as outfile: - json.dump(dict_store, outfile) + t1 = datetime.strptime(match_date[1], "%b %d %H:%M:%S %Y") + t2 = datetime.strptime(match_date[0], "%b %d %H:%M:%S %Y") + delta = t2 - t1 + dict_store[workflow] = delta.seconds +fd_read.close +with open(args.output, "w") as outfile: + json.dump(dict_store, outfile) diff --git a/port-pull-request.py b/port-pull-request.py index 2e6e115a4ff7..9aeff997cabb 100755 --- a/port-pull-request.py +++ b/port-pull-request.py @@ -6,24 +6,52 @@ from github import Github from github_utils import port_pr from socket import setdefaulttimeout + setdefaulttimeout(120) if __name__ == "__main__": - parser = OptionParser( usage="%prog " ) - parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False ) - parser.add_option( "-p", "--pull_request", dest="pull_request" , action="store" , help="Pull request number to be ported", type=int ) - parser.add_option( "-b", "--branch", dest="branch" , action="store" , help="Git branch where this PR should be ported to e.g. CMSSW_7_6_X") - parser.add_option( "-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default=gh_user+"/"+gh_cmssw) + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not post on Github", + default=False, + ) + parser.add_option( + "-p", + "--pull_request", + dest="pull_request", + action="store", + help="Pull request number to be ported", + type=int, + ) + parser.add_option( + "-b", + "--branch", + dest="branch", + action="store", + help="Git branch where this PR should be ported to e.g. CMSSW_7_6_X", + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default=gh_user + "/" + gh_cmssw, + ) - opts, args = parser.parse_args( ) + opts, args = parser.parse_args() - if len( args ) != 0: - parser.print_help() - parser.error( "Too many arguments" ) + if len(args) != 0: + parser.print_help() + parser.error("Too many arguments") - if not opts.pull_request or not opts.branch: - parser.print_help() - parser.error("Too few arguments") + if not opts.pull_request or not opts.branch: + parser.print_help() + parser.error("Too few arguments") - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - port_pr(gh.get_repo(opts.repository), opts.pull_request , opts.branch, opts.dryRun) + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + port_pr(gh.get_repo(opts.repository), opts.pull_request, opts.branch, opts.dryRun) diff --git a/pr-checks/check-pr-files b/pr-checks/check-pr-files deleted file mode 100755 index 57b0c1804d3c..000000000000 --- a/pr-checks/check-pr-files +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from os.path import dirname, abspath -from optparse import OptionParser -import sys, re -sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules -from _py2with3compatibility import run_cmd -CMS_BOT_DIR = dirname(dirname(abspath(sys.argv[0]))) - -def check_commits_files(repo, pr, detail=False): - status_map = {'A':'Added', - 'C':'Copied', - 'D':'Deleted', - 'M':'Modified', - 'R':'Renamed', - 'T':'Type', - 'U':'Unmerged', - 'X':'Unknown'} - invalid_status=[('A', 'D'), ('C', 'D'), ('R', 'D'), ('X', 'X'), ('U', 'U')] - - all_ok = False - e, o = run_cmd('%s/process-pull-request -a -c -r %s %s' % (CMS_BOT_DIR, repo, pr)) - if e: - print(o) - return all_ok - details= {} - data = {} - for c in o.split('\n'): - e , o = run_cmd('git diff-tree --no-commit-id --name-status -r %s' % c) - if e: - print(o) - return all_ok - for l in [re.sub("\s+"," ",x.strip()) for x in o.split('\n') if x.strip()]: - (t,f)=l.split(' ') - if not f in data: - data[f]=[] - details[f]={} - if not t in data[f]: - data[f].append(t) - details[f][t]=[] - details[f][t].append(c) - all_ok = True - for f in data: - for s in invalid_status: - if len([1 for x in s if x in data[f]])>1: - if not detail: - print("%s: %s" % (f, ', '.join([status_map[x] for x in data[f]]))) - if detail: - print("%s:" % f) - for x in data[f]: - print(" %s: %s" % (status_map[x], ", ".join(details[f][x]))) - all_ok = False - return all_ok -def process(repo, pr, detail=False): - if not check_commits_files(repo, pr, detail): return False - return True - -if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. 
cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-d", "--detail", dest="detail", action="store_true", help="Print detail output", default=False) - opts, args = parser.parse_args() - - if len(args) != 1: parser.error("Too many/few arguments") - if not process(opts.repository, args[0], opts.detail): sys.exit(1) - diff --git a/pr-checks/check-pr-files b/pr-checks/check-pr-files new file mode 120000 index 000000000000..71f4cf9f68c2 --- /dev/null +++ b/pr-checks/check-pr-files @@ -0,0 +1 @@ +check-pr-files.py \ No newline at end of file diff --git a/pr-checks/check-pr-files.py b/pr-checks/check-pr-files.py new file mode 100755 index 000000000000..77d4964f0947 --- /dev/null +++ b/pr-checks/check-pr-files.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python +from __future__ import print_function +from os.path import dirname, abspath +from optparse import OptionParser +import sys, re + +sys.path.append(dirname(dirname(abspath(__file__)))) # in order to import cms-bot level modules +from _py2with3compatibility import run_cmd + +CMS_BOT_DIR = dirname(dirname(abspath(sys.argv[0]))) + + +def check_commits_files(repo, pr, detail=False): + status_map = { + "A": "Added", + "C": "Copied", + "D": "Deleted", + "M": "Modified", + "R": "Renamed", + "T": "Type", + "U": "Unmerged", + "X": "Unknown", + } + invalid_status = [("A", "D"), ("C", "D"), ("R", "D"), ("X", "X"), ("U", "U")] + + all_ok = False + e, o = run_cmd("%s/process-pull-request -a -c -r %s %s" % (CMS_BOT_DIR, repo, pr)) + if e: + print(o) + return all_ok + details = {} + data = {} + for c in o.split("\n"): + e, o = run_cmd("git diff-tree --no-commit-id --name-status -r %s" % c) + if e: + print(o) + return all_ok + for l in [re.sub("\s+", " ", x.strip()) for x in o.split("\n") if x.strip()]: + (t, f) = l.split(" ") + if not f in data: + data[f] = [] + details[f] = {} + if not t in data[f]: + data[f].append(t) + details[f][t] = [] + details[f][t].append(c) + all_ok = True + for f in data: + for s in invalid_status: + if len([1 for x in s if x in data[f]]) > 1: + if not detail: + print("%s: %s" % (f, ", ".join([status_map[x] for x in data[f]]))) + if detail: + print("%s:" % f) + for x in data[f]: + print(" %s: %s" % (status_map[x], ", ".join(details[f][x]))) + all_ok = False + return all_ok + + +def process(repo, pr, detail=False): + if not check_commits_files(repo, pr, detail): + return False + return True + + +if __name__ == "__main__": + parser = OptionParser(usage="%prog ") + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-d", + "--detail", + dest="detail", + action="store_true", + help="Print detail output", + default=False, + ) + opts, args = parser.parse_args() + + if len(args) != 1: + parser.error("Too many/few arguments") + if not process(opts.repository, args[0], opts.detail): + sys.exit(1) diff --git a/pr-checks/find-changed-workflows.py b/pr-checks/find-changed-workflows.py index 5390f1284c79..5bdb12ab95f8 100755 --- a/pr-checks/find-changed-workflows.py +++ b/pr-checks/find-changed-workflows.py @@ -1,47 +1,53 @@ #!/usr/bin/env python from __future__ import print_function -import sys,re +import sys, re + def read_workflows(wfile): - fin = open(wfile) - data = {} - wf = "" - for line in fin.readlines(): - m = re.match("^([^[]+)\[(\d+)]:\s+(.+)",line) - if m: - cmd = re.sub("\s\s+"," ",m.group(3).strip()) - if m.group(1).strip(): - wf = m.group(1).strip().split(" ",1)[0] - data [wf] = [] - data[wf].append(cmd) - return data + fin = open(wfile) + data = {} + wf = "" + for line in fin.readlines(): + m = re.match("^([^[]+)\[(\d+)]:\s+(.+)", line) + if m: + cmd = re.sub("\s\s+", " ", m.group(3).strip()) + if m.group(1).strip(): + wf = m.group(1).strip().split(" ", 1)[0] + data[wf] = [] + data[wf].append(cmd) + return data + orig = sys.argv[1] new = sys.argv[2] -odata= read_workflows(orig) +odata = read_workflows(orig) ndata = read_workflows(new) cdata = {} for wf in ndata: - cdata[wf] = [] - if not wf in odata: - cdata[wf] = ["New workflow"] - continue - nlen = len(ndata[wf]) - olen = len(odata[wf]) - if nlen!=olen: - cdata[wf] = ["Number of Steps changed: %s vs %s" % (olen, nlen)] - else: - for i in range(nlen): - if ndata[wf][i]!=odata[wf][i]: - cdata[wf].append("\n - **step%s**\n```\n- %s\n+ %s\n```" % (i+1, ndata[wf][i],odata[wf][i])) + cdata[wf] = [] + if not wf in odata: + cdata[wf] = ["New workflow"] + continue + nlen = len(ndata[wf]) + olen = len(odata[wf]) + if nlen != olen: + cdata[wf] = ["Number of Steps changed: %s vs %s" % (olen, nlen)] + else: + for i in range(nlen): + if ndata[wf][i] != odata[wf][i]: + cdata[wf].append( + "\n - **step%s**\n```\n- %s\n+ %s\n```" + % (i + 1, ndata[wf][i], odata[wf][i]) + ) wfs = sorted(cdata, key=float) for wf in wfs: - if not cdata[wf]: continue - if len(cdata[wf])==1: - print (" - **%s**: %s" % (wf, cdata[wf][0])) - else: - print (" - **%s**:" % wf) - for c in cdata[wf]: - print (c) + if not cdata[wf]: + continue + if len(cdata[wf]) == 1: + print(" - **%s**: %s" % (wf, cdata[wf][0])) + else: + print(" - **%s**:" % wf) + for c in cdata[wf]: + print(c) diff --git a/pr_testing/get-merged-prs.py b/pr_testing/get-merged-prs.py index cc599b2811da..a0b94b88543b 100755 --- a/pr_testing/get-merged-prs.py +++ b/pr_testing/get-merged-prs.py @@ -8,45 +8,91 @@ from __future__ import print_function from os import environ -from os.path import dirname,basename,abspath,join +from os.path import dirname, basename, abspath, join from json import dumps, dump, load from optparse import OptionParser import sys -sys.path.append(dirname(dirname(abspath(__file__)))) + +sys.path.append(dirname(dirname(abspath(__file__)))) from github_utils import get_merge_prs -parser = OptionParser( usage="%prog " ) -parser.add_option( "-s", "--start-tag", dest="start_tag" , action="store" , help="Starting tag, default is CMSSW_VERSION environment.", default=None) -parser.add_option( "-e", "--end-tag", dest="end_tag" , action="store" , help="Ending tag, default is HEAD.", default='HEAD') -parser.add_option( "-g", 
"--git-directory", dest="git_dir" , action="store" , help=".git directory, default is CMSSW_BASE/src/.git", default=None) -parser.add_option( "-c", "--cache-directory", dest="cache_dir" , action="store" , help="Path to cms-prs cache directory", default=None) -parser.add_option( "-o", "--out-file", dest="out_file" , action="store" , help="Outpu json file name", default=None) -parser.add_option( "-r", "--repository", dest="repository" , action="store" , help="Repository e.g. cms-sw/cmssw or cms-sw/cmsdist", default="cms-sw/cmssw") -parser.add_option( "-i", "--ignore-prs", dest="ignore" , action="store" , help="Comma separated list of PRs to ignore", default="") -opts, args = parser.parse_args( ) -if len( args ) != 0: - parser.print_help() - parser.error( "Too many arguments" ) +parser = OptionParser(usage="%prog ") +parser.add_option( + "-s", + "--start-tag", + dest="start_tag", + action="store", + help="Starting tag, default is CMSSW_VERSION environment.", + default=None, +) +parser.add_option( + "-e", + "--end-tag", + dest="end_tag", + action="store", + help="Ending tag, default is HEAD.", + default="HEAD", +) +parser.add_option( + "-g", + "--git-directory", + dest="git_dir", + action="store", + help=".git directory, default is CMSSW_BASE/src/.git", + default=None, +) +parser.add_option( + "-c", + "--cache-directory", + dest="cache_dir", + action="store", + help="Path to cms-prs cache directory", + default=None, +) +parser.add_option( + "-o", "--out-file", dest="out_file", action="store", help="Outpu json file name", default=None +) +parser.add_option( + "-r", + "--repository", + dest="repository", + action="store", + help="Repository e.g. cms-sw/cmssw or cms-sw/cmsdist", + default="cms-sw/cmssw", +) +parser.add_option( + "-i", + "--ignore-prs", + dest="ignore", + action="store", + help="Comma separated list of PRs to ignore", + default="", +) +opts, args = parser.parse_args() +if len(args) != 0: + parser.print_help() + parser.error("Too many arguments") if not opts.start_tag: - opts.start_tag = environ['CMSSW_VERSION'] + opts.start_tag = environ["CMSSW_VERSION"] if not opts.git_dir: - opts.git_dir = environ['CMSSW_BASE']+"/src/.git" + opts.git_dir = environ["CMSSW_BASE"] + "/src/.git" if not opts.cache_dir: - parser.error( "Please pass -c|--cache-directory /path/to/cms-prs" ) + parser.error("Please pass -c|--cache-directory /path/to/cms-prs") prs = {} if opts.out_file: - with open(opts.out_file) as ref: - prs = load(ref) -prs[opts.repository] = get_merge_prs(opts.start_tag, opts.end_tag, opts.git_dir,opts.cache_dir,{},basename(opts.repository)) + with open(opts.out_file) as ref: + prs = load(ref) +prs[opts.repository] = get_merge_prs( + opts.start_tag, opts.end_tag, opts.git_dir, opts.cache_dir, {}, basename(opts.repository) +) for ignore in [int(i) for i in opts.ignore.split(",") if i]: - if ignore in prs[opts.repository]: - del prs[opts.repository][ignore] + if ignore in prs[opts.repository]: + del prs[opts.repository][ignore] if not prs[opts.repository]: - del prs[opts.repository] + del prs[opts.repository] if opts.out_file: - with open(opts.out_file,"w") as ref: - dump(prs, ref,sort_keys=True, indent=4, separators=(',', ': ')) + with open(opts.out_file, "w") as ref: + dump(prs, ref, sort_keys=True, indent=4, separators=(",", ": ")) else: - print(dumps(prs,sort_keys=True, indent=4, separators=(',', ': '))) - + print(dumps(prs, sort_keys=True, indent=4, separators=(",", ": "))) diff --git a/pr_testing/run-das-query.py b/pr_testing/run-das-query.py index 01bbbe855ffd..a567abde5d52 100755 
--- a/pr_testing/run-das-query.py +++ b/pr_testing/run-das-query.py @@ -8,31 +8,44 @@ from __future__ import print_function import os, sys -BOT_DIR=os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) -sys.path.insert(0,BOT_DIR) + +BOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) +sys.path.insert(0, BOT_DIR) from _py2with3compatibility import run_cmd from cmsutils import MachineCPUCount from RelValArgs import GetMatrixOptions os.environ["PATH"] = "%s/das-utils:%s" % (BOT_DIR, os.environ["PATH"]) -cmd = "runTheMatrix.py -j %s --maxSteps=0 %s" % (MachineCPUCount, GetMatrixOptions(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"])) -print("Running ",cmd) -e, o = run_cmd("touch runall-report-step123-.log ; rm -rf rel; mkdir rel; cd rel; %s; [ -f runall-report-step123-.log ] && cp runall-report-step123-.log ../" % cmd) +cmd = "runTheMatrix.py -j %s --maxSteps=0 %s" % ( + MachineCPUCount, + GetMatrixOptions(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"]), +) +print("Running ", cmd) +e, o = run_cmd( + "touch runall-report-step123-.log ; rm -rf rel; mkdir rel; cd rel; %s; [ -f runall-report-step123-.log ] && cp runall-report-step123-.log ../" + % cmd +) print(o) -err=0 -if e: err=1 -if os.getenv("MATRIX_EXTRAS",""): - e, o = run_cmd("grep -E '^[1-9][0-9]*(\.[0-9]*|)_' runall-report-step123-.log | sed 's|_.*||'") - all_wfs = [wf for wf in o.split('\n') if wf] - print("All WFS:",all_wfs) - new_wfs = [] - for wf in os.getenv("MATRIX_EXTRAS","").split(","): - if wf and (not wf in all_wfs) and (not wf in new_wfs): new_wfs.append(wf) - print("New WFs:",new_wfs) - if new_wfs: - cmd = "%s -l %s %s" % (cmd, ','.join(new_wfs), os.getenv("EXTRA_MATRIX_ARGS","")) - print("Running ",cmd) - e, o = run_cmd("rm -rf rel; mkdir rel; cd rel; %s ; [ -f runall-report-step123-.log ] && cat runall-report-step123-.log >> ../runall-report-step123-.log" % cmd) - print(o) - if e: err=1 +err = 0 +if e: + err = 1 +if os.getenv("MATRIX_EXTRAS", ""): + e, o = run_cmd("grep -E '^[1-9][0-9]*(\.[0-9]*|)_' runall-report-step123-.log | sed 's|_.*||'") + all_wfs = [wf for wf in o.split("\n") if wf] + print("All WFS:", all_wfs) + new_wfs = [] + for wf in os.getenv("MATRIX_EXTRAS", "").split(","): + if wf and (not wf in all_wfs) and (not wf in new_wfs): + new_wfs.append(wf) + print("New WFs:", new_wfs) + if new_wfs: + cmd = "%s -l %s %s" % (cmd, ",".join(new_wfs), os.getenv("EXTRA_MATRIX_ARGS", "")) + print("Running ", cmd) + e, o = run_cmd( + "rm -rf rel; mkdir rel; cd rel; %s ; [ -f runall-report-step123-.log ] && cat runall-report-step123-.log >> ../runall-report-step123-.log" + % cmd + ) + print(o) + if e: + err = 1 sys.exit(err) diff --git a/pr_testing/test_multiple_prs.sh b/pr_testing/test_multiple_prs.sh index 40e3ff6aba66..b07b43c1e933 100755 --- a/pr_testing/test_multiple_prs.sh +++ b/pr_testing/test_multiple_prs.sh @@ -81,7 +81,7 @@ let NCPU2=${NCPU}*2 rm -rf ${RESULTS_DIR} ${RESULTS_FILE} mkdir ${RESULTS_DIR} -TEST_RELVALS_INPUT=true +TEST_RELVALS_INPUT=false DO_COMPARISON=false DO_MB_COMPARISON=false DO_DAS_QUERY=false @@ -106,6 +106,7 @@ fi if $PRODUCTION_RELEASE ; then DO_DAS_QUERY=true + TEST_RELVALS_INPUT=true fi # ---------- diff --git a/process-build-release-request b/process-build-release-request deleted file mode 100755 index bce15bf73c77..000000000000 --- a/process-build-release-request +++ /dev/null @@ -1,1190 +0,0 @@ -#!/usr/bin/env python3 -import json -import re -from _py2with3compatibility import run_cmd, quote, Request, urlopen, HTTPError -from datetime import 
datetime, timedelta -from optparse import OptionParser -from os.path import dirname, abspath, exists -from os.path import expanduser -from socket import setdefaulttimeout - -import yaml -try: - from yaml import CLoader as Loader, CDumper as Dumper -except ImportError: - from yaml import Loader, Dumper -from github import Github - -from categories import REQUEST_BUILD_RELEASE, APPROVE_BUILD_RELEASE -from cms_static import BUILD_REL, GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, GH_CMSDIST_REPO -from cmsutils import get_config_map_properties, get_full_release_archs -from github_utils import api_rate_limits, get_ref_commit, get_commit_info -from github_utils import get_branch -from releases import get_release_managers - -setdefaulttimeout(120) -from os import environ -JENKINS_PREFIX="jenkins" -try: JENKINS_PREFIX=environ['JENKINS_URL'].strip("/").split("/")[-1] -except: JENKINS_PREFIX="jenkins" - -try: - CMS_BOT_DIR = dirname(abspath(__file__)) -except Exception as e : - from sys import argv - CMS_BOT_DIR = dirname( abspath(argv[0])) -# -# Processes a github issue to check if it is requesting the build of a new release -# If the issue is not requesting any release, it ignores it. -# - -# ------------------------------------------------------------------------------- -# Global Variables -# -------------------------------------------------------------------------------- - -NOT_AUTHORIZED_MSG = 'You are not authorized to request the build of a release.' -CONFIG_MAP_FILE = CMS_BOT_DIR+'/config.map' -NO_ARCHS_FOUND_MSG = 'No architecures to build found for {rel_name}. Please check that you entered a ' \ - 'valid release name or that the IBs are currently enabled for {queue}' -RELEASE_BASE_URL = 'https://github.com/cms-sw/cmssw/releases/tag/%s' -BASE_BRANCH_URL = ' https://github.com/cms-sw/cmssw/tree/%s' -RELEASE_CREATED_MSG = 'Release created: {rel_name}. The tag was created on top of branch: {base_branch}' -RELEASE_CREATION_FAIL_MSG = 'There was an error while attempting to create {rel_name}. ' \ - 'Please check if it already exists https://github.com/cms-sw/cmssw/releases' -WRONG_RELEASE_NAME_MSG = 'The release name is malformed. Please check for typos.' -ACK_MSG = 'Request received. I will start to build the release after one of the following approve ' \ - 'the issue: {approvers_list}. You can do this by writing "+1" in a ' \ - 'comment.\n You can also ask me to begin to build cmssw-tool-conf first ( Cannot be done for patch releases ). To do this write ' \ - '"build cmssw-tool-conf" in a comment. I will start to build cmssw-tool-conf and then wait for the "+1" ' \ - 'to start the build of the release.\n' \ - 'CMSSW Branch: {cmssw_queue}\n'\ - 'Architecture: {architecture}\n'\ - '{cmssw_commit_tag}' -WATCHERS_MSG = '{watchers_list} you requested to watch the automated builds for {queue}' -QUEUING_BUILDS_MSG = 'Queuing Jenkins build for the following architectures: %s \n' \ - 'You can abort the build by writing "Abort" in a comment. I will delete the release, ' \ - 'the cmssw and cmsdist tag, and close the issue. You can\'t abort the upload once at' \ - ' least one achitecture is being uploaded. \n' \ - 'If you are building cmssw-tool-conf first, I will wait for each architecture to finish to start the build of cmssw.' -QUEUING_TOOLCONF_MSG = 'Queuing Jenkins build for cmssw-tool-conf for the following architectures: %s \n' \ - 'Be aware that I am building only cmssw-tool-conf. You still need to "+1" this issue to ' \ - 'make me start the build of the release. 
For each architecture, I will only start to build ' \ - 'the release after cmssw-tool-conf finishes building.' -QUEING_UPLOADS_MSG = 'Queing Jenkins upload for {architecture}' -CLEANUP_STARTED_MSG = 'The cleanup has started for {architecture}' -NOT_TOOLCONF_FOR_PATCH_MSG = 'You cannot ask me to build cmssw-tool-conf for patch releases. Please delete that message.' -JENKINS_CMSSW_X_Y_Z = 'CMSSW_X_Y_Z' -JENKINS_ARCH = 'ARCHITECTURE' -JENKINS_ISSUE_NUMBER = 'ISSUE_NUMBER' -JENKINS_MACHINE_NAME = 'MACHINE_NAME' -JENKINS_CMSSW_QUEUE = 'CMSSW_QUEUE' -JENKINS_DOCKER_IMG = 'DOCKER_IMG' -JENKINS_ONLY_TOOL_CONF = 'ONLY_BUILD_TOOLCONF' -WRONG_NOTES_RELEASE_MSG = 'Previous release "{previous_release}" does not appear to be a valid release name' -PREVIOUS_RELEASE_NAME_MSG = 'Unable to find previous release for {release_name}. Please use "release-notes since " in first line of the comment.' -GENERATING_RELEASE_NOTES_MSG = 'Generating release notes since {previous_release}. \n' \ - 'You can see the progress here: \n' \ - 'https://cmssdt.cern.ch/%s/job/release-produce-changelog/\n' \ - 'I will generate an announcement template.\n' % JENKINS_PREFIX -PROD_ARCH_NOT_READY_MSG = 'ATTENTION!!! The production architecture ({prod_arch}) is not ready yet. '\ - 'This needs to be checked before asking me to generate the release notes.\n'\ - 'When the production architecture is installed successfully, I will generate the release notes.'\ - ' You don\'t need to write the command again.' -REL_NAME_REGEXP="(CMSSW_[0-9]+_[0-9]+)_[0-9]+(_SLHC[0-9]*|)(_pre[0-9]+|_[a-zA-Z]*patch[0-9]+|)(_[^_]*|)" -UPLOAD_COMMENT = 'upload %s' -UPLOAD_ALL_COMMENT = '^[uU]pload all$' -ABORT_COMMENT = '^[Aa]bort$' -RELEASE_NOTES_COMMENT = '^release-notes([ ]+since[ ]+[^ ]+)?$' -BUILD_TOOLCONF = '^[Bb]uild cmssw-tool-conf' -APPROVAL_COMMENT = '^[+]1$' -RELEASE_NOTES_GENERATED_LBL = 'release-notes-requested' -ANNOUNCEMENT_GENERATED_LBL = 'release-notes-requested' -JENKINS_PREV_RELEASE='PREVIOUS_RELEASE' -JENKINS_RELEASE='RELEASE' -JENKINS_PREV_CMSDIST_TAG='PREVIOUS_CMSDIST_TAG' -JENKINS_CMSDIST_TAG='CMSDIST_TAG' -JENKINS_PRODUCTION_ARCH='PRODUCTION_ARCH' -JENKINS_BUILD_DIR='BUILD_DIR' -ANNOUNCEMENT_TEMPLATE = 'Hi all,\n\n' \ - 'The {rel_type} {is_patch}release {rel_name} is now available '\ - 'for the following architectures:\n\n'\ - '{production_arch} (production)\n'\ - '{rest_of_archs}'\ - 'The release notes of what changed with respect to {prev_release} can be found at:\n\n'\ - 'https://github.com/cms-sw/cmssw/releases/{rel_name}\n'\ - '{description}'\ - 'Cheers,\n'\ - 'cms-bot' - -HN_REL_ANNOUNCE_EMAIL = 'hn-cms-relAnnounce@cern.ch' -ANNOUNCEMENT_EMAIL_SUBJECT = '{rel_type} {is_patch}Release {rel_name} Now Available ' -MAILTO_TEMPLATE = 'here' - -# ------------------------------------------------------------------------------- -# Statuses -# -------------------------------------------------------------------------------- -# This is to determine the status of the issue after reading the labels - -#The issue has just been created -NEW_ISSUE = 'NEW_ISSUSE' -# The issue has been received, but it needs approval to start the build -PENDING_APPROVAL = 'build-pending-approval' -# The build has been queued in jenkins -BUILD_IN_PROGRESS = 'build-in-progress' -# The build has started -BUILD_STARTED = 'build-started' -# The build has been aborted. 
-BUILD_ABORTED = 'build-aborted' -# they requested to build cmssw-tool-conf and it is being built -TOOLCONF_BUILDING = 'toolconf-building' -# at leas one of the architectures was built successully -BUILD_SUCCESSFUL = 'build-successful' -# the builds are being uploaded -UPLOADING_BUILDS = 'uploading-builds' -# the release has been announced -RELEASE_ANNOUNCED = 'release-announced' -# the release was build without issues. -PROCESS_COMPLETE = 'process-complete' -# Label for all Release Build Issue -RELEASE_BUILD_ISSUE = 'release-build-request' - -# ------------------------------------------------------------------------------- -# Functions -# -------------------------------------------------------------------------------- - -# -# creates a properties file to cleanup the build files. -# -def create_properties_file_cleanup( release_name, arch, issue_number, machine_name, tool_conf=False): - if tool_conf: - out_file_name = 'cleanup-tool-conf-%s-%s.properties' % ( release_name , arch ) - else: - out_file_name = 'cleanup-%s-%s.properties' % ( release_name , arch ) - - if opts.dryRun: - print('Not creating cleanup properties file (dry-run):\n %s' % out_file_name) - else: - print('Creating properties file for %s' % arch) - out_file = open( out_file_name , 'w' ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSSW_X_Y_Z , release_name ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ARCH , arch ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ISSUE_NUMBER , issue_number ) ) - out_file.write( '%s=%s\n' % ( JENKINS_MACHINE_NAME , machine_name ) ) - - -# Creates a properties file in Jenkins to trigger the upload -# it needs to know the machine that was used for the build -# -def create_properties_files_upload( release_name, arch, issue_number, machine_name, docker_imgs, prod ): - - docker_img = '' - if arch in docker_imgs: docker_img = docker_imgs[arch] - out_file_name = 'upload-%s-%s.properties' % ( release_name , arch ) - if opts.dryRun: - print('Not creating properties file (dry-run):\n %s' % out_file_name) - else: - print('Creating properties file for %s' % arch) - out_file = open( out_file_name , 'w' ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSSW_X_Y_Z , release_name ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ARCH , arch ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ISSUE_NUMBER , issue_number ) ) - out_file.write( '%s=%s\n' % ( JENKINS_DOCKER_IMG , docker_img ) ) - out_file.write( '%s=%s\n' % ( JENKINS_MACHINE_NAME , machine_name ) ) - out_file.write( '%s=%s\n' % ( JENKINS_PRODUCTION_ARCH , 'true' if prod else 'false' ) ) - - -# -# Searches in the comments if there is a comment made from the given users that -# matches the given pattern. It returns the date of the first comment that matches -# if no comment matches it not returns None -# -def search_date_comment( comments, user_logins, pattern, first_line ): - - for comment in reversed( comments ): - - if comment.user.login not in user_logins: - continue - - examined_str = comment.body - - if first_line: - examined_str = str(comment.body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")) - - if examined_str == pattern: - return comment.created_at - - if re.match( pattern , examined_str ): - return comment.created_at - - return None - - -# -# Searches in the comments if there is a comment made from the given users that -# matches the given pattern. It returns a list with the matched comments. 
-# -def search_in_comments( comments, user_logins, pattern, first_line ): - found_comments = [] - requested_comment_bodies = [ c.body for c in comments if c.user.login in user_logins ] - for body in requested_comment_bodies: - examined_str = body - if first_line: - examined_str = str(body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")) - - if examined_str == pattern: - found_comments.append( body ) - continue - - if re.match( pattern , examined_str ): - found_comments.append( body ) - - return found_comments - - -# -# Checks if the issue has already been seen so the issue will not be processed again -# Returns True if the issue needs to be processed, False if not -# -def check_if_already_processed( issue ): - comments = [ c for c in issue.get_comments( ) ] - comment_bodies = [ c.body for c in comments if c.user.login == 'cmsbuild' ] - for body in comment_bodies: - if 'Release created' in body: - return True - if 'Queuing Jenkins build' in body: - return True - if 'You are not authorized' in body: - return True - - return False -# -# Creates the properties files to trigger the build in Jenkins -# if only_toolconf is selected, it adds a parameter to tell the script to only build cmssw-tool-conf -# -def create_properties_files( issue, release_name, architectures, issue_number, queue, docker_imgs, only_toolconf=False, cmsdist_commit=None): - - if not only_toolconf: - for arch in architectures: - remove_label( issue, arch + '-tool-conf-ok' ) - add_label( issue, arch + '-build-queued' ) - - if opts.dryRun: - print('Not creating properties files for (dry-run): %s' % ", ".join( architectures )) - return - - for arch in architectures: - docker_img = '' - if arch in docker_imgs: docker_img = docker_imgs[arch] - out_file_name = 'build-%s-%s.properties' % ( release_name , arch ) - print('Creating properties file for %s' % arch) - out_file = open( out_file_name , 'w' ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSSW_X_Y_Z , release_name ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ARCH , arch ) ) - out_file.write( '%s=%s\n' % ( JENKINS_ISSUE_NUMBER , issue_number ) ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSSW_QUEUE , queue) ) - out_file.write( '%s=%s\n' % ( JENKINS_DOCKER_IMG , docker_img) ) - tool_conf_param = 'true' if only_toolconf else 'false' - out_file.write( '%s=%s\n' % ( JENKINS_ONLY_TOOL_CONF, tool_conf_param ) ) - if cmsdist_commit: out_file.write( 'CMSDIST_HASH=%s\n' % cmsdist_commit ) - - -# -# generates the properties file for triggering the release notes -# it infers the tag names based on te format REL//architecture -# -def create_properties_file_rel_notes( release_name, previous_release, architecture, issue_number ): - cmsdist_tag = 'REL/'+release_name+'/'+architecture - previos_cmsdist_tag = 'REL/'+previous_release+'/'+architecture - out_file_name = 'release-notes.properties' - - if opts.dryRun: - print('Not creating properties file (dry-run): %s' % out_file_name) - return - - out_file = open( out_file_name , 'w' ) - out_file.write( '%s=%s\n' % ( JENKINS_PREV_RELEASE, previous_release ) ) - out_file.write( '%s=%s\n' % ( JENKINS_RELEASE, release_name ) ) - out_file.write( '%s=%s\n' % ( JENKINS_PREV_CMSDIST_TAG, previos_cmsdist_tag ) ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSDIST_TAG, cmsdist_tag) ) - out_file.write( '%s=%s\n' % ( JENKINS_ISSUE_NUMBER , issue_number ) ) - -# -# Creates a release in github -# If dry-run is selected it doesn't create the release and just prints that -# returns true if it was able to create the release, false if not -# -def 
get_release_github(repository, release_name): - print('Checking release:\n %s' % release_name) - request = Request("https://api.github.com/repos/" + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO +"/releases/tags/"+release_name) - try: - print(urlopen(request).read()) - return True - except Exception as e: - print('There was an error while creating the release:\n', e) - return False - -def create_release_github( repository, release_name, branch, commit=None): - if get_release_github (repository, release_name): return True - if opts.dryRun: - print('Not creating release (dry-run):\n %s' % release_name) - return True - - print('Creating release:\n %s' % release_name) - if commit: - if not get_commit_info(GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO, commit): - sha = get_ref_commit(GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO, commit) - if sha: commit = sha - else: commit=branch - # creating releases will be available in the next version of pyGithub - params = { "tag_name" : release_name, - "target_commitish" : commit, - "name" : release_name, - "body" : 'cms-bot is going to build this release', - "draft" : False, - "prerelease" : False } - print(params) - request = Request("https://api.github.com/repos/" + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO +"/releases", - headers={"Authorization" : "token " + GH_TOKEN }) - request.get_method = lambda: 'POST' - print('--') - try: - print(urlopen( request, json.dumps( params ).encode() ).read()) - return True - except Exception as e: - print('There was an error while creating the release:\n', e) - return False - print() - -# -# Deletes in github the release given as a parameter. -# If the release does no exists, it informs it in the message. -# -def delete_release_github( release_name ): - if opts.dryRun: - print('Not deleting release (dry-run):\n %s' % release_name) - return 'Not deleting release (dry-run)' - - releases_url = "https://api.github.com/repos/" + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO +"/releases?per_page=100" - - request = Request( releases_url, headers={"Authorization" : "token " + GH_TOKEN }) - releases = json.loads(urlopen(request).read()) - matchingRelease = [x["id"] for x in releases if x["name"] == release_name] - - if len(matchingRelease) < 1: - return "Release %s not found." % release_name - - releaseId = matchingRelease[0] - url = "https://api.github.com/repos/cms-sw/cmssw/releases/%s" % releaseId - request = Request( url, headers={"Authorization" : "token " + GH_TOKEN }) - request.get_method = lambda: 'DELETE' - - try: - print(urlopen( request ).read()) - return 'Release successfully deleted' - except Exception as e: - return 'There was an error while deleting the release:\n %s' % e - -def delete_tag(org, repo, tag): - if not exists (repo): - cmd = "mkdir deltag-{repo}; cd deltag-{repo}; git init; git remote add {repo} git@github.com:{org}/{repo}.git".format(org=org,repo=repo) - print('Executing: \n %s' % cmd) - status, out = run_cmd( cmd ) - cmd = "cd deltag-{repo}; git push {repo} :{tag}".format(repo=repo,tag=tag) - print('Executing: \n %s' % cmd) - status, out = run_cmd( cmd ) - print(out) - if status != 0: - msg = 'I was not able to delete the tag %s. Probaly it had not been created.' % tag - print(msg) - return msg - msg = '%s tag %s successfully deleted.' 
% (repo, tag) - return msg - -# -# Deletes in github the tag given as a parameter -# -def delete_cmssw_tag_github( release_name ): - if opts.dryRun: - print('Not deleting cmssw tag (dry-run):\n %s' % release_name) - return 'Not deleting cmssw tag (dry-run): %s ' % release_name - return delete_tag(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, release_name) - -# -# for each architecture, gets the tag in cmsdist that should have ben created and deletes it -# -def delete_cmsdist_tags_github( release_name, architectures ): - result = '' - for arch in architectures: - tag_to_delete = "REL/{rel_name}/{architecture}".format(rel_name=release_name, architecture=arch ) - if opts.dryRun: - msg = 'Not deleting cmsdist tag (dry-run): %s' % tag_to_delete - result += '\n\n - ' + msg - continue - result += '\n\n - ' + delete_tag(GH_CMSSW_ORGANIZATION, GH_CMSDIST_REPO, tag_to_delete) - return result - -# -# Adds a label to the issue in github -# if dry-run is selected it doesn't add the label and just prints it -def add_label( issue , label ): - if opts.dryRun: - print('Not adding label (dry-run):\n %s' % label) - return - print('Adding label:\n %s' % label) - issue.add_to_labels( label ) - -# -# posts a message to the issue in github -# if dry-run is selected it doesn't post the message and just prints it -# if you set checkIfRepeated to False, if will not check if the message has already been written. -# -def post_message( issue , msg, checkIfRepeated=True ): - if opts.dryRun: - print('Not posting message (dry-run):\n %s' % msg) - return - if checkIfRepeated and search_in_comments( comments, [ 'cmsbuild' ], msg, False): - print('Message already in the thread: \n %s' % msg) - return - print('Posting message:\n %s' % msg) - issue.create_comment( msg ) - -# -# reads the comments and gets returns the status of the issue -# -def get_issue_status( issue ): - labels = [ l.name for l in issue.get_labels() if l.name != RELEASE_BUILD_ISSUE] - print("Issue Label: ",labels) - - if not labels: - return NEW_ISSUE - if BUILD_ABORTED in labels: - return BUILD_ABORTED - if PENDING_APPROVAL in labels: - return PENDING_APPROVAL - if BUILD_IN_PROGRESS in labels: - return BUILD_IN_PROGRESS - if TOOLCONF_BUILDING in labels: - return TOOLCONF_BUILDING - if BUILD_SUCCESSFUL in labels: - return BUILD_SUCCESSFUL - if UPLOADING_BUILDS in labels: - return UPLOADING_BUILDS - if RELEASE_ANNOUNCED in labels: - return RELEASE_ANNOUNCED - if PROCESS_COMPLETE in labels: - return PROCESS_COMPLETE -# -# closes the issue -# -def close_issue( issue ): - if opts.dryRun: - print('Not closing issue (dry-run)') - return - print('Closing issue...') - issue.edit( state="closed" ) - -# -# removes the labels of the issue -# -def remove_labels( issue ): - if opts.dryRun: - print('Not removing issue labels (dry-run)') - return - issue.delete_labels() - -# Removes a label form the issue -def remove_label( issue, label ): - if opts.dryRun: - print('Not removing label (dry-run):\n %s' % label) - return - - if label not in labels: - print('label ', label, ' does not exist. 
Not attempting to remove') - return - - print('Removing label: %s' % label) - try: - issue.remove_from_labels( label ) - except Exception as e: - print(e) - from sys import exit - exit(1) - -# -# Creates a properties file in Jenkins to kill the build -# it needs to know the machine that was used for the build -# -def create_properties_kill_build( release_name ): - out_file_name = 'kill-%s.properties' % ( release_name ) - print('Creating properties file for %s' % release_name) - out_file = open( out_file_name , 'w' ) - out_file.write( '%s=%s\n' % ( JENKINS_CMSSW_X_Y_Z , release_name ) ) - if opts.dryRun: - out_file.write('DRY_RUN=true\n') - else: - out_file.write('DRY_RUN=false\n') - -# -# Aborts the build: -# -Deletes the release in github -# -Deletes the cmssw tags -# -Deletes the cmsdist tags -# -Triggers the killing of the build process in jenkins -# -def abort_build( issue, release_name, architectures, comments): - msg = 'Deleting %s:' % release_name - del_rel_result = delete_release_github( release_name ) - msg += '\n\n - ' + del_rel_result - msg += '\n\n - ' + delete_cmssw_tag_github( release_name ) - - create_properties_kill_build(release_name) - msg += delete_cmsdist_tags_github( release_name, architectures ) - msg += '\n\n' + 'You must create a new issue to start over the build.' - post_message( issue, msg ) - -# -# Classifies the labels and fills the lists with the details of the current -# status of each architecture -# -def fillDeatilsArchsLists( issue ): - labels = [ l.name for l in issue.get_labels() ] - BUILD_OK.extend( [ x.split('-')[0] for x in labels if '-build-ok' in x ] ) - BUILDING.extend( [ x.split('-')[0] for x in labels if '-building' in x ] ) - UPLOAD_OK.extend( [ x.split('-')[0] for x in labels if '-upload-ok' in x ] ) - INSTALL_OK.extend( [ x.split('-')[0] for x in labels if '-installation-ok' in x ] ) - UPLOADING.extend( [ x.split('-')[0] for x in labels if '-uploading' in x ] ) - BUILD_ERROR.extend( [ x.split('-')[0] for x in labels if '-build-error' in x ] ) - TOOL_CONF_BUILDING.extend( [ x.split('-')[0] for x in labels if '-tool-conf-building' in x ] ) - TOOL_CONF_OK.extend( [ x.split('-')[0] for x in labels if '-tool-conf-ok' in x ] ) - TOOL_CONF_ERROR.extend( [ x.split('-')[0] for x in labels if '-tool-conf-error' in x ] ) - TOOL_CONF_WAITING.extend( [ x.split('-')[0] for x in labels if '-tool-conf-waiting' in x ] ) - TO_CLEANUP.extend( UPLOAD_OK + BUILD_ERROR + BUILD_OK + INSTALL_OK ) - -# -# Triggers the cleanup for the architectures in the list TO_CLEANUP -# -def triggerCleanup( issue, comments, release_name ): - - if TO_CLEANUP: - - for arch in TO_CLEANUP: - - pattern = 'The build has started for %s .*' % arch - build_info_comments = search_in_comments( comments, ['cmsbuild'], pattern, False) - - pattern_tool_conf = 'The cmssw-tool-conf build has started for %s .*' % arch - tool_conf_info_comments = search_in_comments( comments, ['cmsbuild'], pattern_tool_conf, False) - - if not build_info_comments: - print('No information found about the build machine, something is wrong for %s' % arch) - continue - - build_machine = build_info_comments[-1].split( ' ' )[7].strip( '.' ) - print('\nTriggering cleanup for %s' % arch) - create_properties_file_cleanup( release_name, arch, issue.number, build_machine ) - - if tool_conf_info_comments: - build_machine_toolconf = tool_conf_info_comments[-1].split( ' ' )[8].strip( '.' 
) - print('\nTriggering tool-conf cleanup for %s' % arch) - create_properties_file_cleanup( release_name, arch, issue.number, build_machine_toolconf, tool_conf=True) - - print() - msg = CLEANUP_STARTED_MSG.format( architecture=arch ) - post_message( issue, msg ) - remove_label( issue, arch + '-upload-ok' ) - remove_label( issue, arch + '-build-error' ) - remove_label( issue, arch + '-build-ok' ) - remove_label( issue, arch + '-installation-ok' ) - add_label( issue, arch + '-finished' ) - -# -# Creates the release in github, including the cmssw tag. It then creates the files to trigger the builds in jenkins -# -def start_release_build( issue, release_name, release_branch, architectures, docker_imgs, commit=None, cmsdist_commit=None): - - cmssw_repo = gh.get_repo(GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO) - release_created = create_release_github( cmssw_repo, release_name, release_branch, commit) - if not release_created: - msg = RELEASE_CREATION_FAIL_MSG.format( rel_name=release_name ) - post_message( issue , RELEASE_CREATION_FAIL_MSG.format( rel_name=release_name ) ) - exit( 0 ) - - msg = RELEASE_CREATED_MSG.format( rel_name=(RELEASE_BASE_URL % release_name), - base_branch=(BASE_BRANCH_URL % release_branch) ) - post_message( issue , msg ) - - ready_to_build = list( set( architectures ) - set( TOOL_CONF_WAITING ) - set( TOOL_CONF_ERROR ) - set( TOOL_CONF_BUILDING ) ) - create_properties_files( issue, release_name, ready_to_build, issue.number, release_queue, docker_imgs, only_toolconf=False, cmsdist_commit=cmsdist_commit ) - if ready_to_build: - msg = QUEUING_BUILDS_MSG % ', '.join( ready_to_build ) - post_message( issue , msg ) - -# -# Creates the files to trigger the build of cmssw-tool-conf in jenkins. -# -def start_tool_conf_build( issue, release_name, release_branch, architectures, docker_imgs, cmsdist_commit=None): - - create_properties_files( issue, release_name, architectures, issue.number, release_queue, docker_imgs, only_toolconf=True, cmsdist_commit=cmsdist_commit) - msg = QUEUING_TOOLCONF_MSG % ', '.join( architectures ) - post_message( issue , msg ) - -# -# removes the label for the current state and adds the label for the next state -# -def go_to_state( issue, current_state, new_state ): - print('\nSwitching to state: ', new_state, '\n') - remove_label( issue, current_state ) - add_label( issue, new_state ) - -# -# Generates an announcement prototype -# -def fix_release_description(issue): - if not issue.body: return '\n' - desc_str = '\n' + issue.body.encode("ascii", "ignore").decode().strip() + '\n\n' - desc_lines = [] - for l in desc_str.split('\n'): - if "RELEASE_QUEUE:" in l: continue - if "ARCHITECTURE:" in l: continue - if "TAG_COMMIT:" in l: continue - if "CMSSW_COMMIT:" in l: continue - if "CMSDIST_COMMIT:" in l: continue - if "PRODUCTION_ARCHITECTURE:" in l: continue - desc_lines.append(l) - return "\n".join(desc_lines) - -def generate_announcement( release_name, previous_release_name, production_architecture, architectures ): - - print('\nGenerating announcement template...\n') - is_development = 'pre' in release_name - type_str = 'development' if is_development else 'production' - print('Is development: ', is_development) - is_patch = 'patch' in release_name - patch_str = 'patch ' if is_patch else '' - print('Is patch: ', is_patch) - # The description of the issue should explain the reason for building the release - desc = fix_release_description(issue) - print('Description: \n', desc) - - architectures.remove( production_architecture ) - rest_of_archs = 
'\n'.join(architectures) + '\n\n' if architectures else '\n' - rel_cyc = "_".join(release_name.split("_")[:2]) - announcement = ANNOUNCEMENT_TEMPLATE.format( rel_type=type_str, - is_patch=patch_str, - rel_name=release_name, - rel_cyc=rel_cyc, - production_arch=production_architecture, - rest_of_archs=rest_of_archs, - prev_release=previous_release_name, - description=desc ) - - return announcement - -# -# Generates a link that the uset can click to write the announcement email with just one click -# -def generate_announcement_link( announcement, release_name ): - is_development = 'pre' in release_name - type_str = 'Development' if is_development else 'Production' - is_patch = 'patch' in release_name - patch_str = 'patch ' if is_patch else '' - - subject = quote(ANNOUNCEMENT_EMAIL_SUBJECT.format( rel_type=type_str, - is_patch=patch_str, - rel_name=release_name)) - - msg = quote(announcement) - link = MAILTO_TEMPLATE.format( destinatary=HN_REL_ANNOUNCE_EMAIL, - sub=subject, - body=msg ) - return link - - -# -# checks if the production architecture is ready, if so, it generates a template for the announcement -# -def check_if_prod_arch_ready( issue, prev_rel_name, production_architecture ): - if ( production_architecture in INSTALL_OK ): - print('Production architecture successfully installed..') - #For now, it assumes that the release is being installed and it will be installed successfully - announcement = generate_announcement( release_name, prev_rel_name, production_architecture, list(set(INSTALL_OK+UPLOAD_OK)) ) - mailto = generate_announcement_link( announcement, release_name ) - msg = 'You can use this template for announcing the release:\n\n%s\n\n' \ - 'You can also click %s to send the email.' % (announcement, mailto) - post_message( issue, msg, checkIfRepeated=False ) - add_label( issue, ANNOUNCEMENT_GENERATED_LBL ) - -# -# checks the issue for archs to be uploaded -# -def check_archs_to_upload( release_name, issue, docker_imgs, production_architecture): - print('Looking for archs ready to be uploaded...\n') - for arch in BUILD_OK: - print('Ready to upload %s' % arch) - pattern = '^The build has started for %s .*' % arch - build_info_comments = search_in_comments( comments, ['cmsbuild'] , pattern, True ) - if not build_info_comments: - print('No information found about the build machine, something is wrong') - exit( 1 ) - - first_line_info_comment = str(build_info_comments[-1].encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")) - build_machine = first_line_info_comment.split( ' ' )[ 7 ].strip( '.' 
) - print('Triggering upload for %s (prod arch: %s)' % (arch, arch==production_architecture)) - create_properties_files_upload( release_name , arch , issue.number , build_machine, docker_imgs, arch==production_architecture ) - post_message( issue , QUEING_UPLOADS_MSG.format( architecture=arch ) ) - remove_label( issue, arch + '-build-ok' ) - add_label( issue, arch + '-uploading' ) - - if BUILD_OK: - return True - else: - return False - -# -# checks if there are architectures that are ready to be built afer building tool-conf, and triggers the build if neccessary -# -def check_to_build_after_tool_conf( issue, release_name, release_queue, docker_imgs): - print('Checking if there are architectures waiting to be started after building tool-conf') - ready_to_build = TOOL_CONF_OK - print(ready_to_build) - create_properties_files( issue, release_name, ready_to_build, issue.number, release_queue, docker_imgs ) - if ready_to_build: - msg = QUEUING_BUILDS_MSG % ', '.join( ready_to_build ) - post_message( issue , msg ) - -# -# Guesses the previous release name based on the name given as a parameter -# -def guess_prev_rel_name( release_name, issue ): - num_str = release_name.split( '_' )[ -1 ] - number = int( re.search( '[0-9]+$', release_name).group(0) ) - prev_number = number - 1 - prev_num_str = num_str.replace( str(number), str(prev_number) ) - - if ('patch' in num_str) or ('pre' in num_str): - if prev_number < 1: - if 'pre' in num_str: - post_message( issue, PREVIOUS_RELEASE_NAME_MSG.format( release_name=release_name ) ) - exit (0) - return re.sub("_"+num_str+"$","",release_name) - return re.sub("_"+num_str+"$","_"+prev_num_str,release_name) - rel_match = re.sub("_"+num_str+"$","_"+prev_num_str,release_name)+"\(_[a-zA-Z]*patch[0-9][0-9]*\|\);" - if number == 0: - rel_match = release_name + "_pre\([0-9][0-9]*\);" - ret, out = run_cmd("grep 'label="+ rel_match +"' "+ CMS_BOT_DIR+"/releases.map" + " | grep -v 'label=" + release_name + ";' | tail -1 | sed 's|.*label=||;s|;.*||'") - return out - -# ------------------------------------------------------------------------------- -# Start of execution -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - parser = OptionParser( usage="%prog " ) - parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False ) - parser.add_option( "-f" , "--force" , dest="force" , action="store_true", help="Ignore previous comments in the issue and proccess it again", default=False ) - parser.add_option( "-c", "--check-upload", dest="check_upload" , action="store" , help="Check if one of the authorized users has written the upload message" - "for the architecture given as a parameter. It exits with 0 if it finds" - "a message with the structure 'upload ', if not it exits" - " with 1" ) - opts, args = parser.parse_args( ) - - if len( args ) != 1: - parser.print_help() - parser.error( "Too many arguments" ) - - GH_TOKEN = open( expanduser("~/.github-token")).read().strip() - - issue_id = int( args[ 0 ] ) - gh = Github( login_or_token=GH_TOKEN ) - api_rate_limits(gh) - cmssw_repo_name = GH_CMSSW_ORGANIZATION + '/' + GH_CMSSW_REPO - cmssw_repo = gh.get_repo( cmssw_repo_name ) - issue = cmssw_repo.get_issue( issue_id ) - print('API Rate Limit') - print('Limit, Remaining: ', gh.rate_limiting) - print('Reset time (GMT): ', datetime.fromtimestamp(gh.rate_limiting_resettime)) - - # 1. Is this a pull request? 
- if issue.pull_request: - print('This is a pull request, ignoring.') - exit( 0 ) - - title_match = re.match(BUILD_REL, issue.title) - - # 2. Is this issue meant to build a release? - if not title_match: - print('This issue is not for building a release, ignoring.') - exit( 0 ) - - comments = [ c for c in issue.get_comments( ) ] - - release_name = title_match.group(1) - is_patch = 'patch' in release_name - full_release = release_name.split("patch")[0].rsplit('_',1)[0] if is_patch else "" - # Get the release queue from the release name. - print(release_name) - issue_body = "" - if issue.body: issue_body = issue.body.encode("ascii", "ignore").decode().strip() - release_queue = None - rel_name_match = re.match( REL_NAME_REGEXP, release_name ) - if "RELEASE_QUEUE:" in issue_body: - release_queue = issue_body.split("RELEASE_QUEUE:",1)[1].split("\n",1)[0].strip() - print("Found forces release queue:",release_queue) - else: - if not rel_name_match: - print('Release name not correctly formed') - post_message( issue, WRONG_RELEASE_NAME_MSG ) - exit( 0 ) - - release_queue = "".join([x for x in rel_name_match.group(1,4)] + ["_X"] + [x.strip("0123456789") for x in rel_name_match.group(2)]) - - release_tag_commit = None - if "TAG_COMMIT:" in issue_body: - release_tag_commit = issue_body.split("TAG_COMMIT:",1)[1].split("\n",1)[0].strip() - print("Found forces commit:",release_tag_commit) - - if "CMSSW_COMMIT:" in issue_body: - release_tag_commit = issue_body.split("CMSSW_COMMIT:",1)[1].split("\n",1)[0].strip() - print("Found forces commit:",release_tag_commit) - - cmsdist_tag_commit = None - if "CMSDIST_COMMIT:" in issue_body: - cmsdist_tag_commit = issue_body.split("CMSDIST_COMMIT:",1)[1].split("\n",1)[0].strip() - print("Found forces cmsdist commit:",cmsdist_tag_commit) - - sel_archs = [] - if "ARCHITECTURE:" in issue_body: - sel_archs = set(issue_body.split("ARCHITECTURE:",1)[1].split("\n",1)[0].strip().split(",")) - print("Found forces architectures:",sel_archs) - - print(release_queue,sel_archs) - specs = get_config_map_properties({"DISABLED": "1", "IB_ONLY": "1"}) - architectures = [x["SCRAM_ARCH"] for x in specs - if x["RELEASE_QUEUE"] == release_queue] - if not architectures: - print("Trying default queue") - release_queue = "".join([x for x in rel_name_match.group(1,2)] + ["_X"]) - print(release_queue) - architectures = [x["SCRAM_ARCH"] for x in specs - if x["RELEASE_QUEUE"] == release_queue] - - if sel_archs: - architectures = [ a for a in architectures if a in sel_archs ] - - if is_patch: - full_release_archs = get_full_release_archs(full_release) - print("Full release archs:",full_release, full_release_archs) - if not full_release_archs: - msg = "Error: unable to find architectures for full release "+full_release - post_message( issue, msg ) - exit( 0 ) - architectures = [a for a in architectures if a in full_release_archs] - print("Patch release filtered archs:",architectures) - - # Check if we have at least one architecture to build and complain if not. 
- if not architectures: - print('no archs found for the requested release') - msg = NO_ARCHS_FOUND_MSG.format( rel_name=release_name, queue=release_queue ) - post_message( issue, msg ) - exit( 0 ) - print("Archs: ", architectures) - - #Find out the docker images to be used for each arch - docker_imgs = {} - for x in specs: - if (x["RELEASE_QUEUE"] == release_queue) and ("DOCKER_IMG" in x) and (x["SCRAM_ARCH"] in architectures): - docker_imgs[x["SCRAM_ARCH"]] = x["DOCKER_IMG"] - print("Dockers:",docker_imgs) - - # Determine the release branch (which is the same as the release queue if not - # specified) and start the build if needed. - release_branches = [x["RELEASE_BRANCH"] for x in specs - if (x["RELEASE_QUEUE"] == release_queue) and ("RELEASE_BRANCH" in x)] - - possible_prod_arch = [x["SCRAM_ARCH"] for x in specs - if (x["RELEASE_QUEUE"] == release_queue) and ("PROD_ARCH" in x)] - print("Debug:",release_name, release_queue,release_branches,possible_prod_arch) - if len(architectures)>1: - err, production_architecture = run_cmd(CMS_BOT_DIR+"/get-production-arch %s %s" % (release_name, release_queue)) - print("Debug",production_architecture) - if err: - print("Unable to find production architecture for the release") - post_message( issue, "Unable to find production architecture for the release" ) - exit(0) - production_architecture = production_architecture.split("\n")[-1] - else: - production_architecture = architectures[0] - - if "PRODUCTION_ARCHITECTURE:" in issue_body: - req_arch = issue_body.split("PRODUCTION_ARCHITECTURE:",1)[1].split("\n",1)[0].strip() - if not req_arch in architectures: - msg = "You requested production architecutre to be %s but this is not a valid architecture for this release cycle." % req_arch - print(msg) - post_message( issue, msg) - exit(0) - if is_patch and (production_architecture != req_arch): - msg="You can not override production architecture for a patch release.\nProdction architecture for this release should be %s" % production_architecture - print(msg) - post_message( issue, msg) - exit(0) - production_architecture = req_arch - print("Found production architecture:",production_architecture) - if not production_architecture and len(architectures)==1: - production_architecture = architectures[0] - print("debug production arch: ",production_architecture) - - release_branch = release_queue - - if len(release_branches): - release_branch = release_branches[0] - - # If a patch release is requested and there is a patchX branch, it will be used to tag the release. For example: - # if you want to create CMSSW_7_1_4_patch2 and there exists a branch called CMSSW_7_1_4_patchX the tag will be - # on top of the branch CMSSW_7_1_4_patchX instead of CMSSW_7_1_X. - - if is_patch: - try: - possible_branch = full_release + "_patchX" - if get_branch(cmssw_repo_name, possible_branch)['name'] == possible_branch: - release_branch = possible_branch - print('This is a patch release and the branch %s was found. It will be used as base branch.' % possible_branch) - except HTTPError as e: - print(e) - if e.code != 404: - exit(1) - - print(release_branch) - for rm in get_release_managers(release_branch): - if not rm in APPROVE_BUILD_RELEASE: APPROVE_BUILD_RELEASE.append(rm) - if not rm in REQUEST_BUILD_RELEASE: REQUEST_BUILD_RELEASE.append(rm) - - # 3. Is the author authorized to trigger a build? - if not issue.user.login in REQUEST_BUILD_RELEASE: - print('User not authorized') - post_message( issue , NOT_AUTHORIZED_MSG ) - exit( 0 ) - - # Get the status of this issue. 
- status = get_issue_status( issue ) - print('Status: %s \n' % status) - - labels = [ l.name for l in issue.get_labels() ] - print("Issue labels:",labels) - - BUILD_OK = [] - BUILDING = [] - UPLOAD_OK = [] - INSTALL_OK = [] - UPLOADING = [] - BUILD_ERROR = [] - TO_CLEANUP = [] - TOOL_CONF_BUILDING = [] - TOOL_CONF_OK = [] - TOOL_CONF_ERROR = [] - TOOL_CONF_WAITING = [] - # These lists are filled by fillDeatilsArchsLists( issue ) - - fillDeatilsArchsLists( issue ) - - if status == BUILD_ABORTED: - print('Build Aborted. A new issue must be created if you want to build the release') - - date_aborted = search_date_comment( comments, APPROVE_BUILD_RELEASE, ABORT_COMMENT, True ) - # the time is 2 days because a new issue must be created to start again the build - # if for the new build the build starts in the same machine as before, this will - # start to delete the work directory of the new build. - cleanup_deadline = datetime.now() - timedelta(days=2) - if date_aborted < cleanup_deadline: - print('Cleaning up since it is too old since it was aborted') - triggerCleanup( issue, comments, release_name ) - close_issue( issue ) - else: - print('Not too old yet to clean up') - - if status == NEW_ISSUE: - approvers = ", ".join( [ "@"+x for x in APPROVE_BUILD_RELEASE ] ) - ALL_WATCHERS = (yaml.load(open(CMS_BOT_DIR+"/build-release-watchers.yaml"), Loader=Loader)) - watchers = ALL_WATCHERS.get( release_queue ) - xqueue = release_queue - if release_queue != release_branch: xqueue =release_queue+"("+release_branch+")" - cmssw_commit_tag="" - if release_tag_commit: cmssw_commit_tag="Release tag based on: %s\n" % release_tag_commit - arch_msg = ", ".join([ a if a!=production_architecture else '%s(%s)' % (a,'Production') for a in architectures ]) - msg = ACK_MSG.format( approvers_list=approvers, cmssw_queue=xqueue, architecture=arch_msg, cmssw_commit_tag=cmssw_commit_tag ) - - if watchers: - watchers_l = ", ".join( [ "@"+x for x in watchers ] ) - watchers_msg = WATCHERS_MSG.format( watchers_list=watchers_l, queue=release_queue ) - msg += watchers_msg - - post_message( issue, msg) - add_label( issue, PENDING_APPROVAL ) - add_label( issue, RELEASE_BUILD_ISSUE ) - exit( 0 ) - - if status == PENDING_APPROVAL: - approval_comments = search_in_comments( comments, APPROVE_BUILD_RELEASE , APPROVAL_COMMENT, True ) - build_toolconf_commments = search_in_comments( comments, APPROVE_BUILD_RELEASE , BUILD_TOOLCONF, True ) - if build_toolconf_commments: - if is_patch: - post_message( issue, NOT_TOOLCONF_FOR_PATCH_MSG ) - else: - start_tool_conf_build( issue, release_name, release_branch, architectures, docker_imgs, cmsdist_commit=cmsdist_tag_commit) - go_to_state( issue, status, TOOLCONF_BUILDING ) - elif approval_comments: - start_release_build( issue, release_name, release_branch, architectures, docker_imgs, release_tag_commit, cmsdist_commit=cmsdist_tag_commit) - go_to_state( issue, status, BUILD_IN_PROGRESS ) - else: - print('Build not approved or cmssw-tool-conf not requested yet') - exit( 0 ) - - if status == TOOLCONF_BUILDING: - - print('Waiting for approval to start the build') - approval_comments = search_in_comments( comments, APPROVE_BUILD_RELEASE , APPROVAL_COMMENT, True ) - if approval_comments: - print('Build approved, switching to "Build in Progress" state') - - # add a label for each arch for which tool conf has not started in jenkins - tool_conf_reported = ( TOOL_CONF_BUILDING + TOOL_CONF_OK + TOOL_CONF_ERROR ) - not_started = list( set( architectures ) - set( tool_conf_reported ) ) - - for arch in 
not_started: - add_label( issue, arch + '-tool-conf-waiting' ) - TOOL_CONF_WAITING.append( arch ) - - go_to_state( issue, status, BUILD_IN_PROGRESS ) - start_release_build( issue, release_name, release_branch, architectures, docker_imgs, release_tag_commit, cmsdist_commit=cmsdist_tag_commit ) - - if status == BUILD_IN_PROGRESS: - - abort_comments = search_in_comments( comments , APPROVE_BUILD_RELEASE , ABORT_COMMENT, True ) - print(abort_comments) - if abort_comments: - print('Aborting') - abort_build( issue, release_name, architectures, comments) - go_to_state( issue, status, BUILD_ABORTED ) - exit( 0 ) - - # if the previous state was to build tool-conf there are architectures for which it is needed to wait - build_toolconf_commments = search_in_comments( comments, APPROVE_BUILD_RELEASE , BUILD_TOOLCONF, True ) - if build_toolconf_commments: - check_to_build_after_tool_conf( issue, release_name, release_queue, docker_imgs) - - if BUILD_OK: - go_to_state( issue, status, BUILD_SUCCESSFUL ) - - if status == BUILD_SUCCESSFUL: - - abort_comments = search_in_comments( comments , APPROVE_BUILD_RELEASE , ABORT_COMMENT, True ) - print(abort_comments) - if abort_comments: - print('Aborting') - abort_build( issue, release_name, architectures, comments) - go_to_state( issue, status, BUILD_ABORTED ) - exit( 0 ) - - # if the previous state was to build tool-conf there are architectures for which it is needed to wait - build_toolconf_commments = search_in_comments( comments, APPROVE_BUILD_RELEASE , BUILD_TOOLCONF, True ) - if build_toolconf_commments: - check_to_build_after_tool_conf( issue, release_name, release_queue, docker_imgs) - - - upload_all_requested = search_in_comments( comments, APPROVE_BUILD_RELEASE, UPLOAD_ALL_COMMENT, True ) - - if upload_all_requested: - check_archs_to_upload( release_name, issue, docker_imgs, production_architecture ) - go_to_state( issue, status, UPLOADING_BUILDS ) - else: - print('Upload not requested yet') - - if status == UPLOADING_BUILDS: - - #upload archs as soon as they get ready - check_archs_to_upload( release_name, issue, docker_imgs, production_architecture ) - - #Check if someone asked for release notes, go to next state after generating notes. 
- #At least one architecture must have been successfully installed - if INSTALL_OK and ( RELEASE_NOTES_GENERATED_LBL not in labels ): - print('checking if someone asked for the release notes') - release_notes_comments = search_in_comments( comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True ) - - if release_notes_comments: - comment = release_notes_comments[-1] - first_line = str(comment.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")) - comment_parts = first_line.strip().split(' ') - print("debug: ",comment_parts) - - if len( comment_parts ) > 1: - prev_rel_name = comment_parts[ 2 ].rstrip() - else: - prev_rel_name = guess_prev_rel_name( release_name, issue ) - print(prev_rel_name) - - rel_name_match = re.match( REL_NAME_REGEXP, prev_rel_name ) - if not rel_name_match: - msg = WRONG_NOTES_RELEASE_MSG.format( previous_release=prev_rel_name ) - post_message( issue, msg ) - exit( 0 ) - - if ( production_architecture not in INSTALL_OK ): - msg = PROD_ARCH_NOT_READY_MSG.format( prod_arch=production_architecture ) - post_message( issue, msg ) - exit( 0 ) - create_properties_file_rel_notes( release_name, prev_rel_name, production_architecture, issue.number ) - msg = GENERATING_RELEASE_NOTES_MSG.format( previous_release=prev_rel_name ) - post_message( issue, msg ) - add_label( issue, RELEASE_NOTES_GENERATED_LBL ) - - #Check if the production architecture was uploaded and was correctly installed, generate announcement if so. - check_if_prod_arch_ready( issue, prev_rel_name, production_architecture ) - go_to_state( issue, status, RELEASE_ANNOUNCED ) - - if status == RELEASE_ANNOUNCED: - - #upload archs as soon as they get ready - check_archs_to_upload( release_name, issue, docker_imgs, production_architecture ) - - print('checking if someone asked again for the release notes') - release_notes_comments = search_in_comments( comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True ) - generating_release_notes_comments = search_in_comments( comments, ['cmsbuild'], 'Generating release notes', True ) - - if len( release_notes_comments ) > len( generating_release_notes_comments ): - print('I need to generate the release notes again') - # check if this is beter if a function is added - comment = release_notes_comments[-1] - first_line = str(comment.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")) - comment_parts = first_line.strip().split(' ') - - if len( comment_parts ) > 1: - prev_rel_name = comment_parts[ 2 ].rstrip() - else: - prev_rel_name = guess_prev_rel_name( release_name, issue ) - print(prev_rel_name) - - rel_name_match = re.match( REL_NAME_REGEXP, prev_rel_name ) - if not rel_name_match: - msg = WRONG_NOTES_RELEASE_MSG.format( previous_release=prev_rel_name ) - post_message( issue, msg, checkIfRepeated=False ) - exit( 0 ) - - create_properties_file_rel_notes( release_name, prev_rel_name, production_architecture, issue.number ) - msg = GENERATING_RELEASE_NOTES_MSG.format( previous_release=prev_rel_name ) - post_message( issue, msg, checkIfRepeated=False ) - check_if_prod_arch_ready( issue, prev_rel_name, production_architecture ) - - # check if the cleanup has been requested or if 2 days have passed since the release-notes were generated. 
- print('Checking if someone requested cleanup, or the issue is too old...') - date_rel_notes = search_date_comment( comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True ) - cleanup_deadline = datetime.now() - timedelta(days=2) - if date_rel_notes: - too_old = date_rel_notes < cleanup_deadline - else: - too_old = False - pattern = '^cleanup$' - cleanup_requested_comments = search_in_comments( comments, APPROVE_BUILD_RELEASE, pattern, True ) - if cleanup_requested_comments or too_old: - triggerCleanup( issue, comments, release_name ) - close_issue( issue ) - go_to_state( issue, status, PROCESS_COMPLETE ) - diff --git a/process-build-release-request b/process-build-release-request new file mode 120000 index 000000000000..abd14d250869 --- /dev/null +++ b/process-build-release-request @@ -0,0 +1 @@ +process-build-release-request.py \ No newline at end of file diff --git a/process-build-release-request.py b/process-build-release-request.py new file mode 100755 index 000000000000..2a390a447451 --- /dev/null +++ b/process-build-release-request.py @@ -0,0 +1,1477 @@ +#!/usr/bin/env python3 +import json +import re +from _py2with3compatibility import run_cmd, quote, Request, urlopen, HTTPError +from datetime import datetime, timedelta +from optparse import OptionParser +from os.path import dirname, abspath, exists +from os.path import expanduser +from socket import setdefaulttimeout + +import yaml + +try: + from yaml import CLoader as Loader, CDumper as Dumper +except ImportError: + from yaml import Loader, Dumper +from github import Github + +from categories import REQUEST_BUILD_RELEASE, APPROVE_BUILD_RELEASE +from cms_static import BUILD_REL, GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, GH_CMSDIST_REPO +from cmsutils import get_config_map_properties, get_full_release_archs +from github_utils import api_rate_limits, get_ref_commit, get_commit_info +from github_utils import get_branch +from releases import get_release_managers + +setdefaulttimeout(120) +from os import environ + +JENKINS_PREFIX = "jenkins" +try: + JENKINS_PREFIX = environ["JENKINS_URL"].strip("/").split("/")[-1] +except: + JENKINS_PREFIX = "jenkins" + +try: + CMS_BOT_DIR = dirname(abspath(__file__)) +except Exception as e: + from sys import argv + + CMS_BOT_DIR = dirname(abspath(argv[0])) +# +# Processes a github issue to check if it is requesting the build of a new release +# If the issue is not requesting any release, it ignores it. +# + +# ------------------------------------------------------------------------------- +# Global Variables +# -------------------------------------------------------------------------------- + +NOT_AUTHORIZED_MSG = "You are not authorized to request the build of a release." +CONFIG_MAP_FILE = CMS_BOT_DIR + "/config.map" +NO_ARCHS_FOUND_MSG = ( + "No architecures to build found for {rel_name}. Please check that you entered a " + "valid release name or that the IBs are currently enabled for {queue}" +) +RELEASE_BASE_URL = "https://github.com/cms-sw/cmssw/releases/tag/%s" +BASE_BRANCH_URL = " https://github.com/cms-sw/cmssw/tree/%s" +RELEASE_CREATED_MSG = ( + "Release created: {rel_name}. The tag was created on top of branch: {base_branch}" +) +RELEASE_CREATION_FAIL_MSG = ( + "There was an error while attempting to create {rel_name}. " + "Please check if it already exists https://github.com/cms-sw/cmssw/releases" +) +WRONG_RELEASE_NAME_MSG = "The release name is malformed. Please check for typos." +ACK_MSG = ( + "Request received. 
I will start to build the release after one of the following approves "
+    'the issue: {approvers_list}. You can do this by writing "+1" in a '
+    "comment.\n You can also ask me to begin to build cmssw-tool-conf first ( Cannot be done for patch releases ). To do this write "
+    '"build cmssw-tool-conf" in a comment. I will start to build cmssw-tool-conf and then wait for the "+1" '
+    "to start the build of the release.\n"
+    "CMSSW Branch: {cmssw_queue}\n"
+    "Architecture: {architecture}\n"
+    "{cmssw_commit_tag}"
+)
+WATCHERS_MSG = "{watchers_list} you requested to watch the automated builds for {queue}"
+QUEUING_BUILDS_MSG = (
+    "Queuing Jenkins build for the following architectures: %s \n"
+    'You can abort the build by writing "Abort" in a comment. I will delete the release, '
+    "the cmssw and cmsdist tag, and close the issue. You can't abort the upload once at"
+    " least one architecture is being uploaded. \n"
+    "If you are building cmssw-tool-conf first, I will wait for each architecture to finish to start the build of cmssw."
+)
+QUEUING_TOOLCONF_MSG = (
+    "Queuing Jenkins build for cmssw-tool-conf for the following architectures: %s \n"
+    'Be aware that I am building only cmssw-tool-conf. You still need to "+1" this issue to '
+    "make me start the build of the release. For each architecture, I will only start to build "
+    "the release after cmssw-tool-conf finishes building."
+)
+QUEING_UPLOADS_MSG = "Queuing Jenkins upload for {architecture}"
+CLEANUP_STARTED_MSG = "The cleanup has started for {architecture}"
+NOT_TOOLCONF_FOR_PATCH_MSG = (
+    "You cannot ask me to build cmssw-tool-conf for patch releases. Please delete that message."
+)
+JENKINS_CMSSW_X_Y_Z = "CMSSW_X_Y_Z"
+JENKINS_ARCH = "ARCHITECTURE"
+JENKINS_ISSUE_NUMBER = "ISSUE_NUMBER"
+JENKINS_MACHINE_NAME = "MACHINE_NAME"
+JENKINS_CMSSW_QUEUE = "CMSSW_QUEUE"
+JENKINS_DOCKER_IMG = "DOCKER_IMG"
+JENKINS_ONLY_TOOL_CONF = "ONLY_BUILD_TOOLCONF"
+WRONG_NOTES_RELEASE_MSG = (
+    'Previous release "{previous_release}" does not appear to be a valid release name'
+)
+PREVIOUS_RELEASE_NAME_MSG = 'Unable to find previous release for {release_name}. Please use "release-notes since " in first line of the comment.'
+GENERATING_RELEASE_NOTES_MSG = (
+    "Generating release notes since {previous_release}. \n"
+    "You can see the progress here: \n"
+    "https://cmssdt.cern.ch/%s/job/release-produce-changelog/\n"
+    "I will generate an announcement template.\n" % JENKINS_PREFIX
+)
+PROD_ARCH_NOT_READY_MSG = (
+    "ATTENTION!!! The production architecture ({prod_arch}) is not ready yet. "
+    "This needs to be checked before asking me to generate the release notes.\n"
+    "When the production architecture is installed successfully, I will generate the release notes."
+    " You don't need to write the command again."
+)
+REL_NAME_REGEXP = (
+    "(CMSSW_[0-9]+_[0-9]+)_[0-9]+(_SLHC[0-9]*|)(_pre[0-9]+|_[a-zA-Z]*patch[0-9]+|)(_[^_]*|)"
+)
+UPLOAD_COMMENT = "upload %s"
+UPLOAD_ALL_COMMENT = "^[uU]pload all$"
+ABORT_COMMENT = "^[Aa]bort$"
+RELEASE_NOTES_COMMENT = "^release-notes([ ]+since[ ]+[^ ]+)?$"
+BUILD_TOOLCONF = "^[Bb]uild cmssw-tool-conf"
+APPROVAL_COMMENT = "^[+]1$"
+RELEASE_NOTES_GENERATED_LBL = "release-notes-requested"
+ANNOUNCEMENT_GENERATED_LBL = "release-notes-requested"
+JENKINS_PREV_RELEASE = "PREVIOUS_RELEASE"
+JENKINS_RELEASE = "RELEASE"
+JENKINS_PREV_CMSDIST_TAG = "PREVIOUS_CMSDIST_TAG"
+JENKINS_CMSDIST_TAG = "CMSDIST_TAG"
+JENKINS_PRODUCTION_ARCH = "PRODUCTION_ARCH"
+JENKINS_BUILD_DIR = "BUILD_DIR"
+ANNOUNCEMENT_TEMPLATE = (
+    "Hi all,\n\n"
+    "The {rel_type} {is_patch}release {rel_name} is now available "
+    "for the following architectures:\n\n"
+    "{production_arch} (production)\n"
+    "{rest_of_archs}"
+    "The release notes of what changed with respect to {prev_release} can be found at:\n\n"
+    "https://github.com/cms-sw/cmssw/releases/{rel_name}\n"
+    "{description}"
+    "Cheers,\n"
+    "cms-bot"
+)
+
+HN_REL_ANNOUNCE_EMAIL = "hn-cms-relAnnounce@cern.ch"
+ANNOUNCEMENT_EMAIL_SUBJECT = "{rel_type} {is_patch}Release {rel_name} Now Available "
+MAILTO_TEMPLATE = '<a href="mailto:{destinatary}?subject={sub}&body={body}">here</a>'
+
+# -------------------------------------------------------------------------------
+# Statuses
+# --------------------------------------------------------------------------------
+# This is to determine the status of the issue after reading the labels
+
+# The issue has just been created
+NEW_ISSUE = "NEW_ISSUSE"
+# The issue has been received, but it needs approval to start the build
+PENDING_APPROVAL = "build-pending-approval"
+# The build has been queued in jenkins
+BUILD_IN_PROGRESS = "build-in-progress"
+# The build has started
+BUILD_STARTED = "build-started"
+# The build has been aborted.
+BUILD_ABORTED = "build-aborted"
+# they requested to build cmssw-tool-conf and it is being built
+TOOLCONF_BUILDING = "toolconf-building"
+# at least one of the architectures was built successfully
+BUILD_SUCCESSFUL = "build-successful"
+# the builds are being uploaded
+UPLOADING_BUILDS = "uploading-builds"
+# the release has been announced
+RELEASE_ANNOUNCED = "release-announced"
+# the release was built without issues.
+PROCESS_COMPLETE = "process-complete"
+# Label for all Release Build Issue
+RELEASE_BUILD_ISSUE = "release-build-request"
+
+# -------------------------------------------------------------------------------
+# Functions
+# --------------------------------------------------------------------------------
+
+
+#
+# creates a properties file to clean up the build files.
+#
+def create_properties_file_cleanup(
+    release_name, arch, issue_number, machine_name, tool_conf=False
+):
+    if tool_conf:
+        out_file_name = "cleanup-tool-conf-%s-%s.properties" % (release_name, arch)
+    else:
+        out_file_name = "cleanup-%s-%s.properties" % (release_name, arch)
+
+    if opts.dryRun:
+        print("Not creating cleanup properties file (dry-run):\n %s" % out_file_name)
+    else:
+        print("Creating properties file for %s" % arch)
+        out_file = open(out_file_name, "w")
+        out_file.write("%s=%s\n" % (JENKINS_CMSSW_X_Y_Z, release_name))
+        out_file.write("%s=%s\n" % (JENKINS_ARCH, arch))
+        out_file.write("%s=%s\n" % (JENKINS_ISSUE_NUMBER, issue_number))
+        out_file.write("%s=%s\n" % (JENKINS_MACHINE_NAME, machine_name))
+
+
+# Creates a properties file in Jenkins to trigger the upload
+# it needs to know the machine that was used for the build
+#
+def create_properties_files_upload(
+    release_name, arch, issue_number, machine_name, docker_imgs, prod
+):
+    docker_img = ""
+    if arch in docker_imgs:
+        docker_img = docker_imgs[arch]
+    out_file_name = "upload-%s-%s.properties" % (release_name, arch)
+    if opts.dryRun:
+        print("Not creating properties file (dry-run):\n %s" % out_file_name)
+    else:
+        print("Creating properties file for %s" % arch)
+        out_file = open(out_file_name, "w")
+        out_file.write("%s=%s\n" % (JENKINS_CMSSW_X_Y_Z, release_name))
+        out_file.write("%s=%s\n" % (JENKINS_ARCH, arch))
+        out_file.write("%s=%s\n" % (JENKINS_ISSUE_NUMBER, issue_number))
+        out_file.write("%s=%s\n" % (JENKINS_DOCKER_IMG, docker_img))
+        out_file.write("%s=%s\n" % (JENKINS_MACHINE_NAME, machine_name))
+        out_file.write("%s=%s\n" % (JENKINS_PRODUCTION_ARCH, "true" if prod else "false"))
+
+
+#
+# Searches the comments for a comment made by one of the given users that
+# matches the given pattern. It returns the date of the most recent comment that
+# matches; if no comment matches, it returns None
+#
+def search_date_comment(comments, user_logins, pattern, first_line):
+    for comment in reversed(comments):
+        if comment.user.login not in user_logins:
+            continue
+
+        examined_str = comment.body
+
+        if first_line:
+            examined_str = str(
+                comment.body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ")
+            )
+
+        if examined_str == pattern:
+            return comment.created_at
+
+        if re.match(pattern, examined_str):
+            return comment.created_at
+
+    return None
+
+
+#
+# Searches the comments for comments made by the given users that
+# match the given pattern. It returns a list of the matched comments.
+# +def search_in_comments(comments, user_logins, pattern, first_line): + found_comments = [] + requested_comment_bodies = [c.body for c in comments if c.user.login in user_logins] + for body in requested_comment_bodies: + examined_str = body + if first_line: + examined_str = str( + body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ") + ) + + if examined_str == pattern: + found_comments.append(body) + continue + + if re.match(pattern, examined_str): + found_comments.append(body) + + return found_comments + + +# +# Checks if the issue has already been seen so the issue will not be processed again +# Returns True if the issue needs to be processed, False if not +# +def check_if_already_processed(issue): + comments = [c for c in issue.get_comments()] + comment_bodies = [c.body for c in comments if c.user.login == "cmsbuild"] + for body in comment_bodies: + if "Release created" in body: + return True + if "Queuing Jenkins build" in body: + return True + if "You are not authorized" in body: + return True + + return False + + +# +# Creates the properties files to trigger the build in Jenkins +# if only_toolconf is selected, it adds a parameter to tell the script to only build cmssw-tool-conf +# +def create_properties_files( + issue, + release_name, + architectures, + issue_number, + queue, + docker_imgs, + only_toolconf=False, + cmsdist_commit=None, +): + if not only_toolconf: + for arch in architectures: + remove_label(issue, arch + "-tool-conf-ok") + add_label(issue, arch + "-build-queued") + + if opts.dryRun: + print("Not creating properties files for (dry-run): %s" % ", ".join(architectures)) + return + + for arch in architectures: + docker_img = "" + if arch in docker_imgs: + docker_img = docker_imgs[arch] + out_file_name = "build-%s-%s.properties" % (release_name, arch) + print("Creating properties file for %s" % arch) + out_file = open(out_file_name, "w") + out_file.write("%s=%s\n" % (JENKINS_CMSSW_X_Y_Z, release_name)) + out_file.write("%s=%s\n" % (JENKINS_ARCH, arch)) + out_file.write("%s=%s\n" % (JENKINS_ISSUE_NUMBER, issue_number)) + out_file.write("%s=%s\n" % (JENKINS_CMSSW_QUEUE, queue)) + out_file.write("%s=%s\n" % (JENKINS_DOCKER_IMG, docker_img)) + tool_conf_param = "true" if only_toolconf else "false" + out_file.write("%s=%s\n" % (JENKINS_ONLY_TOOL_CONF, tool_conf_param)) + if cmsdist_commit: + out_file.write("CMSDIST_HASH=%s\n" % cmsdist_commit) + + +# +# generates the properties file for triggering the release notes +# it infers the tag names based on te format REL//architecture +# +def create_properties_file_rel_notes(release_name, previous_release, architecture, issue_number): + cmsdist_tag = "REL/" + release_name + "/" + architecture + previos_cmsdist_tag = "REL/" + previous_release + "/" + architecture + out_file_name = "release-notes.properties" + + if opts.dryRun: + print("Not creating properties file (dry-run): %s" % out_file_name) + return + + out_file = open(out_file_name, "w") + out_file.write("%s=%s\n" % (JENKINS_PREV_RELEASE, previous_release)) + out_file.write("%s=%s\n" % (JENKINS_RELEASE, release_name)) + out_file.write("%s=%s\n" % (JENKINS_PREV_CMSDIST_TAG, previos_cmsdist_tag)) + out_file.write("%s=%s\n" % (JENKINS_CMSDIST_TAG, cmsdist_tag)) + out_file.write("%s=%s\n" % (JENKINS_ISSUE_NUMBER, issue_number)) + + +# +# Creates a release in github +# If dry-run is selected it doesn't create the release and just prints that +# returns true if it was able to create the release, false if not +# +def get_release_github(repository, release_name): + 
print("Checking release:\n %s" % release_name) + request = Request( + "https://api.github.com/repos/" + + GH_CMSSW_ORGANIZATION + + "/" + + GH_CMSSW_REPO + + "/releases/tags/" + + release_name + ) + try: + print(urlopen(request).read()) + return True + except Exception as e: + print("There was an error while creating the release:\n", e) + return False + + +def create_release_github(repository, release_name, branch, commit=None): + if get_release_github(repository, release_name): + return True + if opts.dryRun: + print("Not creating release (dry-run):\n %s" % release_name) + return True + + print("Creating release:\n %s" % release_name) + if commit: + if not get_commit_info(GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO, commit): + sha = get_ref_commit(GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO, commit) + if sha: + commit = sha + else: + commit = branch + # creating releases will be available in the next version of pyGithub + params = { + "tag_name": release_name, + "target_commitish": commit, + "name": release_name, + "body": "cms-bot is going to build this release", + "draft": False, + "prerelease": False, + } + print(params) + request = Request( + "https://api.github.com/repos/" + + GH_CMSSW_ORGANIZATION + + "/" + + GH_CMSSW_REPO + + "/releases", + headers={"Authorization": "token " + GH_TOKEN}, + ) + request.get_method = lambda: "POST" + print("--") + try: + print(urlopen(request, json.dumps(params).encode()).read()) + return True + except Exception as e: + print("There was an error while creating the release:\n", e) + return False + print() + + +# +# Deletes in github the release given as a parameter. +# If the release does no exists, it informs it in the message. +# +def delete_release_github(release_name): + if opts.dryRun: + print("Not deleting release (dry-run):\n %s" % release_name) + return "Not deleting release (dry-run)" + + releases_url = ( + "https://api.github.com/repos/" + + GH_CMSSW_ORGANIZATION + + "/" + + GH_CMSSW_REPO + + "/releases?per_page=100" + ) + + request = Request(releases_url, headers={"Authorization": "token " + GH_TOKEN}) + releases = json.loads(urlopen(request).read()) + matchingRelease = [x["id"] for x in releases if x["name"] == release_name] + + if len(matchingRelease) < 1: + return "Release %s not found." % release_name + + releaseId = matchingRelease[0] + url = "https://api.github.com/repos/cms-sw/cmssw/releases/%s" % releaseId + request = Request(url, headers={"Authorization": "token " + GH_TOKEN}) + request.get_method = lambda: "DELETE" + + try: + print(urlopen(request).read()) + return "Release successfully deleted" + except Exception as e: + return "There was an error while deleting the release:\n %s" % e + + +def delete_tag(org, repo, tag): + if not exists(repo): + cmd = "mkdir deltag-{repo}; cd deltag-{repo}; git init; git remote add {repo} git@github.com:{org}/{repo}.git".format( + org=org, repo=repo + ) + print("Executing: \n %s" % cmd) + status, out = run_cmd(cmd) + cmd = "cd deltag-{repo}; git push {repo} :{tag}".format(repo=repo, tag=tag) + print("Executing: \n %s" % cmd) + status, out = run_cmd(cmd) + print(out) + if status != 0: + msg = "I was not able to delete the tag %s. Probaly it had not been created." % tag + print(msg) + return msg + msg = "%s tag %s successfully deleted." 
% (repo, tag)
+    return msg
+
+
+#
+# Deletes in github the tag given as a parameter
+#
+def delete_cmssw_tag_github(release_name):
+    if opts.dryRun:
+        print("Not deleting cmssw tag (dry-run):\n %s" % release_name)
+        return "Not deleting cmssw tag (dry-run): %s " % release_name
+    return delete_tag(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, release_name)
+
+
+#
+# for each architecture, gets the tag in cmsdist that should have been created and deletes it
+#
+def delete_cmsdist_tags_github(release_name, architectures):
+    result = ""
+    for arch in architectures:
+        tag_to_delete = "REL/{rel_name}/{architecture}".format(
+            rel_name=release_name, architecture=arch
+        )
+        if opts.dryRun:
+            msg = "Not deleting cmsdist tag (dry-run): %s" % tag_to_delete
+            result += "\n\n - " + msg
+            continue
+        result += "\n\n - " + delete_tag(GH_CMSSW_ORGANIZATION, GH_CMSDIST_REPO, tag_to_delete)
+    return result
+
+
+#
+# Adds a label to the issue in github
+# if dry-run is selected it doesn't add the label and just prints it
+def add_label(issue, label):
+    if opts.dryRun:
+        print("Not adding label (dry-run):\n %s" % label)
+        return
+    print("Adding label:\n %s" % label)
+    issue.add_to_labels(label)
+
+
+#
+# posts a message to the issue in github
+# if dry-run is selected it doesn't post the message and just prints it
+# if you set checkIfRepeated to False, it will not check if the message has already been written.
+#
+def post_message(issue, msg, checkIfRepeated=True):
+    if opts.dryRun:
+        print("Not posting message (dry-run):\n %s" % msg)
+        return
+    if checkIfRepeated and search_in_comments(comments, ["cmsbuild"], msg, False):
+        print("Message already in the thread: \n %s" % msg)
+        return
+    print("Posting message:\n %s" % msg)
+    issue.create_comment(msg)
+
+
+#
+# reads the issue labels and returns the status of the issue
+#
+def get_issue_status(issue):
+    labels = [l.name for l in issue.get_labels() if l.name != RELEASE_BUILD_ISSUE]
+    print("Issue Label: ", labels)
+
+    if not labels:
+        return NEW_ISSUE
+    if BUILD_ABORTED in labels:
+        return BUILD_ABORTED
+    if PENDING_APPROVAL in labels:
+        return PENDING_APPROVAL
+    if BUILD_IN_PROGRESS in labels:
+        return BUILD_IN_PROGRESS
+    if TOOLCONF_BUILDING in labels:
+        return TOOLCONF_BUILDING
+    if BUILD_SUCCESSFUL in labels:
+        return BUILD_SUCCESSFUL
+    if UPLOADING_BUILDS in labels:
+        return UPLOADING_BUILDS
+    if RELEASE_ANNOUNCED in labels:
+        return RELEASE_ANNOUNCED
+    if PROCESS_COMPLETE in labels:
+        return PROCESS_COMPLETE
+
+
+#
+# closes the issue
+#
+def close_issue(issue):
+    if opts.dryRun:
+        print("Not closing issue (dry-run)")
+        return
+    print("Closing issue...")
+    issue.edit(state="closed")
+
+
+#
+# removes the labels of the issue
+#
+def remove_labels(issue):
+    if opts.dryRun:
+        print("Not removing issue labels (dry-run)")
+        return
+    issue.delete_labels()
+
+
+# Removes a label from the issue
+def remove_label(issue, label):
+    if opts.dryRun:
+        print("Not removing label (dry-run):\n %s" % label)
+        return
+
+    if label not in labels:
+        print("label ", label, " does not exist. 
Not attempting to remove") + return + + print("Removing label: %s" % label) + try: + issue.remove_from_labels(label) + except Exception as e: + print(e) + from sys import exit + + exit(1) + + +# +# Creates a properties file in Jenkins to kill the build +# it needs to know the machine that was used for the build +# +def create_properties_kill_build(release_name): + out_file_name = "kill-%s.properties" % (release_name) + print("Creating properties file for %s" % release_name) + out_file = open(out_file_name, "w") + out_file.write("%s=%s\n" % (JENKINS_CMSSW_X_Y_Z, release_name)) + if opts.dryRun: + out_file.write("DRY_RUN=true\n") + else: + out_file.write("DRY_RUN=false\n") + + +# +# Aborts the build: +# -Deletes the release in github +# -Deletes the cmssw tags +# -Deletes the cmsdist tags +# -Triggers the killing of the build process in jenkins +# +def abort_build(issue, release_name, architectures, comments): + msg = "Deleting %s:" % release_name + del_rel_result = delete_release_github(release_name) + msg += "\n\n - " + del_rel_result + msg += "\n\n - " + delete_cmssw_tag_github(release_name) + + create_properties_kill_build(release_name) + msg += delete_cmsdist_tags_github(release_name, architectures) + msg += "\n\n" + "You must create a new issue to start over the build." + post_message(issue, msg) + + +# +# Classifies the labels and fills the lists with the details of the current +# status of each architecture +# +def fillDeatilsArchsLists(issue): + labels = [l.name for l in issue.get_labels()] + BUILD_OK.extend([x.split("-")[0] for x in labels if "-build-ok" in x]) + BUILDING.extend([x.split("-")[0] for x in labels if "-building" in x]) + UPLOAD_OK.extend([x.split("-")[0] for x in labels if "-upload-ok" in x]) + INSTALL_OK.extend([x.split("-")[0] for x in labels if "-installation-ok" in x]) + UPLOADING.extend([x.split("-")[0] for x in labels if "-uploading" in x]) + BUILD_ERROR.extend([x.split("-")[0] for x in labels if "-build-error" in x]) + TOOL_CONF_BUILDING.extend([x.split("-")[0] for x in labels if "-tool-conf-building" in x]) + TOOL_CONF_OK.extend([x.split("-")[0] for x in labels if "-tool-conf-ok" in x]) + TOOL_CONF_ERROR.extend([x.split("-")[0] for x in labels if "-tool-conf-error" in x]) + TOOL_CONF_WAITING.extend([x.split("-")[0] for x in labels if "-tool-conf-waiting" in x]) + TO_CLEANUP.extend(UPLOAD_OK + BUILD_ERROR + BUILD_OK + INSTALL_OK) + + +# +# Triggers the cleanup for the architectures in the list TO_CLEANUP +# +def triggerCleanup(issue, comments, release_name): + if TO_CLEANUP: + for arch in TO_CLEANUP: + pattern = "The build has started for %s .*" % arch + build_info_comments = search_in_comments(comments, ["cmsbuild"], pattern, False) + + pattern_tool_conf = "The cmssw-tool-conf build has started for %s .*" % arch + tool_conf_info_comments = search_in_comments( + comments, ["cmsbuild"], pattern_tool_conf, False + ) + + if not build_info_comments: + print( + "No information found about the build machine, something is wrong for %s" + % arch + ) + continue + + build_machine = build_info_comments[-1].split(" ")[7].strip(".") + print("\nTriggering cleanup for %s" % arch) + create_properties_file_cleanup(release_name, arch, issue.number, build_machine) + + if tool_conf_info_comments: + build_machine_toolconf = tool_conf_info_comments[-1].split(" ")[8].strip(".") + print("\nTriggering tool-conf cleanup for %s" % arch) + create_properties_file_cleanup( + release_name, arch, issue.number, build_machine_toolconf, tool_conf=True + ) + + print() + msg = 
CLEANUP_STARTED_MSG.format(architecture=arch) + post_message(issue, msg) + remove_label(issue, arch + "-upload-ok") + remove_label(issue, arch + "-build-error") + remove_label(issue, arch + "-build-ok") + remove_label(issue, arch + "-installation-ok") + add_label(issue, arch + "-finished") + + +# +# Creates the release in github, including the cmssw tag. It then creates the files to trigger the builds in jenkins +# +def start_release_build( + issue, + release_name, + release_branch, + architectures, + docker_imgs, + commit=None, + cmsdist_commit=None, +): + cmssw_repo = gh.get_repo(GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO) + release_created = create_release_github(cmssw_repo, release_name, release_branch, commit) + if not release_created: + msg = RELEASE_CREATION_FAIL_MSG.format(rel_name=release_name) + post_message(issue, RELEASE_CREATION_FAIL_MSG.format(rel_name=release_name)) + exit(0) + + msg = RELEASE_CREATED_MSG.format( + rel_name=(RELEASE_BASE_URL % release_name), base_branch=(BASE_BRANCH_URL % release_branch) + ) + post_message(issue, msg) + + ready_to_build = list( + set(architectures) + - set(TOOL_CONF_WAITING) + - set(TOOL_CONF_ERROR) + - set(TOOL_CONF_BUILDING) + ) + create_properties_files( + issue, + release_name, + ready_to_build, + issue.number, + release_queue, + docker_imgs, + only_toolconf=False, + cmsdist_commit=cmsdist_commit, + ) + if ready_to_build: + msg = QUEUING_BUILDS_MSG % ", ".join(ready_to_build) + post_message(issue, msg) + + +# +# Creates the files to trigger the build of cmssw-tool-conf in jenkins. +# +def start_tool_conf_build( + issue, release_name, release_branch, architectures, docker_imgs, cmsdist_commit=None +): + create_properties_files( + issue, + release_name, + architectures, + issue.number, + release_queue, + docker_imgs, + only_toolconf=True, + cmsdist_commit=cmsdist_commit, + ) + msg = QUEUING_TOOLCONF_MSG % ", ".join(architectures) + post_message(issue, msg) + + +# +# removes the label for the current state and adds the label for the next state +# +def go_to_state(issue, current_state, new_state): + print("\nSwitching to state: ", new_state, "\n") + remove_label(issue, current_state) + add_label(issue, new_state) + + +# +# Generates an announcement prototype +# +def fix_release_description(issue): + if not issue.body: + return "\n" + desc_str = "\n" + issue.body.encode("ascii", "ignore").decode().strip() + "\n\n" + desc_lines = [] + for l in desc_str.split("\n"): + if "RELEASE_QUEUE:" in l: + continue + if "ARCHITECTURE:" in l: + continue + if "TAG_COMMIT:" in l: + continue + if "CMSSW_COMMIT:" in l: + continue + if "CMSDIST_COMMIT:" in l: + continue + if "PRODUCTION_ARCHITECTURE:" in l: + continue + desc_lines.append(l) + return "\n".join(desc_lines) + + +def generate_announcement( + release_name, previous_release_name, production_architecture, architectures +): + print("\nGenerating announcement template...\n") + is_development = "pre" in release_name + type_str = "development" if is_development else "production" + print("Is development: ", is_development) + is_patch = "patch" in release_name + patch_str = "patch " if is_patch else "" + print("Is patch: ", is_patch) + # The description of the issue should explain the reason for building the release + desc = fix_release_description(issue) + print("Description: \n", desc) + + architectures.remove(production_architecture) + rest_of_archs = "\n".join(architectures) + "\n\n" if architectures else "\n" + rel_cyc = "_".join(release_name.split("_")[:2]) + announcement = 
ANNOUNCEMENT_TEMPLATE.format( + rel_type=type_str, + is_patch=patch_str, + rel_name=release_name, + rel_cyc=rel_cyc, + production_arch=production_architecture, + rest_of_archs=rest_of_archs, + prev_release=previous_release_name, + description=desc, + ) + + return announcement + + +# +# Generates a link that the uset can click to write the announcement email with just one click +# +def generate_announcement_link(announcement, release_name): + is_development = "pre" in release_name + type_str = "Development" if is_development else "Production" + is_patch = "patch" in release_name + patch_str = "patch " if is_patch else "" + + subject = quote( + ANNOUNCEMENT_EMAIL_SUBJECT.format( + rel_type=type_str, is_patch=patch_str, rel_name=release_name + ) + ) + + msg = quote(announcement) + link = MAILTO_TEMPLATE.format(destinatary=HN_REL_ANNOUNCE_EMAIL, sub=subject, body=msg) + return link + + +# +# checks if the production architecture is ready, if so, it generates a template for the announcement +# +def check_if_prod_arch_ready(issue, prev_rel_name, production_architecture): + if production_architecture in INSTALL_OK: + print("Production architecture successfully installed..") + # For now, it assumes that the release is being installed and it will be installed successfully + announcement = generate_announcement( + release_name, prev_rel_name, production_architecture, list(set(INSTALL_OK + UPLOAD_OK)) + ) + mailto = generate_announcement_link(announcement, release_name) + msg = ( + "You can use this template for announcing the release:\n\n%s\n\n" + "You can also click %s to send the email." % (announcement, mailto) + ) + post_message(issue, msg, checkIfRepeated=False) + add_label(issue, ANNOUNCEMENT_GENERATED_LBL) + + +# +# checks the issue for archs to be uploaded +# +def check_archs_to_upload(release_name, issue, docker_imgs, production_architecture): + print("Looking for archs ready to be uploaded...\n") + for arch in BUILD_OK: + print("Ready to upload %s" % arch) + pattern = "^The build has started for %s .*" % arch + build_info_comments = search_in_comments(comments, ["cmsbuild"], pattern, True) + if not build_info_comments: + print("No information found about the build machine, something is wrong") + exit(1) + + first_line_info_comment = str( + build_info_comments[-1] + .encode("ascii", "ignore") + .decode() + .split("\n")[0] + .strip("\n\t\r ") + ) + build_machine = first_line_info_comment.split(" ")[7].strip(".") + print("Triggering upload for %s (prod arch: %s)" % (arch, arch == production_architecture)) + create_properties_files_upload( + release_name, + arch, + issue.number, + build_machine, + docker_imgs, + arch == production_architecture, + ) + post_message(issue, QUEING_UPLOADS_MSG.format(architecture=arch)) + remove_label(issue, arch + "-build-ok") + add_label(issue, arch + "-uploading") + + if BUILD_OK: + return True + else: + return False + + +# +# checks if there are architectures that are ready to be built afer building tool-conf, and triggers the build if neccessary +# +def check_to_build_after_tool_conf(issue, release_name, release_queue, docker_imgs): + print("Checking if there are architectures waiting to be started after building tool-conf") + ready_to_build = TOOL_CONF_OK + print(ready_to_build) + create_properties_files( + issue, release_name, ready_to_build, issue.number, release_queue, docker_imgs + ) + if ready_to_build: + msg = QUEUING_BUILDS_MSG % ", ".join(ready_to_build) + post_message(issue, msg) + + +# +# Guesses the previous release name based on the name given as 
a parameter +# +def guess_prev_rel_name(release_name, issue): + num_str = release_name.split("_")[-1] + number = int(re.search("[0-9]+$", release_name).group(0)) + prev_number = number - 1 + prev_num_str = num_str.replace(str(number), str(prev_number)) + + if ("patch" in num_str) or ("pre" in num_str): + if prev_number < 1: + if "pre" in num_str: + post_message(issue, PREVIOUS_RELEASE_NAME_MSG.format(release_name=release_name)) + exit(0) + return re.sub("_" + num_str + "$", "", release_name) + return re.sub("_" + num_str + "$", "_" + prev_num_str, release_name) + rel_match = ( + re.sub("_" + num_str + "$", "_" + prev_num_str, release_name) + + "\(_[a-zA-Z]*patch[0-9][0-9]*\|\);" + ) + if number == 0: + rel_match = release_name + "_pre\([0-9][0-9]*\);" + ret, out = run_cmd( + "grep 'label=" + + rel_match + + "' " + + CMS_BOT_DIR + + "/releases.map" + + " | grep -v 'label=" + + release_name + + ";' | tail -1 | sed 's|.*label=||;s|;.*||'" + ) + return out + + +# ------------------------------------------------------------------------------- +# Start of execution +# -------------------------------------------------------------------------------- + +if __name__ == "__main__": + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not post on Github", + default=False, + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + help="Ignore previous comments in the issue and proccess it again", + default=False, + ) + parser.add_option( + "-c", + "--check-upload", + dest="check_upload", + action="store", + help="Check if one of the authorized users has written the upload message" + "for the architecture given as a parameter. It exits with 0 if it finds" + "a message with the structure 'upload ', if not it exits" + " with 1", + ) + opts, args = parser.parse_args() + + if len(args) != 1: + parser.print_help() + parser.error("Too many arguments") + + GH_TOKEN = open(expanduser("~/.github-token")).read().strip() + + issue_id = int(args[0]) + gh = Github(login_or_token=GH_TOKEN) + api_rate_limits(gh) + cmssw_repo_name = GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO + cmssw_repo = gh.get_repo(cmssw_repo_name) + issue = cmssw_repo.get_issue(issue_id) + print("API Rate Limit") + print("Limit, Remaining: ", gh.rate_limiting) + print("Reset time (GMT): ", datetime.fromtimestamp(gh.rate_limiting_resettime)) + + # 1. Is this a pull request? + if issue.pull_request: + print("This is a pull request, ignoring.") + exit(0) + + title_match = re.match(BUILD_REL, issue.title) + + # 2. Is this issue meant to build a release? + if not title_match: + print("This issue is not for building a release, ignoring.") + exit(0) + + comments = [c for c in issue.get_comments()] + + release_name = title_match.group(1) + is_patch = "patch" in release_name + full_release = release_name.split("patch")[0].rsplit("_", 1)[0] if is_patch else "" + # Get the release queue from the release name. 
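As a rough illustration of the mapping performed by the block below: the release queue is derived from the release name via REL_NAME_REGEXP. The sketch uses a deliberately simplified, hypothetical pattern (the real REL_NAME_REGEXP is defined elsewhere in cms-bot and also covers flavoured queues such as *_ROOT6_X), so it only shows the intent, not the exact logic.

import re

def simple_queue_guess(rel_name):
    # Simplified, hypothetical approximation of the queue derivation done below.
    m = re.match(r"^(CMSSW_\d+_\d+)_\d+(?:_(?:pre|patch)\d+)?$", rel_name)
    return m.group(1) + "_X" if m else None

# simple_queue_guess("CMSSW_14_1_0_pre5")   -> "CMSSW_14_1_X"
# simple_queue_guess("CMSSW_14_0_2_patch1") -> "CMSSW_14_0_X"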
+ print(release_name) + issue_body = "" + if issue.body: + issue_body = issue.body.encode("ascii", "ignore").decode().strip() + release_queue = None + rel_name_match = re.match(REL_NAME_REGEXP, release_name) + if "RELEASE_QUEUE:" in issue_body: + release_queue = issue_body.split("RELEASE_QUEUE:", 1)[1].split("\n", 1)[0].strip() + print("Found forces release queue:", release_queue) + else: + if not rel_name_match: + print("Release name not correctly formed") + post_message(issue, WRONG_RELEASE_NAME_MSG) + exit(0) + + release_queue = "".join( + [x for x in rel_name_match.group(1, 4)] + + ["_X"] + + [x.strip("0123456789") for x in rel_name_match.group(2)] + ) + + release_tag_commit = None + if "TAG_COMMIT:" in issue_body: + release_tag_commit = issue_body.split("TAG_COMMIT:", 1)[1].split("\n", 1)[0].strip() + print("Found forces commit:", release_tag_commit) + + if "CMSSW_COMMIT:" in issue_body: + release_tag_commit = issue_body.split("CMSSW_COMMIT:", 1)[1].split("\n", 1)[0].strip() + print("Found forces commit:", release_tag_commit) + + cmsdist_tag_commit = None + if "CMSDIST_COMMIT:" in issue_body: + cmsdist_tag_commit = issue_body.split("CMSDIST_COMMIT:", 1)[1].split("\n", 1)[0].strip() + print("Found forces cmsdist commit:", cmsdist_tag_commit) + + sel_archs = [] + if "ARCHITECTURE:" in issue_body: + sel_archs = set( + issue_body.split("ARCHITECTURE:", 1)[1].split("\n", 1)[0].strip().split(",") + ) + print("Found forces architectures:", sel_archs) + + print(release_queue, sel_archs) + specs = get_config_map_properties({"DISABLED": "1", "IB_ONLY": "1"}) + architectures = [x["SCRAM_ARCH"] for x in specs if x["RELEASE_QUEUE"] == release_queue] + if not architectures: + print("Trying default queue") + release_queue = "".join([x for x in rel_name_match.group(1, 2)] + ["_X"]) + print(release_queue) + architectures = [x["SCRAM_ARCH"] for x in specs if x["RELEASE_QUEUE"] == release_queue] + + if sel_archs: + architectures = [a for a in architectures if a in sel_archs] + + if is_patch: + full_release_archs = get_full_release_archs(full_release) + print("Full release archs:", full_release, full_release_archs) + if not full_release_archs: + msg = "Error: unable to find architectures for full release " + full_release + post_message(issue, msg) + exit(0) + architectures = [a for a in architectures if a in full_release_archs] + print("Patch release filtered archs:", architectures) + + # Check if we have at least one architecture to build and complain if not. + if not architectures: + print("no archs found for the requested release") + msg = NO_ARCHS_FOUND_MSG.format(rel_name=release_name, queue=release_queue) + post_message(issue, msg) + exit(0) + print("Archs: ", architectures) + + # Find out the docker images to be used for each arch + docker_imgs = {} + for x in specs: + if ( + (x["RELEASE_QUEUE"] == release_queue) + and ("DOCKER_IMG" in x) + and (x["SCRAM_ARCH"] in architectures) + ): + docker_imgs[x["SCRAM_ARCH"]] = x["DOCKER_IMG"] + print("Dockers:", docker_imgs) + + # Determine the release branch (which is the same as the release queue if not + # specified) and start the build if needed. 
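The spec dictionaries used above (architectures and Docker images) and below (release branch and production architecture) come from config.map via get_config_map_properties(). A minimal sketch with made-up entries shows how the same filters behave; real entries carry more keys and different values.

# Hypothetical config.map entries, for illustration only.
sample_specs = [
    {"RELEASE_QUEUE": "CMSSW_14_1_X", "SCRAM_ARCH": "el8_amd64_gcc12",
     "PROD_ARCH": "1", "RELEASE_BRANCH": "CMSSW_14_1_X",
     "DOCKER_IMG": "cmssw/el8:x86_64"},
    {"RELEASE_QUEUE": "CMSSW_14_1_X", "SCRAM_ARCH": "el9_aarch64_gcc12"},
]
queue = "CMSSW_14_1_X"
archs = [s["SCRAM_ARCH"] for s in sample_specs if s["RELEASE_QUEUE"] == queue]
dockers = {s["SCRAM_ARCH"]: s["DOCKER_IMG"] for s in sample_specs
           if s["RELEASE_QUEUE"] == queue and "DOCKER_IMG" in s and s["SCRAM_ARCH"] in archs}
branches = [s["RELEASE_BRANCH"] for s in sample_specs
            if s["RELEASE_QUEUE"] == queue and "RELEASE_BRANCH" in s]
# archs    -> ["el8_amd64_gcc12", "el9_aarch64_gcc12"]
# dockers  -> {"el8_amd64_gcc12": "cmssw/el8:x86_64"}
# branches -> ["CMSSW_14_1_X"]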
+ release_branches = [ + x["RELEASE_BRANCH"] + for x in specs + if (x["RELEASE_QUEUE"] == release_queue) and ("RELEASE_BRANCH" in x) + ] + + possible_prod_arch = [ + x["SCRAM_ARCH"] + for x in specs + if (x["RELEASE_QUEUE"] == release_queue) and ("PROD_ARCH" in x) + ] + print("Debug:", release_name, release_queue, release_branches, possible_prod_arch) + if len(architectures) > 1: + err, production_architecture = run_cmd( + CMS_BOT_DIR + "/get-production-arch %s %s" % (release_name, release_queue) + ) + print("Debug", production_architecture) + if err: + print("Unable to find production architecture for the release") + post_message(issue, "Unable to find production architecture for the release") + exit(0) + production_architecture = production_architecture.split("\n")[-1] + else: + production_architecture = architectures[0] + + if "PRODUCTION_ARCHITECTURE:" in issue_body: + req_arch = issue_body.split("PRODUCTION_ARCHITECTURE:", 1)[1].split("\n", 1)[0].strip() + if not req_arch in architectures: + msg = ( + "You requested production architecutre to be %s but this is not a valid architecture for this release cycle." + % req_arch + ) + print(msg) + post_message(issue, msg) + exit(0) + if is_patch and (production_architecture != req_arch): + msg = ( + "You can not override production architecture for a patch release.\nProdction architecture for this release should be %s" + % production_architecture + ) + print(msg) + post_message(issue, msg) + exit(0) + production_architecture = req_arch + print("Found production architecture:", production_architecture) + if not production_architecture and len(architectures) == 1: + production_architecture = architectures[0] + print("debug production arch: ", production_architecture) + + release_branch = release_queue + + if len(release_branches): + release_branch = release_branches[0] + + # If a patch release is requested and there is a patchX branch, it will be used to tag the release. For example: + # if you want to create CMSSW_7_1_4_patch2 and there exists a branch called CMSSW_7_1_4_patchX the tag will be + # on top of the branch CMSSW_7_1_4_patchX instead of CMSSW_7_1_X. + + if is_patch: + try: + possible_branch = full_release + "_patchX" + if get_branch(cmssw_repo_name, possible_branch)["name"] == possible_branch: + release_branch = possible_branch + print( + "This is a patch release and the branch %s was found. It will be used as base branch." + % possible_branch + ) + except HTTPError as e: + print(e) + if e.code != 404: + exit(1) + + print(release_branch) + for rm in get_release_managers(release_branch): + if not rm in APPROVE_BUILD_RELEASE: + APPROVE_BUILD_RELEASE.append(rm) + if not rm in REQUEST_BUILD_RELEASE: + REQUEST_BUILD_RELEASE.append(rm) + + # 3. Is the author authorized to trigger a build? + if not issue.user.login in REQUEST_BUILD_RELEASE: + print("User not authorized") + post_message(issue, NOT_AUTHORIZED_MSG) + exit(0) + + # Get the status of this issue. + status = get_issue_status(issue) + print("Status: %s \n" % status) + + labels = [l.name for l in issue.get_labels()] + print("Issue labels:", labels) + + BUILD_OK = [] + BUILDING = [] + UPLOAD_OK = [] + INSTALL_OK = [] + UPLOADING = [] + BUILD_ERROR = [] + TO_CLEANUP = [] + TOOL_CONF_BUILDING = [] + TOOL_CONF_OK = [] + TOOL_CONF_ERROR = [] + TOOL_CONF_WAITING = [] + # These lists are filled by fillDeatilsArchsLists( issue ) + + fillDeatilsArchsLists(issue) + + if status == BUILD_ABORTED: + print("Build Aborted. 
A new issue must be created if you want to build the release") + + date_aborted = search_date_comment(comments, APPROVE_BUILD_RELEASE, ABORT_COMMENT, True) + # the time is 2 days because a new issue must be created to start again the build + # if for the new build the build starts in the same machine as before, this will + # start to delete the work directory of the new build. + cleanup_deadline = datetime.now() - timedelta(days=2) + if date_aborted < cleanup_deadline: + print("Cleaning up since it is too old since it was aborted") + triggerCleanup(issue, comments, release_name) + close_issue(issue) + else: + print("Not too old yet to clean up") + + if status == NEW_ISSUE: + approvers = ", ".join(["@" + x for x in APPROVE_BUILD_RELEASE]) + ALL_WATCHERS = yaml.load(open(CMS_BOT_DIR + "/build-release-watchers.yaml"), Loader=Loader) + watchers = ALL_WATCHERS.get(release_queue) + xqueue = release_queue + if release_queue != release_branch: + xqueue = release_queue + "(" + release_branch + ")" + cmssw_commit_tag = "" + if release_tag_commit: + cmssw_commit_tag = "Release tag based on: %s\n" % release_tag_commit + arch_msg = ", ".join( + [ + a if a != production_architecture else "%s(%s)" % (a, "Production") + for a in architectures + ] + ) + msg = ACK_MSG.format( + approvers_list=approvers, + cmssw_queue=xqueue, + architecture=arch_msg, + cmssw_commit_tag=cmssw_commit_tag, + ) + + if watchers: + watchers_l = ", ".join(["@" + x for x in watchers]) + watchers_msg = WATCHERS_MSG.format(watchers_list=watchers_l, queue=release_queue) + msg += watchers_msg + + post_message(issue, msg) + add_label(issue, PENDING_APPROVAL) + add_label(issue, RELEASE_BUILD_ISSUE) + exit(0) + + if status == PENDING_APPROVAL: + approval_comments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, APPROVAL_COMMENT, True + ) + build_toolconf_commments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, BUILD_TOOLCONF, True + ) + if build_toolconf_commments: + if is_patch: + post_message(issue, NOT_TOOLCONF_FOR_PATCH_MSG) + else: + start_tool_conf_build( + issue, + release_name, + release_branch, + architectures, + docker_imgs, + cmsdist_commit=cmsdist_tag_commit, + ) + go_to_state(issue, status, TOOLCONF_BUILDING) + elif approval_comments: + start_release_build( + issue, + release_name, + release_branch, + architectures, + docker_imgs, + release_tag_commit, + cmsdist_commit=cmsdist_tag_commit, + ) + go_to_state(issue, status, BUILD_IN_PROGRESS) + else: + print("Build not approved or cmssw-tool-conf not requested yet") + exit(0) + + if status == TOOLCONF_BUILDING: + print("Waiting for approval to start the build") + approval_comments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, APPROVAL_COMMENT, True + ) + if approval_comments: + print('Build approved, switching to "Build in Progress" state') + + # add a label for each arch for which tool conf has not started in jenkins + tool_conf_reported = TOOL_CONF_BUILDING + TOOL_CONF_OK + TOOL_CONF_ERROR + not_started = list(set(architectures) - set(tool_conf_reported)) + + for arch in not_started: + add_label(issue, arch + "-tool-conf-waiting") + TOOL_CONF_WAITING.append(arch) + + go_to_state(issue, status, BUILD_IN_PROGRESS) + start_release_build( + issue, + release_name, + release_branch, + architectures, + docker_imgs, + release_tag_commit, + cmsdist_commit=cmsdist_tag_commit, + ) + + if status == BUILD_IN_PROGRESS: + abort_comments = search_in_comments(comments, APPROVE_BUILD_RELEASE, ABORT_COMMENT, True) + print(abort_comments) + if abort_comments: + 
print("Aborting") + abort_build(issue, release_name, architectures, comments) + go_to_state(issue, status, BUILD_ABORTED) + exit(0) + + # if the previous state was to build tool-conf there are architectures for which it is needed to wait + build_toolconf_commments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, BUILD_TOOLCONF, True + ) + if build_toolconf_commments: + check_to_build_after_tool_conf(issue, release_name, release_queue, docker_imgs) + + if BUILD_OK: + go_to_state(issue, status, BUILD_SUCCESSFUL) + + if status == BUILD_SUCCESSFUL: + abort_comments = search_in_comments(comments, APPROVE_BUILD_RELEASE, ABORT_COMMENT, True) + print(abort_comments) + if abort_comments: + print("Aborting") + abort_build(issue, release_name, architectures, comments) + go_to_state(issue, status, BUILD_ABORTED) + exit(0) + + # if the previous state was to build tool-conf there are architectures for which it is needed to wait + build_toolconf_commments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, BUILD_TOOLCONF, True + ) + if build_toolconf_commments: + check_to_build_after_tool_conf(issue, release_name, release_queue, docker_imgs) + + upload_all_requested = search_in_comments( + comments, APPROVE_BUILD_RELEASE, UPLOAD_ALL_COMMENT, True + ) + + if upload_all_requested: + check_archs_to_upload(release_name, issue, docker_imgs, production_architecture) + go_to_state(issue, status, UPLOADING_BUILDS) + else: + print("Upload not requested yet") + + if status == UPLOADING_BUILDS: + # upload archs as soon as they get ready + check_archs_to_upload(release_name, issue, docker_imgs, production_architecture) + + # Check if someone asked for release notes, go to next state after generating notes. + # At least one architecture must have been successfully installed + if INSTALL_OK and (RELEASE_NOTES_GENERATED_LBL not in labels): + print("checking if someone asked for the release notes") + release_notes_comments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True + ) + + if release_notes_comments: + comment = release_notes_comments[-1] + first_line = str( + comment.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ") + ) + comment_parts = first_line.strip().split(" ") + print("debug: ", comment_parts) + + if len(comment_parts) > 1: + prev_rel_name = comment_parts[2].rstrip() + else: + prev_rel_name = guess_prev_rel_name(release_name, issue) + print(prev_rel_name) + + rel_name_match = re.match(REL_NAME_REGEXP, prev_rel_name) + if not rel_name_match: + msg = WRONG_NOTES_RELEASE_MSG.format(previous_release=prev_rel_name) + post_message(issue, msg) + exit(0) + + if production_architecture not in INSTALL_OK: + msg = PROD_ARCH_NOT_READY_MSG.format(prod_arch=production_architecture) + post_message(issue, msg) + exit(0) + create_properties_file_rel_notes( + release_name, prev_rel_name, production_architecture, issue.number + ) + msg = GENERATING_RELEASE_NOTES_MSG.format(previous_release=prev_rel_name) + post_message(issue, msg) + add_label(issue, RELEASE_NOTES_GENERATED_LBL) + + # Check if the production architecture was uploaded and was correctly installed, generate announcement if so. 
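check_if_prod_arch_ready, invoked just below, builds the announcement through generate_announcement_link, which URL-encodes the subject and body into a mailto: link. A minimal sketch of that encoding step follows; the recipient address and link layout here are hypothetical stand-ins for the MAILTO_TEMPLATE and HN_REL_ANNOUNCE_EMAIL constants used by the real code.

from urllib.parse import quote

subject = quote("Production release CMSSW_14_1_0 now available")        # hypothetical subject
body = quote("The release is installed on CVMFS for el8_amd64_gcc12.")  # hypothetical body
mailto = "mailto:hn-cms-announce@example.invalid?subject=%s&body=%s" % (subject, body)
# Clicking the resulting link opens a pre-filled announcement e-mail.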
+ check_if_prod_arch_ready(issue, prev_rel_name, production_architecture) + go_to_state(issue, status, RELEASE_ANNOUNCED) + + if status == RELEASE_ANNOUNCED: + # upload archs as soon as they get ready + check_archs_to_upload(release_name, issue, docker_imgs, production_architecture) + + print("checking if someone asked again for the release notes") + release_notes_comments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True + ) + generating_release_notes_comments = search_in_comments( + comments, ["cmsbuild"], "Generating release notes", True + ) + + if len(release_notes_comments) > len(generating_release_notes_comments): + print("I need to generate the release notes again") + # check if this is beter if a function is added + comment = release_notes_comments[-1] + first_line = str( + comment.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ") + ) + comment_parts = first_line.strip().split(" ") + + if len(comment_parts) > 1: + prev_rel_name = comment_parts[2].rstrip() + else: + prev_rel_name = guess_prev_rel_name(release_name, issue) + print(prev_rel_name) + + rel_name_match = re.match(REL_NAME_REGEXP, prev_rel_name) + if not rel_name_match: + msg = WRONG_NOTES_RELEASE_MSG.format(previous_release=prev_rel_name) + post_message(issue, msg, checkIfRepeated=False) + exit(0) + + create_properties_file_rel_notes( + release_name, prev_rel_name, production_architecture, issue.number + ) + msg = GENERATING_RELEASE_NOTES_MSG.format(previous_release=prev_rel_name) + post_message(issue, msg, checkIfRepeated=False) + check_if_prod_arch_ready(issue, prev_rel_name, production_architecture) + + # check if the cleanup has been requested or if 2 days have passed since the release-notes were generated. + print("Checking if someone requested cleanup, or the issue is too old...") + date_rel_notes = search_date_comment( + comments, APPROVE_BUILD_RELEASE, RELEASE_NOTES_COMMENT, True + ) + cleanup_deadline = datetime.now() - timedelta(days=2) + if date_rel_notes: + too_old = date_rel_notes < cleanup_deadline + else: + too_old = False + pattern = "^cleanup$" + cleanup_requested_comments = search_in_comments( + comments, APPROVE_BUILD_RELEASE, pattern, True + ) + if cleanup_requested_comments or too_old: + triggerCleanup(issue, comments, release_name) + close_issue(issue) + go_to_state(issue, status, PROCESS_COMPLETE) diff --git a/process-create-data-repo-request b/process-create-data-repo-request deleted file mode 100755 index b7d7a37c2b21..000000000000 --- a/process-create-data-repo-request +++ /dev/null @@ -1,264 +0,0 @@ -#!/usr/bin/env python3 -import re -from datetime import datetime -from optparse import OptionParser -from os.path import expanduser -from socket import setdefaulttimeout -from typing import Optional, Any - -import github -from github import Github - -from categories_map import CMSSW_CATEGORIES -from cms_static import ( - GH_CMSSW_REPO, - CREATE_REPO, - GH_CMSSW_ORGANIZATION, -) -from github_utils import api_rate_limits - -setdefaulttimeout(120) - - -# Processes a github issue to check if it is requesting the creation of a new data repo -# If the issue is not requesting new repo, it ignores it. 
- -# ------------------------------------------------------------------------------- -# Global Variables -# -------------------------------------------------------------------------------- - -opts: Optional[Any] = None -labels = [] - -INVALID_REQUEST_MSG = "No category found for requested package {package}" -EXISTS_MSG = "Requested repository {repo} already exists" -ACK_MSG = "Request received. I will create the requested repository after this issue is fully signed." -COMPLETE_MSG = "Repository created: {url}" - -# ------------------------------------------------------------------------------- -# Statuses -# -------------------------------------------------------------------------------- -# This is to determine the status of the issue after reading the labels - -# Default state -NEW_ISSUE = "new-issue" -# The issue is pending assignment -PENDING_ASSIGNMENT = "pending-assignment" -# The issue has been received, but it needs approval to start the build -PENDING_APPROVAL = "pending-approval" -# The build has been queued in jenkins -FULLY_SIGNED = "fully-signed" - -# ------------------------------------------------------------------------------- -# Functions -# -------------------------------------------------------------------------------- - - -# -# Searches in the comments if there is a comment made from the given users that -# matches the given pattern. It returns a list with the matched comments. -# -def search_in_comments(comments, user_logins, pattern, first_line): - found_comments = [] - requested_comment_bodies = [c.body for c in comments if c.user.login in user_logins] - for body in requested_comment_bodies: - examined_str = body - if first_line: - examined_str = str( - body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ") - ) - - if examined_str == pattern: - found_comments.append(body) - continue - - if re.match(pattern, examined_str): - found_comments.append(body) - - return found_comments - - -# -# posts a message to the issue in github -# if dry-run is selected it doesn't post the message and just prints it -# if you set checkIfRepeated to False, if will not check if the message has already been written. 
-# -def post_message(issue, msg, comments, checkIfRepeated=True): - if checkIfRepeated and search_in_comments(comments, ["cmsbuild"], msg, False): - print("Message already in the thread: \n %s" % msg) - return - - if opts.dryRun: - print("Not posting message (dry-run):\n %s" % msg) - return - - print("Posting message:\n %s" % msg) - issue.create_comment(msg) - - -# -# reads the comments and gets returns the status of the issue -# -def get_issue_status(issue): - global labels - labels = [label.name for label in issue.get_labels()] - print("Issue Label: ", labels) - - if PENDING_ASSIGNMENT in labels: - return PENDING_ASSIGNMENT - if any(label.endswith("-pending") for label in labels): - return PENDING_APPROVAL - if FULLY_SIGNED in labels: - return FULLY_SIGNED - - return NEW_ISSUE - - -# -# closes the issue -# -def close_issue(issue): - if opts.dryRun: - print("Not closing issue (dry-run)") - return - print("Closing issue...") - issue.edit(state="closed") - - -# ------------------------------------------------------------------------------- -# Start of execution -# -------------------------------------------------------------------------------- - - -def main(): - global opts, PENDING_APPROVAL - parser = OptionParser(usage="%prog ") - parser.add_option( - "-n", - "--dry-run", - dest="dryRun", - action="store_true", - help="Do not post on Github", - default=False, - ) - opts, args = parser.parse_args() - - if len(args) != 1: - parser.print_help() - parser.error("Too many arguments") - - GH_TOKEN = open(expanduser("~/.github-token")).read().strip() - - issue_id = int(args[0]) - gh = Github(login_or_token=GH_TOKEN) - api_rate_limits(gh) - cmssw_repo_name = GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO - cmssw_repo = gh.get_repo(cmssw_repo_name) - issue = cmssw_repo.get_issue(issue_id) - print("API Rate Limit") - print("Limit, Remaining: ", gh.rate_limiting) - print("Reset time (GMT): ", datetime.fromtimestamp(gh.rate_limiting_resettime)) - - # 0. Is this issue closed? - if issue.state == "closed": - print("Issue closed, ignoring.") - exit(0) - - # 1. Is this a pull request? - if issue.pull_request: - print("This is a pull request, ignoring.") - exit(0) - - title_match = re.match(CREATE_REPO, issue.title) - - # 2. Is this issue meant to create a new cms-data repo? - if not title_match: - print("This issue is not for creating a repo, ignoring.") - print(issue.title) - exit(0) - - category_name = title_match.group(1) - package_name = title_match.group(2) - - print(category_name + "/" + package_name) - - # 3. Does the requested repository already exist? - repo = None - try: - repo = gh.get_organization("cms-data").get_repo( - category_name + "-" + package_name - ) - except github.UnknownObjectException: - pass - - comments = [c for c in issue.get_comments()] - - if repo: - post_message(issue, EXISTS_MSG.format(repo=repo.url), comments) - close_issue(issue) - exit(0) - - # Figure out who must approve the action - data_categs = set() - for cat, pkgs in CMSSW_CATEGORIES.items(): - for pkg in pkgs: - if not pkg: - continue - if re.match(pkg + ".*", category_name + "/" + package_name): - data_categs.add(cat) - break - - data_categs = sorted(list(data_categs)) - print(data_categs) - - if not data_categs: - post_message( - issue, - INVALID_REQUEST_MSG.format(package=category_name + "/" + package_name), - comments, - ) - exit(0) - - # Get the status of this issue. 
- status = get_issue_status(issue) - print("Status: %s \n" % status) - print("Issue labels:", labels) - - if status == NEW_ISSUE: - print("Issue not processed by the cms-bot yet, skipping") - exit(0) - - if status == PENDING_ASSIGNMENT: - post_message(issue, "assign " + ",".join(data_categs), comments) - post_message(issue, ACK_MSG, comments) - if not opts.dryRun: - issue.create_reaction("+1") - exit(0) - - if status == PENDING_APPROVAL: - print("Request not approved yet") - exit(0) - - if status == FULLY_SIGNED: - org = gh.get_organization("cms-data") - if not opts.dryRun: - new_repo = org.create_repo( - category_name + "-" + package_name, - "Data files for " + category_name + "/" + package_name, - has_wiki=False, - has_projects=False, - private=False, - auto_init=True - ) - - with open("new-repo.txt", "w") as out: - out.write("REPOSITORY=cms-data/%s-%s\n" % (category_name, package_name)) - post_message(issue, COMPLETE_MSG.format(url=new_repo.html_url), comments) - close_issue(issue) - else: - print("(Dry-run) Not creating repo ", category_name + "-" + package_name) - exit(0) - - -if __name__ == "__main__": - main() diff --git a/process-create-data-repo-request b/process-create-data-repo-request new file mode 120000 index 000000000000..e18e5c7e6e5f --- /dev/null +++ b/process-create-data-repo-request @@ -0,0 +1 @@ +process-create-data-repo-request.py \ No newline at end of file diff --git a/process-create-data-repo-request.py b/process-create-data-repo-request.py new file mode 100755 index 000000000000..93482d016af6 --- /dev/null +++ b/process-create-data-repo-request.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python3 +import re +from datetime import datetime +from optparse import OptionParser +from os.path import expanduser +from socket import setdefaulttimeout +from typing import Optional, Any + +import github +from github import Github + +from categories_map import CMSSW_CATEGORIES +from cms_static import ( + GH_CMSSW_REPO, + CREATE_REPO, + GH_CMSSW_ORGANIZATION, +) +from github_utils import api_rate_limits + +setdefaulttimeout(120) + + +# Processes a github issue to check if it is requesting the creation of a new data repo +# If the issue is not requesting new repo, it ignores it. + +# ------------------------------------------------------------------------------- +# Global Variables +# -------------------------------------------------------------------------------- + +opts: Optional[Any] = None +labels = [] + +INVALID_REQUEST_MSG = "No category found for requested package {package}" +EXISTS_MSG = "Requested repository {repo} already exists" +ACK_MSG = ( + "Request received. I will create the requested repository after this issue is fully signed." 
+) +COMPLETE_MSG = "Repository created: {url}" + +# ------------------------------------------------------------------------------- +# Statuses +# -------------------------------------------------------------------------------- +# This is to determine the status of the issue after reading the labels + +# Default state +NEW_ISSUE = "new-issue" +# The issue is pending assignment +PENDING_ASSIGNMENT = "pending-assignment" +# The issue has been received, but it needs approval to start the build +PENDING_APPROVAL = "pending-approval" +# The build has been queued in jenkins +FULLY_SIGNED = "fully-signed" + +# ------------------------------------------------------------------------------- +# Functions +# -------------------------------------------------------------------------------- + + +# +# Searches in the comments if there is a comment made from the given users that +# matches the given pattern. It returns a list with the matched comments. +# +def search_in_comments(comments, user_logins, pattern, first_line): + found_comments = [] + requested_comment_bodies = [c.body for c in comments if c.user.login in user_logins] + for body in requested_comment_bodies: + examined_str = body + if first_line: + examined_str = str( + body.encode("ascii", "ignore").decode().split("\n")[0].strip("\n\t\r ") + ) + + if examined_str == pattern: + found_comments.append(body) + continue + + if re.match(pattern, examined_str): + found_comments.append(body) + + return found_comments + + +# +# posts a message to the issue in github +# if dry-run is selected it doesn't post the message and just prints it +# if you set checkIfRepeated to False, if will not check if the message has already been written. +# +def post_message(issue, msg, comments, checkIfRepeated=True): + if checkIfRepeated and search_in_comments(comments, ["cmsbuild"], msg, False): + print("Message already in the thread: \n %s" % msg) + return + + if opts.dryRun: + print("Not posting message (dry-run):\n %s" % msg) + return + + print("Posting message:\n %s" % msg) + issue.create_comment(msg) + + +# +# reads the comments and gets returns the status of the issue +# +def get_issue_status(issue): + global labels + labels = [label.name for label in issue.get_labels()] + print("Issue Label: ", labels) + + if PENDING_ASSIGNMENT in labels: + return PENDING_ASSIGNMENT + if any(label.endswith("-pending") for label in labels): + return PENDING_APPROVAL + if FULLY_SIGNED in labels: + return FULLY_SIGNED + + return NEW_ISSUE + + +# +# closes the issue +# +def close_issue(issue): + if opts.dryRun: + print("Not closing issue (dry-run)") + return + print("Closing issue...") + issue.edit(state="closed") + + +# ------------------------------------------------------------------------------- +# Start of execution +# -------------------------------------------------------------------------------- + + +def main(): + global opts, PENDING_APPROVAL + parser = OptionParser(usage="%prog ") + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not post on Github", + default=False, + ) + opts, args = parser.parse_args() + + if len(args) != 1: + parser.print_help() + parser.error("Too many arguments") + + GH_TOKEN = open(expanduser("~/.github-token")).read().strip() + + issue_id = int(args[0]) + gh = Github(login_or_token=GH_TOKEN) + api_rate_limits(gh) + cmssw_repo_name = GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO + cmssw_repo = gh.get_repo(cmssw_repo_name) + issue = cmssw_repo.get_issue(issue_id) + print("API Rate Limit") + print("Limit, 
Remaining: ", gh.rate_limiting) + print("Reset time (GMT): ", datetime.fromtimestamp(gh.rate_limiting_resettime)) + + # 0. Is this issue closed? + if issue.state == "closed": + print("Issue closed, ignoring.") + exit(0) + + # 1. Is this a pull request? + if issue.pull_request: + print("This is a pull request, ignoring.") + exit(0) + + title_match = re.match(CREATE_REPO, issue.title) + + # 2. Is this issue meant to create a new cms-data repo? + if not title_match: + print("This issue is not for creating a repo, ignoring.") + print(issue.title) + exit(0) + + category_name = title_match.group(1) + package_name = title_match.group(2) + + print(category_name + "/" + package_name) + + # 3. Does the requested repository already exist? + repo = None + try: + repo = gh.get_organization("cms-data").get_repo(category_name + "-" + package_name) + except github.UnknownObjectException: + pass + + comments = [c for c in issue.get_comments()] + + if repo: + post_message(issue, EXISTS_MSG.format(repo=repo.url), comments) + close_issue(issue) + exit(0) + + # Figure out who must approve the action + data_categs = set() + for cat, pkgs in CMSSW_CATEGORIES.items(): + for pkg in pkgs: + if not pkg: + continue + if re.match(pkg + ".*", category_name + "/" + package_name): + data_categs.add(cat) + break + + data_categs = sorted(list(data_categs)) + print(data_categs) + + if not data_categs: + post_message( + issue, + INVALID_REQUEST_MSG.format(package=category_name + "/" + package_name), + comments, + ) + exit(0) + + # Get the status of this issue. + status = get_issue_status(issue) + print("Status: %s \n" % status) + print("Issue labels:", labels) + + if status == NEW_ISSUE: + print("Issue not processed by the cms-bot yet, skipping") + exit(0) + + if status == PENDING_ASSIGNMENT: + post_message(issue, "assign " + ",".join(data_categs), comments) + post_message(issue, ACK_MSG, comments) + if not opts.dryRun: + issue.create_reaction("+1") + exit(0) + + if status == PENDING_APPROVAL: + print("Request not approved yet") + exit(0) + + if status == FULLY_SIGNED: + org = gh.get_organization("cms-data") + if not opts.dryRun: + new_repo = org.create_repo( + category_name + "-" + package_name, + "Data files for " + category_name + "/" + package_name, + has_wiki=False, + has_projects=False, + private=False, + auto_init=True, + ) + + with open("new-repo.txt", "w") as out: + out.write("REPOSITORY=cms-data/%s-%s\n" % (category_name, package_name)) + post_message(issue, COMPLETE_MSG.format(url=new_repo.html_url), comments) + close_issue(issue) + else: + print("(Dry-run) Not creating repo ", category_name + "-" + package_name) + exit(0) + + +if __name__ == "__main__": + main() diff --git a/process-error-reports b/process-error-reports deleted file mode 100755 index eff6e6f7f520..000000000000 --- a/process-error-reports +++ /dev/null @@ -1,386 +0,0 @@ -#!/usr/bin/env python3 - -# This script will be famous: -# -# Parses logs looking for errors. -# Parses github issues looking for past errors. -# Updates past errors to match the current status. 
- -from argparse import ArgumentParser -from glob import glob -from github import Github, Label -from os.path import basename, join, expanduser, exists -from _py2with3compatibility import run_cmd -import re -import hashlib -from operator import itemgetter -from socket import setdefaulttimeout -setdefaulttimeout(120) - -PING_COMMENT="This issue is also present in release %s" -RESULTS_RE = "^([0-9.]+)_([^ ]*) (.*) - time date.*exit: (.*)" -FAILED_RE = "Step([0-9])-FAILED" - -# Create a regular expression from a format string. -# - Replace format strings with something does not enter the substitutions below. -# - Replace regexp special caratecters with their escaped counter parts -# - Replace back @@@ to be "(.*)" for the matching -def reEscape(s): - s = re.sub("%\([a-z_A-Z]+\)s", "@@@", s) - s = re.sub("([\[\]\(\)\*\+\.])", "\\\\\\1", s) - s = s.replace("\n", "\\n") - s = re.sub("@@@", "(.*)", s) - return s - -RELVAL_ISSUE_TITLE="%(id_ib)s has \"%(error_title)s\" issue in %(id_release)s. ERROR_ID:%(error_hash)s" -RELVAL_ISSUE_SUMMARY="The following error:\n\n```\n%(error_text)s\n\n```\n\nis found in the following *(workflow, step)* pairs:\n\n%(steps)s\nClick on the link for more information." -RELVAL_ISSUE_LINK_TEMPLATE="- [Workflow %(workflowId)s - Step %(step)s](https://cmssdt.cern.ch/SDT/jenkins-artifacts/summary-merged-prs/merged_prs.html)" - -ISSUE_TITLE_MATCHER = reEscape(RELVAL_ISSUE_TITLE) -ISSUE_BODY_MATCHER = reEscape(RELVAL_ISSUE_SUMMARY) -ISSUE_LINK_MATCHER = reEscape(RELVAL_ISSUE_LINK_TEMPLATE) - -def format(s, **kwds): - return s % kwds - -# Parses the report with all the failing steps -def parseSteps(buf): - results = [] - for l in buf.split("\n"): - m = re.match(ISSUE_LINK_MATCHER, l) - if not m: - continue - print(m.groups()) - results.append({"workflow": str(m.group(1)), "step": int(m.group(2))}) - return results - -def readWorkflows(f): - buf = f.read() - results = {} - for l in buf.split("\n"): - r = re.match(RESULTS_RE, l) - if not r: - continue - workflowId, workflow, steps, exit = r.groups() - steps = steps.split(" ") - failedSteps = [int(re.match(FAILED_RE, s).group(1)) + 1 - for s in steps if re.match(FAILED_RE, s)] - if not failedSteps: - continue - results[workflowId] = { - "workflowId": workflowId, - "name": workflow, - "steps": failedSteps - } - return results - -def postNewMessage(dryRun=True, labels=None, repo=None, queue=None, error_title=None, workflows=None, current_release=None, error_hash=None, error_text=None, **kwds): - if labels is None: - labels = [] - if workflows is None: - workflows = [] - steps = "" - print("foo" + str(workflows[0])) - workflows.sort(key=itemgetter("workflowId")) - for info in workflows[:20]: - steps += format(RELVAL_ISSUE_LINK_TEMPLATE, - step=step, - workflowId=info["workflowId"], - name=info["name"], - ) + "\n" - if len(workflows) > 20: - steps += "- .. and %s more not listed here." % (len(workflows) - 20) - title = format(RELVAL_ISSUE_TITLE, - id_ib=queue, - id_release=current_release, - error_title=error_title, - error_hash=error_hash) - body = format(RELVAL_ISSUE_SUMMARY, - error_text=error_text, - steps=steps, - full_message_url="foo", - ) - print("\n---") - print("The following message will be added:") - print(title) - print(body) - if dryRun: - print("--dry-run specified. 
Not adding new messages") - return - repo.create_issue(title=title, body=body, labels=labels) - -def updateBugReport(dryRun=False, error_text="", workflows=[], issue=None, **kwds): - print(workflows) - workflows.sort(key=itemgetter("workflowId")) - links = [RELVAL_ISSUE_LINK_TEMPLATE % s for s in workflows] - if len(links) > 20: - links = links[:20] + ["- .. and %s more" % (len(links) - 20)] - steps = "\n".join(links) + "\n" - body = format(RELVAL_ISSUE_SUMMARY, - error_text=error_text, - steps=steps, - full_message_url="foo" - ) - print("Issue %s will be updated as follows" % issue.number) - print(body) - oldBody = issue.body.split("\n") - - if dryRun: - print("--dry-run specified. Not adding new messages") - return - issue.edit(body=body) - -def getZippedLog(name, t, p, info): - zippedLogs = "%s/pyRelValMatrixLogs.zip" % p - print(info) - info["step"] = info["steps"][0] - logFile = "%(workflowId)s_%(name)s/step%(step)s_%(name)s.log" % info - print(logFile) - print(join(p, "pyRelValMatrixLogs.zip")) - if not exists(join(p, "pyRelValMatrixLogs.zip")): - return None - cmd = "unzip -cx %s %s" % (zippedLogs, logFile) - print(cmd) - err, out = run_cmd("unzip -cx %s %s" % (zippedLogs, logFile)) - if err: - return None - return out - -# Return (hash, title, errorMessage) if I can -# understand the error message. False otherwise. -def understandAssertion(name, t, p, info): - log = getZippedLog(name, t, p, info) - if not log: - return None - checkAssertion = re.findall(".*/src/(.*/src/.*): Assertion `(.*)' failed.", log) - if len(checkAssertion) == 0: - return None - - print("Reporting this as an assertion") - errorTitle = "failed assertion" - uniqueMessage = checkAssertion[0][1] - errorMessage = "%s: Assertion `%s' failed." % (checkAssertion[0][0], checkAssertion[0][1]) - h = hashlib.sha1((name + errorTitle + uniqueMessage).encode()).hexdigest()[:10] - return (h, errorTitle, errorMessage) - -# Understand fatal root errors -def understandFatalRootError(name, t, p, info): - print("Attempt with root error") - log = getZippedLog(name, t, p, info) - if not log: - print("Log not found") - return None - print(len(log)) - print(not "----- Begin Fatal Exception" in log) - if not "----- Begin Fatal Exception" in log: - return None - matcher = str(".*An exception of category 'FatalRootError' occurred.*" - ".*Fatal Root Error: [@]SUB=([^\n]*)(.*)\n") - s = log.split("----- Begin Fatal Exception")[1].split("----- End Fatal Exception")[0] - s = s.split("\n", 1)[1] - checkRootError = re.findall(matcher, s, re.DOTALL) - print("root error %s" % str(checkRootError)) - if not checkRootError: - return None - errorTitle = re.sub("/.*/", "", checkRootError[0][1].strip("\n")) - # Remove any paths. - errorMessage = re.sub("/.*/", "", s).strip("\n") - h = hashlib.sha1((name + errorTitle + errorMessage).encode()).hexdigest()[:10] - print(h) - return (h, errorTitle, errorMessage) - -# Understand if there was a missing input file error. -# - Fails in step2. 
-def understandStep1Error(name, t, p, info): - if int(info["steps"][0]) != 2: - return - zippedLogs = "%s/pyRelValMatrixLogs.zip" % p - logFile = "%(workflowId)s_%(name)s/step1_dasquery.log" % info - if not exists(join(p, "pyRelValMatrixLogs.zip")): - return None - cmd = "unzip -qq -cx %s %s 2>/dev/null" % (zippedLogs, logFile) - print(cmd) - err, out = run_cmd(cmd) - if err: - return None - if out.strip(): - return None - errorTitle = "cannot find input" - errorMessage = str("step2 fails when looking for input.\n" - "Input file might have been deleted or we have a DAS issue.") - h = hashlib.sha1((name + errorTitle).encode()).hexdigest()[:10] - return (h, errorTitle, errorMessage) - -# Generic "catch all" solution for errors. This must be last in the list of -# understanding plugins. -def understandGenericError(name, t, p, info): - errorTitle = "generic error" - errorMessage = "I could not fully undestand what is going on, but some relval fails.\nPlease have a look at the errors." - h = hashlib.sha1((name + "generic error").encode()).hexdigest()[:10] - return (h, errorTitle, errorMessage) - -understandingPlugins = [understandStep1Error, - understandAssertion, - understandFatalRootError, - understandGenericError] - -def understandError(name, t, p, info): - """returns a tuple with a unique hash identifyingh this error - and a human readable message trying to explain it. - """ - # For the moment we simply have a generic error and - # we include the release queue in the hash so that we - # have errors being generated separately per release queue - for plugin in understandingPlugins: - result = plugin(name, t, p, info) - if not result: - continue - return result - assert(False) - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("--logdir", type=str, help="where to find the logs") - parser.add_argument("--filter", type=str, default="") - parser.add_argument("-n", "--dry-run", dest="dryRun", action="store_true", default=False) - args = parser.parse_args() - print(args.dryRun) - - # Generate a tuple with - # (, , ) - globExpr = join(args.logdir, "*/www/*/*/*") - print(globExpr) - releases = [r for r in glob(globExpr) if re.match(".*" + args.filter + ".*", r)] - RESULTS_PATH = "pyRelValMatrixLogs/run/runall-report-step123-.log" - releases = [r for r in releases if exists(join(r, RESULTS_PATH))] - names = sorted([basename(r) for r in releases]) - names.reverse() - types = [] - last_names = [] - for x in names: - if x.split("_X")[0] in types: - continue - types.append(x.split("_X")[0]) - last_names.append(x) - - types = [x + "_X" for x in types] - last_releases = [] - for r in releases: - for l in last_names: - if l in r: - last_releases.append(r) - last_releases.sort() - release_info = zip(last_names, types, last_releases) - - for x in release_info: - print("The following releases will be considered: ") - print("\n".join(["- %s for %s" % (x[0], x[1]) for x in release_info])) - - # Iterate on the latest releases and find out if they have issues, producing - # a map of workflows steps which are broken. 
- print("Parsing new issues") - validErrorReport={} - for (name, t, p) in release_info: - errorLogPath = join(p, "pyRelValMatrixLogs/run/runall-report-step123-.log") - if not exists(errorLogPath): - print("Cannot find %s" % errorLogPath) - continue - #print errorLogPath - print("Processing %s" % errorLogPath) - workflows = readWorkflows(open(errorLogPath)) - if not workflows: - continue - # If we have error report we construct a hash which uniquely identifies the - # error (by hashing error message and release) and append all broken - # steps to it. - for (workflow, info) in workflows.items(): - for step in info["steps"]: - (h, errorTitle, errorMessage) = understandError(name, t, p, info) - if not h in validErrorReport: - validErrorReport[h] = {"queue": t, - "current_release": name, - "error_title": errorTitle, - "error_text": errorMessage, - "error_hash": h, - "workflows": [] - } - print(workflow, step) - stepInfo = {"workflowId": info["workflowId"], "name": info["name"], "step": step} - validErrorReport[h]["workflows"].append(stepInfo) - - print("Parsing old issues.") - # Get from github all the issues which are associated to failing relvals. - # Parse them to have an understanding of current status. - issues = [] - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - repo = gh.get_organization("cms-sw").get_repo("cmssw") - labels = repo.get_labels() - relvalIssueLabel = [x for x in labels if x.name == "relval"] - issues = repo.get_issues(labels=relvalIssueLabel) - print("Old issues found: " + ", ".join(["#%s" % x.number for x in issues])) - pastIssues = {} - for issue in issues: - tm = re.match(ISSUE_TITLE_MATCHER, issue.title, re.DOTALL) - if not tm: - print("Unable to parse title %s for issue %s" % (issue.title, issue.number)) - continue - parts = tm.groups() - queue, error_title, first_release, error_hash = parts - - if not error_hash in pastIssues: - pastIssues[error_hash] = {"queue": queue, - "first_release": first_release, - "error_hash": error_hash, - "workflows": [], - "issue": issue, - } - - # Parse the body to try to understand the previous set of failing tests. - # If the format of the report changed, this is handle by simply rewriting - # the body completely. - bm = re.match(ISSUE_BODY_MATCHER, issue.body, re.DOTALL) - if not bm: - print("Unable to parse body for issue %s. Issue will be updated" % (issue.number)) - continue - parts = bm.groups() - error_message, workflows = parts - pastIssues[error_hash]["workflows"] = parseSteps(workflows) - - print("Updating current status") - # Do the matching between current status and old status. - # Iterate on new status: - # - If an error was not reported. Add a new message - # - If an error was already reported, for a different - # set of steps, update the list of steps. - # - If an error was already reported, do not do anything. - # - for h, payload in validErrorReport.items(): - if not h in pastIssues: - print("New error detected for %s. Will post a message" % payload["queue"]) - postNewMessage(dryRun=args.dryRun, repo=repo, labels=relvalIssueLabel, **payload) - continue - - currentSteps = payload["workflows"] - pastSteps = pastIssues[h]["workflows"] - currentSteps = sorted([(float(x["workflowId"]), x["step"]) for x in currentSteps]) - pastSteps = sorted([(float(x["workflow"]), x["step"]) for x in pastSteps]) - - if currentSteps == pastSteps: - print("No changes in issue %s." 
% pastIssues[h]["issue"].number) - continue - - issue = pastIssues[h]["issue"] - print("Error %s is already found in github, but changed. Adapting description." % issue.number) - updateBugReport(dryRun=args.dryRun, issue=issue, **payload) - - for h, payload in list(pastIssues.items()): - if h in validErrorReport: - continue - # Skip the queues which we have filtered. - if not re.match(".*" + args.filter + ".*", payload["queue"]): - continue - if args.dryRun: - print("Issue %s should really be closed." % payload["issue"].number) - else: - print("Closing issue %s." % payload["issue"].number) - issue.edit(state="closed") diff --git a/process-error-reports b/process-error-reports new file mode 120000 index 000000000000..36744169f404 --- /dev/null +++ b/process-error-reports @@ -0,0 +1 @@ +process-error-reports.py \ No newline at end of file diff --git a/process-error-reports.py b/process-error-reports.py new file mode 100755 index 000000000000..e05222394719 --- /dev/null +++ b/process-error-reports.py @@ -0,0 +1,426 @@ +#!/usr/bin/env python3 + +# This script will be famous: +# +# Parses logs looking for errors. +# Parses github issues looking for past errors. +# Updates past errors to match the current status. + +from argparse import ArgumentParser +from glob import glob +from github import Github, Label +from os.path import basename, join, expanduser, exists +from _py2with3compatibility import run_cmd +import re +import hashlib +from operator import itemgetter +from socket import setdefaulttimeout + +setdefaulttimeout(120) + +PING_COMMENT = "This issue is also present in release %s" +RESULTS_RE = "^([0-9.]+)_([^ ]*) (.*) - time date.*exit: (.*)" +FAILED_RE = "Step([0-9])-FAILED" + + +# Create a regular expression from a format string. +# - Replace format strings with something does not enter the substitutions below. +# - Replace regexp special caratecters with their escaped counter parts +# - Replace back @@@ to be "(.*)" for the matching +def reEscape(s): + s = re.sub("%\([a-z_A-Z]+\)s", "@@@", s) + s = re.sub("([\[\]\(\)\*\+\.])", "\\\\\\1", s) + s = s.replace("\n", "\\n") + s = re.sub("@@@", "(.*)", s) + return s + + +RELVAL_ISSUE_TITLE = ( + '%(id_ib)s has "%(error_title)s" issue in %(id_release)s. ERROR_ID:%(error_hash)s' +) +RELVAL_ISSUE_SUMMARY = "The following error:\n\n```\n%(error_text)s\n\n```\n\nis found in the following *(workflow, step)* pairs:\n\n%(steps)s\nClick on the link for more information." 
+RELVAL_ISSUE_LINK_TEMPLATE = "- [Workflow %(workflowId)s - Step %(step)s](https://cmssdt.cern.ch/SDT/jenkins-artifacts/summary-merged-prs/merged_prs.html)" + +ISSUE_TITLE_MATCHER = reEscape(RELVAL_ISSUE_TITLE) +ISSUE_BODY_MATCHER = reEscape(RELVAL_ISSUE_SUMMARY) +ISSUE_LINK_MATCHER = reEscape(RELVAL_ISSUE_LINK_TEMPLATE) + + +def format(s, **kwds): + return s % kwds + + +# Parses the report with all the failing steps +def parseSteps(buf): + results = [] + for l in buf.split("\n"): + m = re.match(ISSUE_LINK_MATCHER, l) + if not m: + continue + print(m.groups()) + results.append({"workflow": str(m.group(1)), "step": int(m.group(2))}) + return results + + +def readWorkflows(f): + buf = f.read() + results = {} + for l in buf.split("\n"): + r = re.match(RESULTS_RE, l) + if not r: + continue + workflowId, workflow, steps, exit = r.groups() + steps = steps.split(" ") + failedSteps = [ + int(re.match(FAILED_RE, s).group(1)) + 1 for s in steps if re.match(FAILED_RE, s) + ] + if not failedSteps: + continue + results[workflowId] = {"workflowId": workflowId, "name": workflow, "steps": failedSteps} + return results + + +def postNewMessage( + dryRun=True, + labels=None, + repo=None, + queue=None, + error_title=None, + workflows=None, + current_release=None, + error_hash=None, + error_text=None, + **kwds, +): + if labels is None: + labels = [] + if workflows is None: + workflows = [] + steps = "" + print("foo" + str(workflows[0])) + workflows.sort(key=itemgetter("workflowId")) + for info in workflows[:20]: + steps += ( + format( + RELVAL_ISSUE_LINK_TEMPLATE, + step=step, + workflowId=info["workflowId"], + name=info["name"], + ) + + "\n" + ) + if len(workflows) > 20: + steps += "- .. and %s more not listed here." % (len(workflows) - 20) + title = format( + RELVAL_ISSUE_TITLE, + id_ib=queue, + id_release=current_release, + error_title=error_title, + error_hash=error_hash, + ) + body = format( + RELVAL_ISSUE_SUMMARY, + error_text=error_text, + steps=steps, + full_message_url="foo", + ) + print("\n---") + print("The following message will be added:") + print(title) + print(body) + if dryRun: + print("--dry-run specified. Not adding new messages") + return + repo.create_issue(title=title, body=body, labels=labels) + + +def updateBugReport(dryRun=False, error_text="", workflows=[], issue=None, **kwds): + print(workflows) + workflows.sort(key=itemgetter("workflowId")) + links = [RELVAL_ISSUE_LINK_TEMPLATE % s for s in workflows] + if len(links) > 20: + links = links[:20] + ["- .. and %s more" % (len(links) - 20)] + steps = "\n".join(links) + "\n" + body = format(RELVAL_ISSUE_SUMMARY, error_text=error_text, steps=steps, full_message_url="foo") + print("Issue %s will be updated as follows" % issue.number) + print(body) + oldBody = issue.body.split("\n") + + if dryRun: + print("--dry-run specified. Not adding new messages") + return + issue.edit(body=body) + + +def getZippedLog(name, t, p, info): + zippedLogs = "%s/pyRelValMatrixLogs.zip" % p + print(info) + info["step"] = info["steps"][0] + logFile = "%(workflowId)s_%(name)s/step%(step)s_%(name)s.log" % info + print(logFile) + print(join(p, "pyRelValMatrixLogs.zip")) + if not exists(join(p, "pyRelValMatrixLogs.zip")): + return None + cmd = "unzip -cx %s %s" % (zippedLogs, logFile) + print(cmd) + err, out = run_cmd("unzip -cx %s %s" % (zippedLogs, logFile)) + if err: + return None + return out + + +# Return (hash, title, errorMessage) if I can +# understand the error message. False otherwise. 
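The info dictionaries handed to the understand* helpers below come from readWorkflows above, which matches each line of the runall report against RESULTS_RE and extracts failed steps with FAILED_RE. A small check of that parsing on a hypothetical report line (the exact report format is assumed here, and readWorkflows refers to the function defined above):

import io

sample_report = (
    "1234.0_TestWF Step0-PASSED Step1-FAILED - time date Mon Jan 1 00:00:00 2024; exit: 0 21504\n"
)
parsed = readWorkflows(io.StringIO(sample_report))
# parsed -> {"1234.0": {"workflowId": "1234.0", "name": "TestWF", "steps": [2]}}
# i.e. "Step1-FAILED" is reported as a failure of step 2 (the matcher index is 0-based).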
+def understandAssertion(name, t, p, info):
+    log = getZippedLog(name, t, p, info)
+    if not log:
+        return None
+    checkAssertion = re.findall(".*/src/(.*/src/.*): Assertion `(.*)' failed.", log)
+    if len(checkAssertion) == 0:
+        return None
+
+    print("Reporting this as an assertion")
+    errorTitle = "failed assertion"
+    uniqueMessage = checkAssertion[0][1]
+    errorMessage = "%s: Assertion `%s' failed." % (checkAssertion[0][0], checkAssertion[0][1])
+    h = hashlib.sha1((name + errorTitle + uniqueMessage).encode()).hexdigest()[:10]
+    return (h, errorTitle, errorMessage)
+
+
+# Understand fatal root errors
+def understandFatalRootError(name, t, p, info):
+    print("Attempt with root error")
+    log = getZippedLog(name, t, p, info)
+    if not log:
+        print("Log not found")
+        return None
+    print(len(log))
+    print(not "----- Begin Fatal Exception" in log)
+    if not "----- Begin Fatal Exception" in log:
+        return None
+    matcher = str(
+        ".*An exception of category 'FatalRootError' occurred.*"
+        ".*Fatal Root Error: [@]SUB=([^\n]*)(.*)\n"
+    )
+    s = log.split("----- Begin Fatal Exception")[1].split("----- End Fatal Exception")[0]
+    s = s.split("\n", 1)[1]
+    checkRootError = re.findall(matcher, s, re.DOTALL)
+    print("root error %s" % str(checkRootError))
+    if not checkRootError:
+        return None
+    errorTitle = re.sub("/.*/", "", checkRootError[0][1].strip("\n"))
+    # Remove any paths.
+    errorMessage = re.sub("/.*/", "", s).strip("\n")
+    h = hashlib.sha1((name + errorTitle + errorMessage).encode()).hexdigest()[:10]
+    print(h)
+    return (h, errorTitle, errorMessage)
+
+
+# Understand if there was a missing input file error.
+# - Fails in step2.
+def understandStep1Error(name, t, p, info):
+    if int(info["steps"][0]) != 2:
+        return
+    zippedLogs = "%s/pyRelValMatrixLogs.zip" % p
+    logFile = "%(workflowId)s_%(name)s/step1_dasquery.log" % info
+    if not exists(join(p, "pyRelValMatrixLogs.zip")):
+        return None
+    cmd = "unzip -qq -cx %s %s 2>/dev/null" % (zippedLogs, logFile)
+    print(cmd)
+    err, out = run_cmd(cmd)
+    if err:
+        return None
+    if out.strip():
+        return None
+    errorTitle = "cannot find input"
+    errorMessage = str(
+        "step2 fails when looking for input.\n"
+        "Input file might have been deleted or we have a DAS issue."
+    )
+    h = hashlib.sha1((name + errorTitle).encode()).hexdigest()[:10]
+    return (h, errorTitle, errorMessage)
+
+
+# Generic "catch all" solution for errors. This must be last in the list of
+# understanding plugins.
+def understandGenericError(name, t, p, info):
+    errorTitle = "generic error"
+    errorMessage = "I could not fully understand what is going on, but some relval fails.\nPlease have a look at the errors."
+    h = hashlib.sha1((name + "generic error").encode()).hexdigest()[:10]
+    return (h, errorTitle, errorMessage)
+
+
+understandingPlugins = [
+    understandStep1Error,
+    understandAssertion,
+    understandFatalRootError,
+    understandGenericError,
+]
+
+
+def understandError(name, t, p, info):
+    """returns a tuple with a unique hash identifying this error
+    and a human readable message trying to explain it.
+    """
+    # For the moment we simply have a generic error and
+    # we include the release queue in the hash so that we
+    # have errors being generated separately per release queue
+    for plugin in understandingPlugins:
+        result = plugin(name, t, p, info)
+        if not result:
+            continue
+        return result
+    assert False
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--logdir", type=str, help="where to find the logs")
+    parser.add_argument("--filter", type=str, default="")
+    parser.add_argument("-n", "--dry-run", dest="dryRun", action="store_true", default=False)
+    args = parser.parse_args()
+    print(args.dryRun)
+
+    # Generate a tuple with
+    # (name, type, path)
+    globExpr = join(args.logdir, "*/www/*/*/*")
+    print(globExpr)
+    releases = [r for r in glob(globExpr) if re.match(".*" + args.filter + ".*", r)]
+    RESULTS_PATH = "pyRelValMatrixLogs/run/runall-report-step123-.log"
+    releases = [r for r in releases if exists(join(r, RESULTS_PATH))]
+    names = sorted([basename(r) for r in releases])
+    names.reverse()
+    types = []
+    last_names = []
+    for x in names:
+        if x.split("_X")[0] in types:
+            continue
+        types.append(x.split("_X")[0])
+        last_names.append(x)
+
+    types = [x + "_X" for x in types]
+    last_releases = []
+    for r in releases:
+        for l in last_names:
+            if l in r:
+                last_releases.append(r)
+    last_releases.sort()
+    release_info = list(zip(last_names, types, last_releases))
+
+    for x in release_info:
+        print("The following releases will be considered: ")
+        print("\n".join(["- %s for %s" % (x[0], x[1]) for x in release_info]))
+
+    # Iterate on the latest releases and find out if they have issues, producing
+    # a map of workflows steps which are broken.
+    print("Parsing new issues")
+    validErrorReport = {}
+    for name, t, p in release_info:
+        errorLogPath = join(p, "pyRelValMatrixLogs/run/runall-report-step123-.log")
+        if not exists(errorLogPath):
+            print("Cannot find %s" % errorLogPath)
+            continue
+        # print errorLogPath
+        print("Processing %s" % errorLogPath)
+        workflows = readWorkflows(open(errorLogPath))
+        if not workflows:
+            continue
+        # If we have error report we construct a hash which uniquely identifies the
+        # error (by hashing error message and release) and append all broken
+        # steps to it.
+        for workflow, info in workflows.items():
+            for step in info["steps"]:
+                (h, errorTitle, errorMessage) = understandError(name, t, p, info)
+                if not h in validErrorReport:
+                    validErrorReport[h] = {
+                        "queue": t,
+                        "current_release": name,
+                        "error_title": errorTitle,
+                        "error_text": errorMessage,
+                        "error_hash": h,
+                        "workflows": [],
+                    }
+                print(workflow, step)
+                stepInfo = {"workflowId": info["workflowId"], "name": info["name"], "step": step}
+                validErrorReport[h]["workflows"].append(stepInfo)
+
+    print("Parsing old issues.")
+    # Get from github all the issues which are associated to failing relvals.
+    # Parse them to have an understanding of current status.
+    issues = []
+    gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip())
+    repo = gh.get_organization("cms-sw").get_repo("cmssw")
+    labels = repo.get_labels()
+    relvalIssueLabel = [x for x in labels if x.name == "relval"]
+    issues = repo.get_issues(labels=relvalIssueLabel)
+    print("Old issues found: " + ", ".join(["#%s" % x.number for x in issues]))
+    pastIssues = {}
+    for issue in issues:
+        tm = re.match(ISSUE_TITLE_MATCHER, issue.title, re.DOTALL)
+        if not tm:
+            print("Unable to parse title %s for issue %s" % (issue.title, issue.number))
+            continue
+        parts = tm.groups()
+        queue, error_title, first_release, error_hash = parts
+
+        if not error_hash in pastIssues:
+            pastIssues[error_hash] = {
+                "queue": queue,
+                "first_release": first_release,
+                "error_hash": error_hash,
+                "workflows": [],
+                "issue": issue,
+            }
+
+        # Parse the body to try to understand the previous set of failing tests.
+        # If the format of the report changed, this is handled by simply rewriting
+        # the body completely.
+        bm = re.match(ISSUE_BODY_MATCHER, issue.body, re.DOTALL)
+        if not bm:
+            print("Unable to parse body for issue %s. Issue will be updated" % (issue.number))
+            continue
+        parts = bm.groups()
+        error_message, workflows = parts
+        pastIssues[error_hash]["workflows"] = parseSteps(workflows)
+
+    print("Updating current status")
+    # Do the matching between current status and old status.
+    # Iterate on new status:
+    # - If an error was not reported. Add a new message
+    # - If an error was already reported, for a different
+    #   set of steps, update the list of steps.
+    # - If an error was already reported, do not do anything.
+    #
+    for h, payload in validErrorReport.items():
+        if not h in pastIssues:
+            print("New error detected for %s. Will post a message" % payload["queue"])
+            postNewMessage(dryRun=args.dryRun, repo=repo, labels=relvalIssueLabel, **payload)
+            continue
+
+        currentSteps = payload["workflows"]
+        pastSteps = pastIssues[h]["workflows"]
+        currentSteps = sorted([(float(x["workflowId"]), x["step"]) for x in currentSteps])
+        pastSteps = sorted([(float(x["workflow"]), x["step"]) for x in pastSteps])
+
+        if currentSteps == pastSteps:
+            print("No changes in issue %s." % pastIssues[h]["issue"].number)
+            continue
+
+        issue = pastIssues[h]["issue"]
+        print(
+            "Error %s is already found in github, but changed. Adapting description."
+            % issue.number
+        )
+        updateBugReport(dryRun=args.dryRun, issue=issue, **payload)
+
+    for h, payload in list(pastIssues.items()):
+        if h in validErrorReport:
+            continue
+        # Skip the queues which we have filtered.
+        if not re.match(".*" + args.filter + ".*", payload["queue"]):
+            continue
+        if args.dryRun:
+            print("Issue %s should really be closed." % payload["issue"].number)
+        else:
+            print("Closing issue %s."
% payload["issue"].number) + issue.edit(state="closed") diff --git a/process-partial-logs-relval.py b/process-partial-logs-relval.py index 5a5a47ed7faa..09c3f07ed304 100755 --- a/process-partial-logs-relval.py +++ b/process-partial-logs-relval.py @@ -3,10 +3,10 @@ import os, sys from runPyRelValThread import PyRelValsThread -path=sys.argv[1] -newloc = os.path.dirname(path) + '/pyRelValMatrixLogs/run' -os.system('mkdir -p ' + newloc) -ProcessLogs = PyRelValsThread(1,path,"1of1",newloc) +path = sys.argv[1] +newloc = os.path.dirname(path) + "/pyRelValMatrixLogs/run" +os.system("mkdir -p " + newloc) +ProcessLogs = PyRelValsThread(1, path, "1of1", newloc) print("Generating runall log file: %s" % path) ProcessLogs.update_runall() print("Generating relval time info") diff --git a/process-pull-request b/process-pull-request deleted file mode 100755 index 0194b340c292..000000000000 --- a/process-pull-request +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python3 -""" -Returns top commit of a PR (mostly used to comments) -""" -from os.path import expanduser, dirname, abspath, join, exists -from optparse import OptionParser -from socket import setdefaulttimeout -from github_utils import api_rate_limits, get_pr_commits, get_pr_latest_commit, get_gh_token -setdefaulttimeout(120) -import sys -SCRIPT_DIR = dirname(abspath(sys.argv[0])) - -if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("-c", "--commit", dest="commit", action="store_true", help="Get last commit of the PR", default=False) - parser.add_option("-a", "--all", dest="all", action="store_true", help="Get all commits of the PR", default=False) - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-f", "--force", dest="force", action="store_true", help="Force process the issue/PR even if it is ignored.", default=False) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. 
cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - opts, args = parser.parse_args() - - if len(args) != 1: - parser.error("Too many/few arguments") - prId = int(args[0]) # Positional argument is "Pull request ID" - if opts.commit: - if opts.all: - for c in get_pr_commits(prId, opts.repository): - print(c['sha']) - else: - print(get_pr_latest_commit(args[0], opts.repository)) - else: - from github import Github - repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config - if not getattr(repo_config, 'RUN_DEFAULT_CMS_BOT', True): sys.exit(0) - gh = Github(login_or_token=get_gh_token(opts.repository)) - api_rate_limits(gh) - repo = gh.get_repo(opts.repository) - from process_pr import process_pr - process_pr(repo_config, gh, repo, repo.get_issue(prId), opts.dryRun, force=opts.force) diff --git a/process-pull-request b/process-pull-request new file mode 120000 index 000000000000..888ba5181a29 --- /dev/null +++ b/process-pull-request @@ -0,0 +1 @@ +process-pull-request.py \ No newline at end of file diff --git a/process-pull-request.py b/process-pull-request.py new file mode 100755 index 000000000000..74c18e757d6d --- /dev/null +++ b/process-pull-request.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +""" +Returns top commit of a PR (mostly used to comments) +""" +from os.path import expanduser, dirname, abspath, join, exists +from optparse import OptionParser +from socket import setdefaulttimeout +from github_utils import api_rate_limits, get_pr_commits, get_pr_latest_commit, get_gh_token + +setdefaulttimeout(120) +import sys + +SCRIPT_DIR = dirname(abspath(sys.argv[0])) + +if __name__ == "__main__": + parser = OptionParser(usage="%prog ") + parser.add_option( + "-c", + "--commit", + dest="commit", + action="store_true", + help="Get last commit of the PR", + default=False, + ) + parser.add_option( + "-a", + "--all", + dest="all", + action="store_true", + help="Get all commits of the PR", + default=False, + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + help="Force process the issue/PR even if it is ignored.", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + opts, args = parser.parse_args() + + if len(args) != 1: + parser.error("Too many/few arguments") + prId = int(args[0]) # Positional argument is "Pull request ID" + if opts.commit: + if opts.all: + for c in get_pr_commits(prId, opts.repository): + print(c["sha"]) + else: + print(get_pr_latest_commit(args[0], opts.repository)) + else: + from github import Github + + repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import repo_config + + if not getattr(repo_config, "RUN_DEFAULT_CMS_BOT", True): + sys.exit(0) + gh = Github(login_or_token=get_gh_token(opts.repository)) + api_rate_limits(gh) + repo = gh.get_repo(opts.repository) + from process_pr import process_pr + + process_pr(repo_config, gh, repo, repo.get_issue(prId), opts.dryRun, force=opts.force) diff --git a/process_pr.py b/process_pr.py index b08cf16179af..97702c0c5a73 100644 --- a/process_pr.py +++ b/process_pr.py @@ -1,10 +1,28 @@ -from categories import CMSSW_L2, CMSSW_L1, TRIGGER_PR_TESTS, CMSSW_ISSUES_TRACKERS, PR_HOLD_MANAGERS, EXTERNAL_REPOS,CMSDIST_REPOS +from categories import ( + CMSSW_L2, + CMSSW_L1, + TRIGGER_PR_TESTS, + CMSSW_ISSUES_TRACKERS, + PR_HOLD_MANAGERS, + EXTERNAL_REPOS, + CMSDIST_REPOS, +) from categories import CMSSW_CATEGORIES from releases import RELEASE_BRANCH_MILESTONE, RELEASE_BRANCH_PRODUCTION, CMSSW_DEVEL_BRANCH -from cms_static import VALID_CMSDIST_BRANCHES, NEW_ISSUE_PREFIX, NEW_PR_PREFIX, ISSUE_SEEN_MSG, BUILD_REL, \ - GH_CMSSW_REPO, GH_CMSDIST_REPO, CMSBOT_IGNORE_MSG, VALID_CMS_SW_REPOS_FOR_TESTS, CREATE_REPO -from cms_static import BACKPORT_STR,GH_CMSSW_ORGANIZATION, CMSBOT_NO_NOTIFY_MSG -from githublabels import TYPE_COMMANDS +from cms_static import ( + VALID_CMSDIST_BRANCHES, + NEW_ISSUE_PREFIX, + NEW_PR_PREFIX, + ISSUE_SEEN_MSG, + BUILD_REL, + GH_CMSSW_REPO, + GH_CMSDIST_REPO, + CMSBOT_IGNORE_MSG, + VALID_CMS_SW_REPOS_FOR_TESTS, + CREATE_REPO, +) +from cms_static import BACKPORT_STR, GH_CMSSW_ORGANIZATION, CMSBOT_NO_NOTIFY_MSG +from githublabels import TYPE_COMMANDS, TEST_IGNORE_REASON from repo_config import GH_REPO_ORGANIZATION import re, time from datetime import datetime @@ -17,1520 +35,2001 @@ from json import dumps, load try: - from categories import CMSSW_LABELS + from categories import CMSSW_LABELS except: - CMSSW_LABELS = {} + CMSSW_LABELS = {} try: - from categories import get_dpg_pog + from categories import get_dpg_pog except: - def get_dpg_pog(*args): return {} + + def get_dpg_pog(*args): + return {} + + try: - from categories import external_to_package + from categories import external_to_package except: - def external_to_package(*args): - return '' + + def external_to_package(*args): + return "" + + try: - from releases import get_release_managers, is_closed_branch + from releases import get_release_managers, is_closed_branch except: - def get_release_managers(*args): - return [] - def is_closed_branch(*args): - return False + + def get_release_managers(*args): + return [] + + def is_closed_branch(*args): + return False dpg_pog = get_dpg_pog() for l in CMSSW_LABELS.keys(): - if not l in dpg_pog: - del CMSSW_LABELS[l] - else: - CMSSW_LABELS[l] = [re.compile('^('+p+').*$') for p in CMSSW_LABELS[l]] + if not l in dpg_pog: + del CMSSW_LABELS[l] + else: + CMSSW_LABELS[l] = [re.compile("^(" + p + ").*$") for p in CMSSW_LABELS[l]] setdefaulttimeout(300) -CMSDIST_REPO_NAME=join(GH_REPO_ORGANIZATION, GH_CMSDIST_REPO) 
-CMSSW_REPO_NAME=join(GH_REPO_ORGANIZATION, GH_CMSSW_REPO) +CMSDIST_REPO_NAME = join(GH_REPO_ORGANIZATION, GH_CMSDIST_REPO) +CMSSW_REPO_NAME = join(GH_REPO_ORGANIZATION, GH_CMSSW_REPO) + # Prepare various comments regardless of whether they will be made or not. -def format(s, **kwds): return s % kwds - -TRIGERING_TESTS_MSG = 'The tests are being triggered in jenkins.' -TRIGERING_TESTS_MSG1 = 'Jenkins tests started for ' -TRIGERING_STYLE_TEST_MSG = 'The project style tests are being triggered in jenkins.' -IGNORING_TESTS_MSG = 'Ignoring test request.' -TESTS_RESULTS_MSG = '^\s*([-|+]1|I had the issue.*)\s*$' -FAILED_TESTS_MSG = 'The jenkins tests job failed, please try again.' -PUSH_TEST_ISSUE_MSG='^\[Jenkins CI\] Testing commit: [0-9a-f]+$' +def format(s, **kwds): + return s % kwds + + +TRIGERING_TESTS_MSG = "The tests are being triggered in jenkins." +TRIGERING_TESTS_MSG1 = "Jenkins tests started for " +TRIGERING_STYLE_TEST_MSG = "The project style tests are being triggered in jenkins." +IGNORING_TESTS_MSG = "Ignoring test request." +TESTS_RESULTS_MSG = "^\s*([-|+]1|I had the issue.*)\s*$" +FAILED_TESTS_MSG = "The jenkins tests job failed, please try again." +PUSH_TEST_ISSUE_MSG = "^\[Jenkins CI\] Testing commit: [0-9a-f]+$" HOLD_MSG = "Pull request has been put on hold by " -#Regexp to match the test requests -CODE_CHECKS_REGEXP=re.compile("code-checks(\s+with\s+cms.week[0-9].PR_[0-9a-f]{8}/[^\s]+|)(\s+and\s+apply\s+patch|)$") -WF_PATTERN="[1-9][0-9]*(\.[0-9]+|)" -CMSSW_QUEUE_PATTERN='CMSSW_[0-9]+_[0-9]+_(X|[A-Z][A-Z0-9]+_X|[0-9]+(_[a-zA-Z0-9_]+|))' -CMSSW_PACKAGE_PATTERN='[A-Z][a-zA-Z0-9]+(/[a-zA-Z0-9]+|)' -ARCH_PATTERN='[a-z0-9]+_[a-z0-9]+_[a-z0-9]+' -CMSSW_RELEASE_QUEUE_PATTERN=format('(%(cmssw)s|%(arch)s|%(cmssw)s/%(arch)s)', cmssw=CMSSW_QUEUE_PATTERN, arch=ARCH_PATTERN) -RELVAL_OPTS="[-][a-zA-Z0-9_.,\s/'-]+" -CLOSE_REQUEST=re.compile('^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)close\s*$',re.I) -REOPEN_REQUEST=re.compile('^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)(re|)open\s*$',re.I) -CMS_PR_PATTERN=format('(#[1-9][0-9]*|(%(cmsorgs)s)/+[a-zA-Z0-9_-]+#[1-9][0-9]*|https://+github.com/+(%(cmsorgs)s)/+[a-zA-Z0-9_-]+/+pull/+[1-9][0-9]*)', - cmsorgs='|'.join(EXTERNAL_REPOS)) -TEST_REGEXP = format("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)test(\s+workflow(s|)\s+(%(workflow)s(\s*,\s*%(workflow)s|)*)|)(\s+with\s+(%(cms_pr)s(\s*,\s*%(cms_pr)s)*)|)(\s+for\s+%(release_queue)s|)(\s+using\s+full\s+cmssw|\s+using\s+(cms-|)addpkg\s+(%(pkg)s(,%(pkg)s)*)|)\s*$", - workflow=WF_PATTERN, - cms_pr=CMS_PR_PATTERN, - pkg=CMSSW_PACKAGE_PATTERN, - release_queue=CMSSW_RELEASE_QUEUE_PATTERN) +# Regexp to match the test requests +CODE_CHECKS_REGEXP = re.compile( + "code-checks(\s+with\s+cms.week[0-9].PR_[0-9a-f]{8}/[^\s]+|)(\s+and\s+apply\s+patch|)$" +) +WF_PATTERN = "[1-9][0-9]*(\.[0-9]+|)" +CMSSW_QUEUE_PATTERN = "CMSSW_[0-9]+_[0-9]+_(X|[A-Z][A-Z0-9]+_X|[0-9]+(_[a-zA-Z0-9_]+|))" +CMSSW_PACKAGE_PATTERN = "[A-Z][a-zA-Z0-9]+(/[a-zA-Z0-9]+|)" +ARCH_PATTERN = "[a-z0-9]+_[a-z0-9]+_[a-z0-9]+" +CMSSW_RELEASE_QUEUE_PATTERN = format( + "(%(cmssw)s|%(arch)s|%(cmssw)s/%(arch)s)", cmssw=CMSSW_QUEUE_PATTERN, arch=ARCH_PATTERN +) +RELVAL_OPTS = "[-][a-zA-Z0-9_.,\s/'-]+" +CLOSE_REQUEST = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)close\s*$", re.I) +REOPEN_REQUEST = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)(re|)open\s*$", re.I) +CMS_PR_PATTERN = format( + 
"(#[1-9][0-9]*|(%(cmsorgs)s)/+[a-zA-Z0-9_-]+#[1-9][0-9]*|https://+github.com/+(%(cmsorgs)s)/+[a-zA-Z0-9_-]+/+pull/+[1-9][0-9]*)", + cmsorgs="|".join(EXTERNAL_REPOS), +) +TEST_REGEXP = format( + "^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)test(\s+workflow(s|)\s+(%(workflow)s(\s*,\s*%(workflow)s|)*)|)(\s+with\s+(%(cms_pr)s(\s*,\s*%(cms_pr)s)*)|)(\s+for\s+%(release_queue)s|)(\s+using\s+full\s+cmssw|\s+using\s+(cms-|)addpkg\s+(%(pkg)s(,%(pkg)s)*)|)\s*$", + workflow=WF_PATTERN, + cms_pr=CMS_PR_PATTERN, + pkg=CMSSW_PACKAGE_PATTERN, + release_queue=CMSSW_RELEASE_QUEUE_PATTERN, +) AUTO_TEST_REPOS = ["cms-sw/cmssw"] REGEX_TEST_REG = re.compile(TEST_REGEXP, re.I) -REGEX_TEST_ABORT = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)abort(\s+test|)$", re.I) -TEST_WAIT_GAP=720 +REGEX_TEST_ABORT = re.compile( + "^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)abort(\s+test|)$", re.I +) +REGEX_TEST_IGNORE = re.compile( + r"^\s*(?:(?:@|)cmsbuild\s*[,]*\s+|)(?:please\s*[,]*\s+|)ignore\s+tests-rejected\s+(?:with|)([a-z -]+)$", + re.I, +) +TEST_WAIT_GAP = 720 ALL_CHECK_FUNCTIONS = None EXTRA_RELVALS_TESTS = ["threading", "gpu", "high-stats", "nano"] -EXTRA_RELVALS_TESTS_OPTS ="_" + "|_".join(EXTRA_RELVALS_TESTS) +EXTRA_RELVALS_TESTS_OPTS = "_" + "|_".join(EXTRA_RELVALS_TESTS) EXTRA_TESTS = "|".join(EXTRA_RELVALS_TESTS) + "|profiling|none" -SKIP_TESTS = "|".join(["static","header"]) +SKIP_TESTS = "|".join(["static", "header"]) ENABLE_TEST_PTRN = "enable(_test(s|)|)" -JENKINS_NODES = '[a-zA-Z0-9_|&\s()-]+' +JENKINS_NODES = "[a-zA-Z0-9_|&\s()-]+" MULTILINE_COMMENTS_MAP = { - "(workflow|relval)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|)": [format('%(workflow)s(\s*,\s*%(workflow)s|)*', workflow= WF_PATTERN), "MATRIX_EXTRAS"], - "(workflow|relval)(s|)_profiling": [format('%(workflow)s(\s*,\s*%(workflow)s|)*', workflow= WF_PATTERN),"PROFILING_WORKFLOWS"], - "pull_request(s|)": [format('%(cms_pr)s(,%(cms_pr)s)*', cms_pr=CMS_PR_PATTERN ), "PULL_REQUESTS"], - "full_cmssw|full": ['true|false', "BUILD_FULL_CMSSW"], - "disable_poison": ['true|false', "DISABLE_POISON"], - "use_ib_tag": ['true|false', "USE_IB_TAG"], - "baseline": ['self|default', "USE_BASELINE"], - "skip_test(s|)": [format("(%(tests)s)(\s*,\s*(%(tests)s))*",tests=SKIP_TESTS), "SKIP_TESTS"], - "dry_run": ['true|false', "DRY_RUN"], - "jenkins_(slave|node)": [JENKINS_NODES , "RUN_ON_SLAVE"], - "(arch(itecture(s|))|release|release/arch)" : [ CMSSW_RELEASE_QUEUE_PATTERN, "RELEASE_FORMAT"], - ENABLE_TEST_PTRN: [format("(%(tests)s)(\s*,\s*(%(tests)s))*",tests=EXTRA_TESTS), "ENABLE_BOT_TESTS"], - "ignore_test(s|)": ["build-warnings|clang-warnings", "IGNORE_BOT_TESTS"], - "container": ["[a-zA-Z][a-zA-Z0-9_-]+/[a-zA-Z][a-zA-Z0-9_-]+(:[a-zA-Z0-9_-]+|)", "DOCKER_IMGAGE"], - "cms-addpkg|addpkg":[format('%(pkg)s(,%(pkg)s)*', pkg=CMSSW_PACKAGE_PATTERN), "EXTRA_CMSSW_PACKAGES"], - "build_verbose": ['true|false', "BUILD_VERBOSE"], - "(workflow|relval)(s|)_opt(ion|)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|_input|)": [RELVAL_OPTS,"EXTRA_MATRIX_ARGS",True], - "(workflow|relval)(s|)_command_opt(ion|)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|_input|)": [RELVAL_OPTS,"EXTRA_MATRIX_COMMAND_ARGS",True] - } + "(workflow|relval)(s|)(" + + EXTRA_RELVALS_TESTS_OPTS + + "|)": [format("%(workflow)s(\s*,\s*%(workflow)s|)*", workflow=WF_PATTERN), "MATRIX_EXTRAS"], + "(workflow|relval)(s|)_profiling": [ + format("%(workflow)s(\s*,\s*%(workflow)s|)*", workflow=WF_PATTERN), + "PROFILING_WORKFLOWS", + ], + "pull_request(s|)": [ + format("%(cms_pr)s(,%(cms_pr)s)*", cms_pr=CMS_PR_PATTERN), + 
"PULL_REQUESTS", + ], + "full_cmssw|full": ["true|false", "BUILD_FULL_CMSSW"], + "disable_poison": ["true|false", "DISABLE_POISON"], + "use_ib_tag": ["true|false", "USE_IB_TAG"], + "baseline": ["self|default", "USE_BASELINE"], + "skip_test(s|)": [format("(%(tests)s)(\s*,\s*(%(tests)s))*", tests=SKIP_TESTS), "SKIP_TESTS"], + "dry_run": ["true|false", "DRY_RUN"], + "jenkins_(slave|node)": [JENKINS_NODES, "RUN_ON_SLAVE"], + "(arch(itecture(s|))|release|release/arch)": [CMSSW_RELEASE_QUEUE_PATTERN, "RELEASE_FORMAT"], + ENABLE_TEST_PTRN: [ + format("(%(tests)s)(\s*,\s*(%(tests)s))*", tests=EXTRA_TESTS), + "ENABLE_BOT_TESTS", + ], + "ignore_test(s|)": ["build-warnings|clang-warnings", "IGNORE_BOT_TESTS"], + "container": [ + "[a-zA-Z][a-zA-Z0-9_-]+/[a-zA-Z][a-zA-Z0-9_-]+(:[a-zA-Z0-9_-]+|)", + "DOCKER_IMGAGE", + ], + "cms-addpkg|addpkg": [ + format("%(pkg)s(,%(pkg)s)*", pkg=CMSSW_PACKAGE_PATTERN), + "EXTRA_CMSSW_PACKAGES", + ], + "build_verbose": ["true|false", "BUILD_VERBOSE"], + "(workflow|relval)(s|)_opt(ion|)(s|)(" + + EXTRA_RELVALS_TESTS_OPTS + + "|_input|)": [RELVAL_OPTS, "EXTRA_MATRIX_ARGS", True], + "(workflow|relval)(s|)_command_opt(ion|)(s|)(" + + EXTRA_RELVALS_TESTS_OPTS + + "|_input|)": [RELVAL_OPTS, "EXTRA_MATRIX_COMMAND_ARGS", True], +} L2_DATA = {} + def init_l2_data(cms_repo): - l2_data = {} - if cms_repo: - with open(join(dirname(__file__),"cmssw_l2","l2.json")) as ref: - l2_data = load(ref) - for user in CMSSW_L2: - if (user in l2_data) and ('end_date' in l2_data[user][-1]): - del l2_data[user][-1]['end_date'] - else: - for user in CMSSW_L2: - l2_data[user] = [{'start_date': 0, 'category': CMSSW_L2[user]}] - return l2_data + l2_data = {} + if cms_repo: + with open(join(dirname(__file__), "cmssw_l2", "l2.json")) as ref: + l2_data = load(ref) + for user in CMSSW_L2: + if (user in l2_data) and ("end_date" in l2_data[user][-1]): + del l2_data[user][-1]["end_date"] + else: + for user in CMSSW_L2: + l2_data[user] = [{"start_date": 0, "category": CMSSW_L2[user]}] + return l2_data + def get_commenter_categories(commenter, comment_date): - if commenter not in L2_DATA: return [] - for item in L2_DATA[commenter]: - if (comment_date0) - create_test_property = False - repo_cache = {repository: repo} - packages = set([]) - chg_files = [] - package_categories = {} - extra_labels = {'mtype':[]} - add_external_category = False - signing_categories = set([]) - new_package_message = "" - mustClose = False - reOpen = False - releaseManagers = [] - signatures = {} - watchers = [] - #Process Pull Request - pkg_categories = set([]) - REGEX_TYPE_CMDS="^type\s+(([-+]|)[a-z][a-z0-9-]+)(\s*,\s*([-+]|)[a-z][a-z0-9-]+)*$" - REGEX_EX_CMDS="^urgent$|^backport\s+(of\s+|)(#|http(s|):/+github\.com/+%s/+pull/+)\d+$" % (repo.full_name) - known_ignore_tests="%s" % MULTILINE_COMMENTS_MAP["ignore_test(s|)"][0] - REGEX_EX_IGNORE_CHKS='^ignore\s+((%s)(\s*,\s*(%s))*|none)$' % (known_ignore_tests, known_ignore_tests) - REGEX_EX_ENABLE_TESTS='^enable\s+(%s)$' % MULTILINE_COMMENTS_MAP[ENABLE_TEST_PTRN][0] - L2_DATA = init_l2_data (cms_repo) - last_commit_date = None - last_commit_obj = None - push_test_issue = False - requestor = issue.user.login.encode("ascii", "ignore").decode() - ignore_tests = '' - enable_tests = '' - commit_statuses = None - bot_status_name = "bot/jenkins" - bot_ack_name = "bot/ack" - bot_test_param_name = "bot/test_parameters" - cms_status_prefix = "cms" - bot_status = None - code_checks_status = [] - pre_checks_state = {} - default_pre_checks = ["code-checks"] - #For future pre_checks - #if 
prId>=somePRNumber: default_pre_checks+=["some","new","checks"] - pre_checks_url = {} - if issue.pull_request: - pr = repo.get_pull(prId) - if pr.changed_files==0: - print("Ignoring: PR with no files changed") - return - if cmssw_repo and cms_repo and (pr.base.ref == CMSSW_DEVEL_BRANCH): - if pr.state != "closed": - print("This pull request must go in to master branch") - if not dryRun: - edit_pr(repo.full_name, prId, base="master") - msg = format("%(gh_user_char)s%(user)s, %(dev_branch)s branch is closed for direct updates. cms-bot is going to move this PR to master branch.\n" - "In future, please use cmssw master branch to submit your changes.\n", - user=requestor, - gh_user_char=gh_user_char, - dev_branch=CMSSW_DEVEL_BRANCH) - issue.create_comment(msg) - return - # A pull request is by default closed if the branch is a closed one. - if is_closed_branch(pr.base.ref): mustClose = True - # Process the changes for the given pull request so that we can determine the - # signatures it requires. - if cmssw_repo or not external_repo: - if cmssw_repo: - if (pr.base.ref=="master"): signing_categories.add("code-checks") - updateMilestone(repo, issue, pr, dryRun) - chg_files = get_changed_files(repo, pr) - packages = sorted([x for x in set([cmssw_file2Package(repo_config, f) - for f in chg_files])]) - for pkg_file in chg_files: - for ex_lab, pkgs_regexp in list(CMSSW_LABELS.items()): - for regex in pkgs_regexp: - if regex.match(pkg_file): - extra_labels['mtype'].append(ex_lab) - print("Non-Blocking label:%s:%s:%s" % (ex_lab,regex.pattern,pkg_file)) - break - if not extra_labels['mtype']: del extra_labels['mtype'] - print("Extra non-blocking labels:",extra_labels) - print("First Package: ",packages[0]) - create_test_property = True - else: - add_external_category = True - packages = set (["externals/"+repository]) - ex_pkg = external_to_package(repository) - if ex_pkg: packages.add(ex_pkg) - if (repo_org!=GH_CMSSW_ORGANIZATION) or (repo_name in VALID_CMS_SW_REPOS_FOR_TESTS): - create_test_property = True - if (repo_name == GH_CMSDIST_REPO) and (not re.match(VALID_CMSDIST_BRANCHES,pr.base.ref)): - print("Skipping PR as it does not belong to valid CMSDIST branch") - return - - print("Following packages affected:") - print("\n".join(packages)) - for package in packages: - package_categories[package] = set([]) - for category in get_package_categories(package): - package_categories[package].add(category) - pkg_categories.add(category) - signing_categories.update(pkg_categories) - - # For PR, we always require tests. - signing_categories.add("tests") - if add_external_category: signing_categories.add("externals") - if cms_repo: - print("This pull request requires ORP approval") - signing_categories.add("orp") - - print("Following categories affected:") - print("\n".join(signing_categories)) - - if cmssw_repo: - # If there is a new package, add also a dummy "new" category. 
- all_packages = [package for category_packages in list(CMSSW_CATEGORIES.values()) - for package in category_packages] - has_category = all([package in all_packages for package in packages]) - if not has_category: - new_package_message = "\nThe following packages do not have a category, yet:\n\n" - new_package_message += "\n".join([package for package in packages if not package in all_packages]) + "\n" - new_package_message += "Please create a PR for https://github.com/cms-sw/cms-bot/blob/master/categories_map.py to assign category\n" - print(new_package_message) - signing_categories.add("new-package") - - # Add watchers.yaml information to the WATCHERS dict. - WATCHERS = read_repo_file(repo_config, "watchers.yaml", {}) - # Given the files modified by the PR, check if there are additional developers watching one or more. - author = pr.user.login - watchers = set([user for chg_file in chg_files - for user, watched_regexp in list(WATCHERS.items()) - for regexp in watched_regexp - if re.match("^" + regexp + ".*", chg_file) and user != author]) - #Handle category watchers - catWatchers = read_repo_file(repo_config, "category-watchers.yaml", {}) - non_block_cats = [] if not 'mtype' in extra_labels else extra_labels['mtype'] - for user, cats in list(catWatchers.items()): - for cat in cats: - if (cat in signing_categories) or (cat in non_block_cats): - print("Added ",user, " to watch due to cat",cat) - watchers.add(user) - - # Handle watchers - watchingGroups = read_repo_file(repo_config, "groups.yaml", {}) - for watcher in [x for x in watchers]: - if not watcher in watchingGroups: continue - watchers.remove(watcher) - watchers.update(set(watchingGroups[watcher])) - watchers = set([gh_user_char + u for u in watchers]) - print("Watchers " + ", ".join(watchers)) - last_commit_obj = get_last_commit(pr) - if last_commit_obj is None: return - last_commit = last_commit_obj.commit - commit_statuses = last_commit_obj.get_combined_status().statuses - bot_status = get_status(bot_status_name, commit_statuses) - if not bot_status: - bot_status_name = "bot/%s/jenkins" % prId - bot_ack_name = "bot/%s/ack" % prId - bot_test_param_name = "bot/%s/test_parameters" % prId - cms_status_prefix = "cms/%s" % prId - bot_status = get_status(bot_status_name, commit_statuses) - code_checks_status = [s for s in commit_statuses if s.context == "%s/code-checks" % cms_status_prefix] - print("PR Statuses:",commit_statuses) - print(len(commit_statuses)) - last_commit_date = last_commit.committer.date - print("Latest commit by ",last_commit.committer.name.encode("ascii", "ignore").decode()," at ",last_commit_date) - print("Latest commit message: ",last_commit.message.encode("ascii", "ignore").decode()) - print("Latest commit sha: ",last_commit.sha) - print("PR update time",pr.updated_at) - print("Time UTC:",datetime.utcnow()) - if last_commit_date>datetime.utcnow(): - print("==== Future commit found ====") - add_labels = True - try: add_labels = repo_config.ADD_LABELS - except: pass - if (not dryRun) and add_labels: - labels = [x.name.encode("ascii", "ignore").decode() for x in issue.labels] - if not 'future-commit' in labels: - labels.append('future-commit') - issue.edit(labels=labels) - return - extra_rm = get_release_managers (pr.base.ref) - if repository==CMSDIST_REPO_NAME: - br = "_".join(pr.base.ref.split("/")[:2][-1].split("_")[:3])+"_X" - if br: extra_rm=extra_rm+get_release_managers (br) - releaseManagers=list(set(extra_rm+CMSSW_L1)) - else: + global L2_DATA + if (not force) and ignore_issue(repo_config, repo, issue): + 
return + gh_user_char = "@" + if not notify_user(issue): + gh_user_char = "" + api_rate_limits(gh) + prId = issue.number + repository = repo.full_name + repo_org, repo_name = repository.split("/", 1) + auto_test_repo = AUTO_TEST_REPOS try: - if (repo_config.OPEN_ISSUE_FOR_PUSH_TESTS) and (requestor == cmsbuild_user) and re.match(PUSH_TEST_ISSUE_MSG,issue.title): + if repo_config.AUTO_TEST_REPOS: + auto_test_repo = [repository] + else: + auto_test_repo = [] + except: + pass + if not cmsbuild_user: + cmsbuild_user = repo_config.CMSBUILD_USER + print("Working on ", repo.full_name, " for PR/Issue ", prId, "with admin user", cmsbuild_user) + print("Notify User: ", gh_user_char) + set_gh_user(cmsbuild_user) + cmssw_repo = repo_name == GH_CMSSW_REPO + cms_repo = repo_org in EXTERNAL_REPOS + external_repo = (repository != CMSSW_REPO_NAME) and ( + len([e for e in EXTERNAL_REPOS if repo_org == e]) > 0 + ) + create_test_property = False + repo_cache = {repository: repo} + packages = set([]) + chg_files = [] + package_categories = {} + extra_labels = {"mtype": []} + add_external_category = False + signing_categories = set([]) + new_package_message = "" + mustClose = False + reOpen = False + releaseManagers = [] + signatures = {} + watchers = [] + # Process Pull Request + pkg_categories = set([]) + REGEX_TYPE_CMDS = "^type\s+(([-+]|)[a-z][a-z0-9-]+)(\s*,\s*([-+]|)[a-z][a-z0-9-]+)*$" + REGEX_EX_CMDS = "^urgent$|^backport\s+(of\s+|)(#|http(s|):/+github\.com/+%s/+pull/+)\d+$" % ( + repo.full_name + ) + known_ignore_tests = "%s" % MULTILINE_COMMENTS_MAP["ignore_test(s|)"][0] + REGEX_EX_IGNORE_CHKS = "^ignore\s+((%s)(\s*,\s*(%s))*|none)$" % ( + known_ignore_tests, + known_ignore_tests, + ) + REGEX_EX_ENABLE_TESTS = "^enable\s+(%s)$" % MULTILINE_COMMENTS_MAP[ENABLE_TEST_PTRN][0] + L2_DATA = init_l2_data(cms_repo) + last_commit_date = None + last_commit_obj = None + push_test_issue = False + requestor = issue.user.login.encode("ascii", "ignore").decode() + ignore_tests = "" + enable_tests = "" + commit_statuses = None + bot_status_name = "bot/jenkins" + bot_ack_name = "bot/ack" + bot_test_param_name = "bot/test_parameters" + cms_status_prefix = "cms" + bot_status = None + code_checks_status = [] + pre_checks_state = {} + default_pre_checks = ["code-checks"] + # For future pre_checks + # if prId>=somePRNumber: default_pre_checks+=["some","new","checks"] + pre_checks_url = {} + if issue.pull_request: + pr = repo.get_pull(prId) + if pr.changed_files == 0: + print("Ignoring: PR with no files changed") + return + if cmssw_repo and cms_repo and (pr.base.ref == CMSSW_DEVEL_BRANCH): + if pr.state != "closed": + print("This pull request must go in to master branch") + if not dryRun: + edit_pr(repo.full_name, prId, base="master") + msg = format( + "%(gh_user_char)s%(user)s, %(dev_branch)s branch is closed for direct updates. cms-bot is going to move this PR to master branch.\n" + "In future, please use cmssw master branch to submit your changes.\n", + user=requestor, + gh_user_char=gh_user_char, + dev_branch=CMSSW_DEVEL_BRANCH, + ) + issue.create_comment(msg) + return + # A pull request is by default closed if the branch is a closed one. + if is_closed_branch(pr.base.ref): + mustClose = True + # Process the changes for the given pull request so that we can determine the + # signatures it requires. 
+ if cmssw_repo or not external_repo: + if cmssw_repo: + if pr.base.ref == "master": + signing_categories.add("code-checks") + updateMilestone(repo, issue, pr, dryRun) + chg_files = get_changed_files(repo, pr) + packages = sorted( + [x for x in set([cmssw_file2Package(repo_config, f) for f in chg_files])] + ) + for pkg_file in chg_files: + for ex_lab, pkgs_regexp in list(CMSSW_LABELS.items()): + for regex in pkgs_regexp: + if regex.match(pkg_file): + extra_labels["mtype"].append(ex_lab) + print( + "Non-Blocking label:%s:%s:%s" % (ex_lab, regex.pattern, pkg_file) + ) + break + if not extra_labels["mtype"]: + del extra_labels["mtype"] + print("Extra non-blocking labels:", extra_labels) + print("First Package: ", packages[0]) + create_test_property = True + else: + add_external_category = True + packages = set(["externals/" + repository]) + ex_pkg = external_to_package(repository) + if ex_pkg: + packages.add(ex_pkg) + if (repo_org != GH_CMSSW_ORGANIZATION) or (repo_name in VALID_CMS_SW_REPOS_FOR_TESTS): + create_test_property = True + if (repo_name == GH_CMSDIST_REPO) and ( + not re.match(VALID_CMSDIST_BRANCHES, pr.base.ref) + ): + print("Skipping PR as it does not belong to valid CMSDIST branch") + return + + print("Following packages affected:") + print("\n".join(packages)) + for package in packages: + package_categories[package] = set([]) + for category in get_package_categories(package): + package_categories[package].add(category) + pkg_categories.add(category) + signing_categories.update(pkg_categories) + + # For PR, we always require tests. signing_categories.add("tests") - push_test_issue = True - except: pass - if repository==CMSSW_REPO_NAME and re.match(CREATE_REPO, issue.title): - with open("query-new-data-repo-issues-" +str(issue.number) + ".properties", "w") as f: - f.write("ISSUE_NUMBER="+str(issue.number)+"\n") - - # Process the issue comments - signatures = dict([(x, "pending") for x in signing_categories]) - extra_pre_checks = [] - pre_checks = [] - if issue.pull_request: - pre_checks = [c for c in signing_categories if c in default_pre_checks] - for pre_check in pre_checks+["code-checks"]: - pre_checks_state[pre_check] = get_status_state("%s/%s" % (cms_status_prefix, pre_check), commit_statuses) - print("Pre check status:",pre_checks_state) - already_seen = None - pull_request_updated = False - comparison_done = False - comparison_notrun = False - mustMerge = False - release_queue = '' - release_arch = '' - cmssw_prs = '' - extra_wfs = '' - global_test_params = {} - assign_cats = {} - hold = {} - last_test_start_time = None - abort_test = None - need_external = False - backport_pr_num = "" - comp_warnings = False - extra_testers = [] - all_comments = [issue] - code_checks_tools = "" - new_bot_tests = True - test_comment = None - trigger_test = False - ack_comment = None - test_params_msg = "" - test_params_comment = None - code_check_apply_patch = False - - #start of parsing comments section - for c in issue.get_comments(): all_comments.append(c) - for comment in all_comments: - ack_comment = comment - commenter = comment.user.login.encode("ascii", "ignore").decode() - commenter_categories = get_commenter_categories(commenter, int(comment.created_at.strftime('%s'))) - valid_commenter = (commenter in TRIGGER_PR_TESTS + releaseManagers + [repo_org]) or (len(commenter_categories)>0) - if (not valid_commenter) and (requestor!=commenter): continue - comment_msg = comment.body.encode("ascii", "ignore").decode() if comment.body else "" - # The first line is an invariant. 
- comment_lines = [ l.strip() for l in comment_msg.split("\n") if l.strip() ] - first_line = comment_lines[0:1] - if not first_line: continue - first_line = first_line[0] - if (commenter == cmsbuild_user) and re.match(ISSUE_SEEN_MSG, first_line): - already_seen = comment - backport_pr_num = get_backported_pr(comment_msg) - if issue.pull_request and last_commit_date: - if (comment.created_at >= last_commit_date): pull_request_updated = False - else: pull_request_updated = True - continue - - assign_type, new_cats = get_assign_categories(first_line) - if new_cats: - if (assign_type == "new categories assigned:") and (commenter == cmsbuild_user): - for ex_cat in new_cats: - if ex_cat in assign_cats: assign_cats[ex_cat] = 1 - if commenter_categories or (commenter in CMSSW_ISSUES_TRACKERS): - if assign_type == "assign": - for ex_cat in new_cats: - if not ex_cat in signing_categories: - assign_cats[ex_cat] = 0 - signing_categories.add(ex_cat) - signatures[ex_cat]="pending" - elif assign_type == "unassign": - for ex_cat in new_cats: - if ex_cat in assign_cats: - assign_cats.pop(ex_cat) - signing_categories.remove(ex_cat) - signatures.pop(ex_cat) - continue - - # Some of the special users can say "hold" prevent automatic merging of - # fully signed PRs. - if re.match("^hold$", first_line, re.I): - if commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS): hold[commenter]=1 - continue - if re.match(REGEX_EX_CMDS, first_line, re.I): - if commenter_categories or (commenter in releaseManagers + [requestor]): - check_extra_labels(first_line.lower(), extra_labels) - continue - if re.match(REGEX_TYPE_CMDS, first_line, re.I): - if commenter_categories or (commenter in releaseManagers + [requestor]): - valid_labs = check_type_labels(first_line.lower(), extra_labels) - if not dryRun: - if valid_labs: set_comment_emoji(comment.id, repository, emoji="+1") - else: set_comment_emoji(comment.id, repository, emoji="-1") - if re.match(REGEX_EX_IGNORE_CHKS, first_line, re.I): - if valid_commenter: - ignore_tests = check_ignore_bot_tests (first_line.split(" ",1)[-1]) - continue - if re.match(REGEX_EX_ENABLE_TESTS, first_line, re.I): - if valid_commenter: - enable_tests, ignore = check_enable_bot_tests (first_line.split(" ",1)[-1]) - if not dryRun: - set_comment_emoji(comment.id, repository, emoji="+1") - continue - if re.match('^allow\s+@([^ ]+)\s+test\s+rights$',first_line, re.I): - if commenter_categories or (commenter in releaseManagers): - tester = first_line.split("@",1)[-1].split(" ",1)[0] - if not tester in TRIGGER_PR_TESTS: - TRIGGER_PR_TESTS.append(tester) - extra_testers.append(tester) - print("Added user in test category:",tester) - continue - if re.match("^unhold$", first_line, re.I): - if 'orp' in commenter_categories: - hold = {} - elif commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS): - if commenter in hold: del hold[commenter] - continue - if (commenter == cmsbuild_user) and (re.match("^"+HOLD_MSG+".+", first_line)): - for u in first_line.split(HOLD_MSG,2)[1].split(","): - u = u.strip().lstrip("@") - if u in hold: hold[u]=0 - if CLOSE_REQUEST.match(first_line): - if (commenter_categories or (commenter in releaseManagers)) or \ - ((not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)): - reOpen = False - if issue.state == "open": - mustClose = True - print("==>Closing request received from %s" % commenter) - continue - if REOPEN_REQUEST.match(first_line): - if (commenter_categories or (commenter in releaseManagers)) or \ - ((not 
issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)): - mustClose = False - if (issue.state == "closed") and (comment.created_at >= issue.closed_at): - reOpen = True - print("==>Reopen request received from %s" % commenter) - continue - if valid_commenter: - valid_multiline_comment , test_params, test_params_m = multiline_check_function(first_line, comment_lines, repository) - if test_params_m: - test_params_msg = str(comment.id) + ":" + test_params_m - test_params_comment = comment - elif valid_multiline_comment: - test_params_comment = comment - global_test_params = dict(test_params) - if 'ENABLE_BOT_TESTS' in global_test_params: - enable_tests = global_test_params['ENABLE_BOT_TESTS'] - test_params_msg = str(comment.id) + ":" + dumps(global_test_params, sort_keys=True) - continue + if add_external_category: + signing_categories.add("externals") + if cms_repo: + print("This pull request requires ORP approval") + signing_categories.add("orp") + + print("Following categories affected:") + print("\n".join(signing_categories)) + + if cmssw_repo: + # If there is a new package, add also a dummy "new" category. + all_packages = [ + package + for category_packages in list(CMSSW_CATEGORIES.values()) + for package in category_packages + ] + has_category = all([package in all_packages for package in packages]) + if not has_category: + new_package_message = "\nThe following packages do not have a category, yet:\n\n" + new_package_message += ( + "\n".join([package for package in packages if not package in all_packages]) + + "\n" + ) + new_package_message += "Please create a PR for https://github.com/cms-sw/cms-bot/blob/master/categories_map.py to assign category\n" + print(new_package_message) + signing_categories.add("new-package") + + # Add watchers.yaml information to the WATCHERS dict. + WATCHERS = read_repo_file(repo_config, "watchers.yaml", {}) + # Given the files modified by the PR, check if there are additional developers watching one or more. 
+ author = pr.user.login + watchers = set( + [ + user + for chg_file in chg_files + for user, watched_regexp in list(WATCHERS.items()) + for regexp in watched_regexp + if re.match("^" + regexp + ".*", chg_file) and user != author + ] + ) + # Handle category watchers + catWatchers = read_repo_file(repo_config, "category-watchers.yaml", {}) + non_block_cats = [] if not "mtype" in extra_labels else extra_labels["mtype"] + for user, cats in list(catWatchers.items()): + for cat in cats: + if (cat in signing_categories) or (cat in non_block_cats): + print("Added ", user, " to watch due to cat", cat) + watchers.add(user) + + # Handle watchers + watchingGroups = read_repo_file(repo_config, "groups.yaml", {}) + for watcher in [x for x in watchers]: + if not watcher in watchingGroups: + continue + watchers.remove(watcher) + watchers.update(set(watchingGroups[watcher])) + watchers = set([gh_user_char + u for u in watchers]) + print("Watchers " + ", ".join(watchers)) + last_commit_obj = get_last_commit(pr) + if last_commit_obj is None: + return + last_commit = last_commit_obj.commit + commit_statuses = last_commit_obj.get_combined_status().statuses + bot_status = get_status(bot_status_name, commit_statuses) + if not bot_status: + bot_status_name = "bot/%s/jenkins" % prId + bot_ack_name = "bot/%s/ack" % prId + bot_test_param_name = "bot/%s/test_parameters" % prId + cms_status_prefix = "cms/%s" % prId + bot_status = get_status(bot_status_name, commit_statuses) + code_checks_status = [ + s for s in commit_statuses if s.context == "%s/code-checks" % cms_status_prefix + ] + print("PR Statuses:", commit_statuses) + print(len(commit_statuses)) + last_commit_date = last_commit.committer.date + print( + "Latest commit by ", + last_commit.committer.name.encode("ascii", "ignore").decode(), + " at ", + last_commit_date, + ) + print("Latest commit message: ", last_commit.message.encode("ascii", "ignore").decode()) + print("Latest commit sha: ", last_commit.sha) + print("PR update time", pr.updated_at) + print("Time UTC:", datetime.utcnow()) + if last_commit_date > datetime.utcnow(): + print("==== Future commit found ====") + add_labels = True + try: + add_labels = repo_config.ADD_LABELS + except: + pass + if (not dryRun) and add_labels: + labels = [x.name.encode("ascii", "ignore").decode() for x in issue.labels] + if not "future-commit" in labels: + labels.append("future-commit") + issue.edit(labels=labels) + return + extra_rm = get_release_managers(pr.base.ref) + if repository == CMSDIST_REPO_NAME: + br = "_".join(pr.base.ref.split("/")[:2][-1].split("_")[:3]) + "_X" + if br: + extra_rm = extra_rm + get_release_managers(br) + releaseManagers = list(set(extra_rm + CMSSW_L1)) + else: + try: + if ( + (repo_config.OPEN_ISSUE_FOR_PUSH_TESTS) + and (requestor == cmsbuild_user) + and re.match(PUSH_TEST_ISSUE_MSG, issue.title) + ): + signing_categories.add("tests") + push_test_issue = True + except: + pass + if repository == CMSSW_REPO_NAME and re.match(CREATE_REPO, issue.title): + with open("query-new-data-repo-issues-" + str(issue.number) + ".properties", "w") as f: + f.write("ISSUE_NUMBER=" + str(issue.number) + "\n") + + # Process the issue comments + signatures = dict([(x, "pending") for x in signing_categories]) + extra_pre_checks = [] + pre_checks = [] + if issue.pull_request: + pre_checks = [c for c in signing_categories if c in default_pre_checks] + for pre_check in pre_checks + ["code-checks"]: + pre_checks_state[pre_check] = get_status_state( + "%s/%s" % (cms_status_prefix, pre_check), commit_statuses + ) + 
print("Pre check status:", pre_checks_state) + already_seen = None + pull_request_updated = False + comparison_done = False + comparison_notrun = False + mustMerge = False + release_queue = "" + release_arch = "" + cmssw_prs = "" + extra_wfs = "" + global_test_params = {} + assign_cats = {} + hold = {} + last_test_start_time = None + abort_test = None + need_external = False + backport_pr_num = "" + comp_warnings = False + extra_testers = [] + all_comments = [issue] + code_checks_tools = "" + new_bot_tests = True + test_comment = None + trigger_test = False + ack_comment = None + test_params_msg = "" + test_params_comment = None + code_check_apply_patch = False + + # start of parsing comments section + for c in issue.get_comments(): + all_comments.append(c) + for comment in all_comments: + ack_comment = comment + commenter = comment.user.login.encode("ascii", "ignore").decode() + commenter_categories = get_commenter_categories( + commenter, int(comment.created_at.strftime("%s")) + ) + valid_commenter = (commenter in TRIGGER_PR_TESTS + releaseManagers + [repo_org]) or ( + len(commenter_categories) > 0 + ) + if (not valid_commenter) and (requestor != commenter): + continue + comment_msg = comment.body.encode("ascii", "ignore").decode() if comment.body else "" + # The first line is an invariant. + comment_lines = [l.strip() for l in comment_msg.split("\n") if l.strip()] + first_line = comment_lines[0:1] + if not first_line: + continue + first_line = first_line[0] + if (commenter == cmsbuild_user) and re.match(ISSUE_SEEN_MSG, first_line): + already_seen = comment + backport_pr_num = get_backported_pr(comment_msg) + if issue.pull_request and last_commit_date: + if comment.created_at >= last_commit_date: + pull_request_updated = False + else: + pull_request_updated = True + continue - if cmssw_repo: - m = CODE_CHECKS_REGEXP.match(first_line) - if m: - first_line = "code-checks" - code_check_apply_patch = False - if m.group(1): - code_checks_tools = m.group(1).strip().split(" ")[-1] - if m.group(2): - code_check_apply_patch = True - - # Ignore all other messages which are before last commit. 
- if issue.pull_request and (comment.created_at < last_commit_date): - continue - - if (cmssw_repo and first_line=="code-checks"): - signatures[first_line] = "pending" - if first_line not in pre_checks+extra_pre_checks: - extra_pre_checks.append(first_line) - if code_checks_status and (code_checks_status[0].updated_at>=comment.created_at): - continue - if first_line in pre_checks: - if pre_checks_state["code-checks"] in ["pending", ""]: - continue - elif pre_checks_state["code-checks"] in ["pending"]: - continue - pre_checks_state["code-checks"] = "" - print("Found:Code Checks request", code_checks_tools) - continue - - # Check for cmsbuild_user comments and tests requests only for pull requests - if commenter == cmsbuild_user: - if not issue.pull_request and not push_test_issue: continue - sec_line = comment_lines[1:2] - if not sec_line: sec_line = "" - else: sec_line = sec_line[0] - if re.match("Comparison is ready", first_line): - if ('tests' in signatures) and signatures["tests"]!='pending': comparison_done = True - elif "-code-checks" == first_line: - signatures["code-checks"] = "rejected" - pre_checks_url["code-checks"] = comment.html_url - elif "+code-checks" == first_line: - signatures["code-checks"] = "approved" - pre_checks_url["code-checks"] = comment.html_url - elif re.match("^Comparison not run.+",first_line): - if ('tests' in signatures) and signatures["tests"]!='pending': comparison_notrun = True - elif re.match( FAILED_TESTS_MSG, first_line) or re.match(IGNORING_TESTS_MSG, first_line): - signatures["tests"] = "pending" - elif re.match("Pull request ([^ #]+|)[#][0-9]+ was updated[.].*", first_line): - pull_request_updated = False - elif re.match( TRIGERING_TESTS_MSG, first_line) or re.match( TRIGERING_TESTS_MSG1, first_line): - signatures["tests"] = "started" - last_test_start_time = comment.created_at - abort_test = None - need_external = False - if sec_line.startswith("Using externals from cms-sw/cmsdist#"): need_external = True - elif sec_line.startswith('Tested with other pull request'): need_external = True - elif sec_line.startswith('Using extra pull request'): need_external = True - elif re.match( TESTS_RESULTS_MSG, first_line): - test_sha = sec_line.replace("Tested at: ","").strip() - if (not push_test_issue) and (test_sha != last_commit.sha) and (test_sha != 'UNKNOWN') and (not "I had the issue " in first_line): - print("Ignoring test results for sha:",test_sha) - continue - comparison_done = False - comparison_notrun = False - comp_warnings = False - if "+1" in first_line: - signatures["tests"] = "approved" - comp_warnings = len([1 for l in comment_lines if 'Compilation Warnings: Yes' in l ])>0 - pre_checks_url["tests"] = comment.html_url - elif "-1" in first_line: - signatures["tests"] = "rejected" - pre_checks_url["tests"] = comment.html_url - else: - signatures["tests"] = "pending" - print('Previous tests already finished, resetting test request state to ',signatures["tests"]) + assign_type, new_cats = get_assign_categories(first_line) + if new_cats: + if (assign_type == "new categories assigned:") and (commenter == cmsbuild_user): + for ex_cat in new_cats: + if ex_cat in assign_cats: + assign_cats[ex_cat] = 1 + if commenter_categories or (commenter in CMSSW_ISSUES_TRACKERS): + if assign_type == "assign": + for ex_cat in new_cats: + if not ex_cat in signing_categories: + assign_cats[ex_cat] = 0 + signing_categories.add(ex_cat) + signatures[ex_cat] = "pending" + elif assign_type == "unassign": + for ex_cat in new_cats: + if ex_cat in assign_cats: + 
assign_cats.pop(ex_cat) + signing_categories.remove(ex_cat) + signatures.pop(ex_cat) + continue - if (issue.pull_request or push_test_issue): - # Check if the release manager asked for merging this. - if ((commenter in releaseManagers) or ('orp' in commenter_categories)) and re.match("^\s*(merge)\s*$", first_line, re.I): - mustMerge = True - mustClose = False - if ('orp' in commenter_categories) and ('orp' in signatures): signatures["orp"] = "approved" - continue - - # Check if the someone asked to trigger the tests - if valid_commenter: - ok, v2, v3, v4 = check_test_cmd(first_line, repository, global_test_params) - if ok: - test_comment = comment - abort_test = None - cmssw_prs = v2 - extra_wfs = v3 - release_queue = v4 - release_arch = '' - if '/' in release_queue: - release_queue, release_arch = release_queue.split('/',1) - elif re.match('^'+ARCH_PATTERN+'$', release_queue): - release_arch = release_queue - release_queue = '' - print('Tests requested:', commenter, 'asked to test this PR with cmssw_prs=%s, release_queue=%s, arch=%s and workflows=%s' % (cmssw_prs, release_queue, release_arch, extra_wfs)) - print("Comment message:",first_line) - signatures["tests"] = "pending" - continue - elif REGEX_TEST_ABORT.match(first_line) and (signatures["tests"] == "pending"): - abort_test = comment - test_comment = None - signatures["tests"] = "pending" - - # Check L2 signoff for users in this PR signing categories - if [ x for x in commenter_categories if x in signing_categories]: - ctype = "" - selected_cats = [] - if re.match("^([+]1|approve[d]?|sign|signed)$", first_line, re.I): - ctype = "+1" - selected_cats = commenter_categories - elif re.match("^([-]1|reject|rejected)$", first_line, re.I): - ctype = "-1" - selected_cats = commenter_categories - elif re.match("^[+-][a-z][a-z0-9-]+$", first_line, re.I): - category_name = first_line[1:].lower() - if category_name in commenter_categories: - ctype = first_line[0]+"1" - selected_cats = [ category_name ] - if ctype == "+1": - for sign in selected_cats: - signatures[sign] = "approved" - if (test_comment is None) and ((repository in auto_test_repo) or ('*' in auto_test_repo)): - test_comment = comment - if sign == "orp": mustClose = False - elif ctype == "-1": - for sign in selected_cats: - signatures[sign] = "rejected" - if sign == "orp": mustClose = False - continue - - # end of parsing comments section - - if push_test_issue: - auto_close_push_test_issue = True - try: auto_close_push_test_issue=repo_config.AUTO_CLOSE_PUSH_TESTS_ISSUE - except: pass - if auto_close_push_test_issue and (issue.state == "open") and ('tests' in signatures) and ((signatures["tests"] in ["approved","rejected"]) or abort_test): - print("Closing the issue as it has been tested/aborted") - if not dryRun: issue.edit(state="closed") - if abort_test: - job, bnum = get_jenkins_job(issue) - if job and bnum: - params = {} - params["JENKINS_PROJECT_TO_KILL"]=job - params["JENKINS_BUILD_NUMBER"]=bnum - create_property_file("trigger-abort-%s" % job, params, dryRun) - return - - is_hold = len(hold)>0 - new_blocker = False - blockers = "" - for u in hold: - blockers += " "+gh_user_char+u+"," - if hold[u]: new_blocker = True - blockers = blockers.rstrip(",") - - new_assign_cats = [] - for ex_cat in assign_cats: - if assign_cats[ex_cat]==1: continue - new_assign_cats.append(ex_cat) - - print("All assigned cats:",",".join(list(assign_cats.keys()))) - print("Newly assigned cats:",",".join(new_assign_cats)) - print("Ignore tests:",ignore_tests) - print("Enable tests:",enable_tests) - 
print("Tests: %s" % (cmssw_prs)) - print("Abort:",abort_test) - print("Test:",test_comment, bot_status) - - dryRunOrig = dryRun - for cat in pre_checks: - if (cat in signatures) and (signatures[cat]!="approved"): - dryRun=True - break - - old_labels = set([x.name.encode("ascii", "ignore").decode() for x in issue.labels]) - print("Stats:",backport_pr_num,extra_labels) - print("Old Labels:",sorted(old_labels)) - print("Compilation Warnings: ",comp_warnings) - print("Singnatures: ",signatures) - if "mtype" in extra_labels: - extra_labels["mtype"] = list(set(extra_labels["mtype"])) - if "type" in extra_labels: - extra_labels["type"] = [extra_labels["type"][-1]] - - #Always set test pending label - if "tests" in signatures: - if test_comment is not None: - turl = test_comment.html_url - if bot_status: - print("BOT STATUS:\n %s\n %s\n %s\n %s" % (bot_status,bot_status.description,bot_status.target_url,test_comment.html_url)) - if bot_status and bot_status.description.startswith("Old style tests"): - new_bot_tests = False - elif (not bot_status) and (signatures["tests"]!="pending"): - new_bot_tests = False - if (not bot_status) or (bot_status.target_url != turl): - if bot_status or (signatures["tests"]=="pending"): - new_bot_tests = True - trigger_test = True - signatures["tests"]="started" - desc = "requested by %s at %s UTC." % (test_comment.user.login.encode("ascii", "ignore").decode(), test_comment.created_at) - if not new_bot_tests: - desc = "Old style tests %s" % desc - else: - desc = "Tests %s" % desc - print(desc) - if not dryRun: - last_commit_obj.create_status("success", description=desc, target_url=turl, context=bot_status_name) - set_comment_emoji(test_comment.id, repository) - if bot_status: - print(bot_status.target_url,turl,signatures["tests"],bot_status.description) - if bot_status and bot_status.target_url == turl and signatures["tests"]=="pending" and (" requested by " in bot_status.description): - signatures["tests"]="started" - if get_status_state("%s/unknown/release" % cms_status_prefix, commit_statuses) == "error": - signatures["tests"]="pending" - if signatures["tests"]=="started" and new_bot_tests: - lab_stats = {} - for status in commit_statuses: - if not status.context.startswith(cms_status_prefix+"/"): continue - cdata = status.context.split("/") - if cdata[-1] not in ["optional", "required"]: + # Some of the special users can say "hold" prevent automatic merging of + # fully signed PRs. 
+ if re.match("^hold$", first_line, re.I): + if commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS): + hold[commenter] = 1 continue - if (cdata[-1] not in lab_stats) or (cdata[-1] == 'required'): lab_stats[cdata[-1]] = [] - lab_stats[cdata[-1]].append("pending") - if status.state == "pending": + if re.match(REGEX_EX_CMDS, first_line, re.I): + if commenter_categories or (commenter in releaseManagers + [requestor]): + check_extra_labels(first_line.lower(), extra_labels) continue - scontext = "/".join(cdata[:-1]) - all_states = {} - result_url = "" - for s in [i for i in commit_statuses if ((i.context==scontext) or (i.context.startswith(scontext+"/")))]: - if (not result_url) and ('/jenkins-artifacts/' in s.target_url): - xdata = s.target_url.split("/") - while xdata and (not xdata[-2].startswith('PR-')): - xdata.pop() - if xdata: result_url = "/".join(xdata) - if s.context == status.context: continue - if s.state not in all_states: all_states[s.state] = [] - all_states[s.state].append(s.context) - print("Test status for %s: %s" % (status.context, all_states)) - if "pending" in all_states: - if status.description.startswith("Finished"): - print("Some test might have been restarted for %s. Resetting the status" % status.context) - if not dryRun: - last_commit_obj.create_status("success", description="OK", target_url=status.target_url, context=status.context) + if re.match(REGEX_TYPE_CMDS, first_line, re.I): + if commenter_categories or (commenter in releaseManagers + [requestor]): + valid_labs = check_type_labels(first_line.lower(), extra_labels) + if not dryRun: + if valid_labs: + set_comment_emoji(comment.id, repository, emoji="+1") + else: + set_comment_emoji(comment.id, repository, emoji="-1") + if re.match(REGEX_EX_IGNORE_CHKS, first_line, re.I): + if valid_commenter: + ignore_tests = check_ignore_bot_tests(first_line.split(" ", 1)[-1]) continue - if "success" in all_states: - lab_stats[cdata[-1]][-1] = "success" - if "error" in all_states: - if [c for c in all_states['error'] if ('/opt/' not in c)]: - lab_stats[cdata[-1]][-1] = "error" - print("Final Status:",status.context,cdata[-1],lab_stats[cdata[-1]][-1],status.description) - if (lab_stats[cdata[-1]][-1] != "pending") and (not status.description.startswith("Finished")): - if result_url: - url = result_url.replace("/SDT/jenkins-artifacts/", "/SDT/cgi-bin/get_pr_results/jenkins-artifacts/")+"/pr-result" - print("PR Result:", url) - e, o = run_cmd("curl -k -s -L --max-time 60 %s" % url) - if e: - print(o) - raise Exception("System-error: unable to get PR result") - if o and (not dryRun): - res="+1" - if lab_stats[cdata[-1]][-1]=="error": res="-1" - res = "%s\n\n%s" % (res, o) - issue.create_comment(res) + if re.match(REGEX_EX_ENABLE_TESTS, first_line, re.I): + if valid_commenter: + enable_tests, ignore = check_enable_bot_tests(first_line.split(" ", 1)[-1]) + if not dryRun: + set_comment_emoji(comment.id, repository, emoji="+1") + continue + if re.match("^allow\s+@([^ ]+)\s+test\s+rights$", first_line, re.I): + if commenter_categories or (commenter in releaseManagers): + tester = first_line.split("@", 1)[-1].split(" ", 1)[0] + if not tester in TRIGGER_PR_TESTS: + TRIGGER_PR_TESTS.append(tester) + extra_testers.append(tester) + print("Added user in test category:", tester) + continue + if re.match("^unhold$", first_line, re.I): + if "orp" in commenter_categories: + hold = {} + elif commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS): + if commenter in hold: + del hold[commenter] + continue + if 
(commenter == cmsbuild_user) and (re.match("^" + HOLD_MSG + ".+", first_line)): + for u in first_line.split(HOLD_MSG, 2)[1].split(","): + u = u.strip().lstrip("@") + if u in hold: + hold[u] = 0 + if CLOSE_REQUEST.match(first_line): + if (commenter_categories or (commenter in releaseManagers)) or ( + (not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS) + ): + reOpen = False + if issue.state == "open": + mustClose = True + print("==>Closing request received from %s" % commenter) + continue + if REOPEN_REQUEST.match(first_line): + if (commenter_categories or (commenter in releaseManagers)) or ( + (not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS) + ): + mustClose = False + if (issue.state == "closed") and (comment.created_at >= issue.closed_at): + reOpen = True + print("==>Reopen request received from %s" % commenter) + continue + if valid_commenter: + valid_multiline_comment, test_params, test_params_m = multiline_check_function( + first_line, comment_lines, repository + ) + if test_params_m: + test_params_msg = str(comment.id) + ":" + test_params_m + test_params_comment = comment + elif valid_multiline_comment: + test_params_comment = comment + global_test_params = dict(test_params) + if "ENABLE_BOT_TESTS" in global_test_params: + enable_tests = global_test_params["ENABLE_BOT_TESTS"] + test_params_msg = str(comment.id) + ":" + dumps(global_test_params, sort_keys=True) + continue + + if cmssw_repo: + m = CODE_CHECKS_REGEXP.match(first_line) + if m: + first_line = "code-checks" + code_check_apply_patch = False + if m.group(1): + code_checks_tools = m.group(1).strip().split(" ")[-1] + if m.group(2): + code_check_apply_patch = True + + # Check L2 signoff for users in this PR signing categories + if [x for x in commenter_categories if x in signing_categories]: + ctype = "" + selected_cats = [] + if re.match("^([+]1|approve[d]?|sign|signed)$", first_line, re.I): + ctype = "+1" + selected_cats = commenter_categories + elif re.match("^([-]1|reject|rejected)$", first_line, re.I): + ctype = "-1" + selected_cats = commenter_categories + elif re.match("^[+-][a-z][a-z0-9-]+$", first_line, re.I): + category_name = first_line[1:].lower() + if category_name in commenter_categories: + ctype = first_line[0] + "1" + selected_cats = [category_name] + if ctype == "+1": + for sign in selected_cats: + signatures[sign] = "approved" + if (test_comment is None) and ( + (repository in auto_test_repo) or ("*" in auto_test_repo) + ): + test_comment = comment + if sign == "orp": + mustClose = False + elif ctype == "-1": + for sign in selected_cats: + signatures[sign] = "rejected" + if sign == "orp": + mustClose = False + + # Ignore all other messages which are before last commit. 
+ if issue.pull_request and (comment.created_at < last_commit_date): + continue + + if cmssw_repo and first_line == "code-checks": + signatures[first_line] = "pending" + if first_line not in pre_checks + extra_pre_checks: + extra_pre_checks.append(first_line) + if code_checks_status and (code_checks_status[0].updated_at >= comment.created_at): + continue + if first_line in pre_checks: + if pre_checks_state["code-checks"] in ["pending", ""]: + continue + elif pre_checks_state["code-checks"] in ["pending"]: + continue + pre_checks_state["code-checks"] = "" + print("Found:Code Checks request", code_checks_tools) + continue + + # Check for cmsbuild_user comments and tests requests only for pull requests + if commenter == cmsbuild_user: + if not issue.pull_request and not push_test_issue: + continue + sec_line = comment_lines[1:2] + if not sec_line: + sec_line = "" + else: + sec_line = sec_line[0] + if re.match("Comparison is ready", first_line): + if ("tests" in signatures) and signatures["tests"] != "pending": + comparison_done = True + elif "-code-checks" == first_line: + signatures["code-checks"] = "rejected" + pre_checks_url["code-checks"] = comment.html_url + elif "+code-checks" == first_line: + signatures["code-checks"] = "approved" + pre_checks_url["code-checks"] = comment.html_url + elif re.match("^Comparison not run.+", first_line): + if ("tests" in signatures) and signatures["tests"] != "pending": + comparison_notrun = True + elif re.match(FAILED_TESTS_MSG, first_line) or re.match( + IGNORING_TESTS_MSG, first_line + ): + signatures["tests"] = "pending" + elif re.match("Pull request ([^ #]+|)[#][0-9]+ was updated[.].*", first_line): + pull_request_updated = False + elif re.match(TRIGERING_TESTS_MSG, first_line) or re.match( + TRIGERING_TESTS_MSG1, first_line + ): + signatures["tests"] = "started" + last_test_start_time = comment.created_at + abort_test = None + need_external = False + if sec_line.startswith("Using externals from cms-sw/cmsdist#"): + need_external = True + elif sec_line.startswith("Tested with other pull request"): + need_external = True + elif sec_line.startswith("Using extra pull request"): + need_external = True + elif re.match(TESTS_RESULTS_MSG, first_line): + test_sha = sec_line.replace("Tested at: ", "").strip() + if ( + (not push_test_issue) + and (test_sha != last_commit.sha) + and (test_sha != "UNKNOWN") + and (not "I had the issue " in first_line) + ): + print("Ignoring test results for sha:", test_sha) + continue + comparison_done = False + comparison_notrun = False + comp_warnings = False + if "+1" in first_line: + signatures["tests"] = "approved" + comp_warnings = ( + len([1 for l in comment_lines if "Compilation Warnings: Yes" in l]) > 0 + ) + pre_checks_url["tests"] = comment.html_url + elif "-1" in first_line: + signatures["tests"] = "rejected" + pre_checks_url["tests"] = comment.html_url + else: + signatures["tests"] = "pending" + print( + "Previous tests already finished, resetting test request state to ", + signatures["tests"], + ) + + if issue.pull_request or push_test_issue: + # Check if the release manager asked for merging this. 
+ if ((commenter in releaseManagers) or ("orp" in commenter_categories)) and re.match( + "^\s*(merge)\s*$", first_line, re.I + ): + mustMerge = True + mustClose = False + if ("orp" in commenter_categories) and ("orp" in signatures): + signatures["orp"] = "approved" + continue + + # Check if the someone asked to trigger the tests + if valid_commenter: + ok, v2, v3, v4 = check_test_cmd(first_line, repository, global_test_params) + if ok: + test_comment = comment + abort_test = None + cmssw_prs = v2 + extra_wfs = v3 + release_queue = v4 + release_arch = "" + if "/" in release_queue: + release_queue, release_arch = release_queue.split("/", 1) + elif re.match("^" + ARCH_PATTERN + "$", release_queue): + release_arch = release_queue + release_queue = "" + print( + "Tests requested:", + commenter, + "asked to test this PR with cmssw_prs=%s, release_queue=%s, arch=%s and workflows=%s" + % (cmssw_prs, release_queue, release_arch, extra_wfs), + ) + print("Comment message:", first_line) + signatures["tests"] = "pending" + continue + elif REGEX_TEST_ABORT.match(first_line) and (signatures["tests"] == "pending"): + abort_test = comment + test_comment = None + signatures["tests"] = "pending" + elif REGEX_TEST_IGNORE.match(first_line) and (signatures["tests"] == "rejected"): + reason = REGEX_TEST_IGNORE.match(first_line)[1].strip() + if reason not in TEST_IGNORE_REASON: + print("Invalid ignore reason:", reason) + set_comment_emoji(comment.id, repository, "-1") + reason = "" + + if reason: + signatures["tests"] = reason + set_comment_emoji(comment.id, repository) + + # end of parsing comments section + + if push_test_issue: + auto_close_push_test_issue = True + try: + auto_close_push_test_issue = repo_config.AUTO_CLOSE_PUSH_TESTS_ISSUE + except: + pass + if ( + auto_close_push_test_issue + and (issue.state == "open") + and ("tests" in signatures) + and ((signatures["tests"] in ["approved", "rejected"]) or abort_test) + ): + print("Closing the issue as it has been tested/aborted") if not dryRun: - last_commit_obj.create_status("success", description="Finished", target_url=status.target_url, context=status.context) - print("Lab Status",lab_stats) - lab_state = "required" - if lab_state not in lab_stats: lab_state = "optional" - if (lab_state in lab_stats) and ("pending" not in lab_stats[lab_state]): - signatures["tests"]="approved" - if "error" in lab_stats[lab_state]: - signatures["tests"]="rejected" - elif not bot_status: - if not dryRun: - last_commit_obj.create_status("pending", description="Waiting for authorized user to issue the test command.", context=bot_status_name) - else: - print("DryRun: Setting status Waiting for authorized user to issue the test command.") - - # Labels coming from signature. 
- labels = [] - for cat in signing_categories: - l = cat+"-pending" - if cat in signatures: l = cat+"-"+signatures[cat] - labels.append(l) - - if not issue.pull_request and len(signing_categories)==0: - labels.append("pending-assignment") - if is_hold: labels.append("hold") - - if "backport" in extra_labels: - if backport_pr_num!=extra_labels["backport"][1]: - try: - bp_pr = repo.get_pull(int(extra_labels["backport"][1])) - backport_pr_num=extra_labels["backport"][1] - if bp_pr.merged: extra_labels["backport"][0]="backport-ok" - except Exception as e : - print("Error: Unknown PR", backport_pr_num,"\n",e) - backport_pr_num="" - extra_labels.pop("backport") - - if already_seen: - if dryRun: print("Update PR seen message to include backport PR number",backport_pr_num) + issue.edit(state="closed") + if abort_test: + job, bnum = get_jenkins_job(issue) + if job and bnum: + params = {} + params["JENKINS_PROJECT_TO_KILL"] = job + params["JENKINS_BUILD_NUMBER"] = bnum + create_property_file("trigger-abort-%s" % job, params, dryRun) + return + + is_hold = len(hold) > 0 + new_blocker = False + blockers = "" + for u in hold: + blockers += " " + gh_user_char + u + "," + if hold[u]: + new_blocker = True + blockers = blockers.rstrip(",") + + new_assign_cats = [] + for ex_cat in assign_cats: + if assign_cats[ex_cat] == 1: + continue + new_assign_cats.append(ex_cat) + + print("All assigned cats:", ",".join(list(assign_cats.keys()))) + print("Newly assigned cats:", ",".join(new_assign_cats)) + print("Ignore tests:", ignore_tests) + print("Enable tests:", enable_tests) + print("Tests: %s" % (cmssw_prs)) + print("Abort:", abort_test) + print("Test:", test_comment, bot_status) + + dryRunOrig = dryRun + for cat in pre_checks: + if (cat in signatures) and (signatures[cat] != "approved"): + dryRun = True + break + + old_labels = set([x.name.encode("ascii", "ignore").decode() for x in issue.labels]) + print("Stats:", backport_pr_num, extra_labels) + print("Old Labels:", sorted(old_labels)) + print("Compilation Warnings: ", comp_warnings) + print("Singnatures: ", signatures) + if "mtype" in extra_labels: + extra_labels["mtype"] = list(set(extra_labels["mtype"])) + if "type" in extra_labels: + extra_labels["type"] = [extra_labels["type"][-1]] + + # Always set test pending label + if "tests" in signatures: + if test_comment is not None: + turl = test_comment.html_url + if bot_status: + print( + "BOT STATUS:\n %s\n %s\n %s\n %s" + % ( + bot_status, + bot_status.description, + bot_status.target_url, + test_comment.html_url, + ) + ) + if bot_status and bot_status.description.startswith("Old style tests"): + new_bot_tests = False + elif (not bot_status) and (signatures["tests"] != "pending"): + new_bot_tests = False + if (not bot_status) or (bot_status.target_url != turl): + if bot_status or (signatures["tests"] == "pending"): + new_bot_tests = True + trigger_test = True + signatures["tests"] = "started" + desc = "requested by %s at %s UTC." 
% ( + test_comment.user.login.encode("ascii", "ignore").decode(), + test_comment.created_at, + ) + if not new_bot_tests: + desc = "Old style tests %s" % desc + else: + desc = "Tests %s" % desc + print(desc) + if not dryRun: + last_commit_obj.create_status( + "success", description=desc, target_url=turl, context=bot_status_name + ) + set_comment_emoji(test_comment.id, repository) + if bot_status: + print(bot_status.target_url, turl, signatures["tests"], bot_status.description) + if ( + bot_status + and bot_status.target_url == turl + and signatures["tests"] == "pending" + and (" requested by " in bot_status.description) + ): + signatures["tests"] = "started" + if ( + get_status_state("%s/unknown/release" % cms_status_prefix, commit_statuses) + == "error" + ): + signatures["tests"] = "pending" + if signatures["tests"] == "started" and new_bot_tests: + lab_stats = {} + for status in commit_statuses: + if not status.context.startswith(cms_status_prefix + "/"): + continue + cdata = status.context.split("/") + if cdata[-1] not in ["optional", "required"]: + continue + if (cdata[-1] not in lab_stats) or (cdata[-1] == "required"): + lab_stats[cdata[-1]] = [] + lab_stats[cdata[-1]].append("pending") + if status.state == "pending": + continue + scontext = "/".join(cdata[:-1]) + all_states = {} + result_url = "" + for s in [ + i + for i in commit_statuses + if ((i.context == scontext) or (i.context.startswith(scontext + "/"))) + ]: + if (not result_url) and ("/jenkins-artifacts/" in s.target_url): + xdata = s.target_url.split("/") + while xdata and (not xdata[-2].startswith("PR-")): + xdata.pop() + if xdata: + result_url = "/".join(xdata) + if s.context == status.context: + continue + if s.state not in all_states: + all_states[s.state] = [] + all_states[s.state].append(s.context) + print("Test status for %s: %s" % (status.context, all_states)) + if "pending" in all_states: + if status.description.startswith("Finished"): + print( + "Some test might have been restarted for %s. 
Resetting the status" + % status.context + ) + if not dryRun: + last_commit_obj.create_status( + "success", + description="OK", + target_url=status.target_url, + context=status.context, + ) + continue + if "success" in all_states: + lab_stats[cdata[-1]][-1] = "success" + if "error" in all_states: + if [c for c in all_states["error"] if ("/opt/" not in c)]: + lab_stats[cdata[-1]][-1] = "error" + print( + "Final Status:", + status.context, + cdata[-1], + lab_stats[cdata[-1]][-1], + status.description, + ) + if (lab_stats[cdata[-1]][-1] != "pending") and ( + not status.description.startswith("Finished") + ): + if result_url: + url = ( + result_url.replace( + "/SDT/jenkins-artifacts/", + "/SDT/cgi-bin/get_pr_results/jenkins-artifacts/", + ) + + "/pr-result" + ) + print("PR Result:", url) + e, o = run_cmd("curl -k -s -L --max-time 60 %s" % url) + if e: + print(o) + raise Exception("System-error: unable to get PR result") + if o and (not dryRun): + res = "+1" + if lab_stats[cdata[-1]][-1] == "error": + res = "-1" + res = "%s\n\n%s" % (res, o) + issue.create_comment(res) + if not dryRun: + last_commit_obj.create_status( + "success", + description="Finished", + target_url=status.target_url, + context=status.context, + ) + print("Lab Status", lab_stats) + lab_state = "required" + if lab_state not in lab_stats: + lab_state = "optional" + if (lab_state in lab_stats) and ("pending" not in lab_stats[lab_state]): + signatures["tests"] = "approved" + if "error" in lab_stats[lab_state]: + signatures["tests"] = "rejected" + elif not bot_status: + if not dryRun: + last_commit_obj.create_status( + "pending", + description="Waiting for authorized user to issue the test command.", + context=bot_status_name, + ) + else: + print( + "DryRun: Setting status Waiting for authorized user to issue the test command." + ) + + # Labels coming from signature. 
+ labels = [] + for cat in signing_categories: + l = cat + "-pending" + if cat in signatures: + l = cat + "-" + signatures[cat] + labels.append(l) + + if not issue.pull_request and len(signing_categories) == 0: + labels.append("pending-assignment") + if is_hold: + labels.append("hold") + + if "backport" in extra_labels: + if backport_pr_num != extra_labels["backport"][1]: + try: + bp_pr = repo.get_pull(int(extra_labels["backport"][1])) + backport_pr_num = extra_labels["backport"][1] + if bp_pr.merged: + extra_labels["backport"][0] = "backport-ok" + except Exception as e: + print("Error: Unknown PR", backport_pr_num, "\n", e) + backport_pr_num = "" + extra_labels.pop("backport") + + if already_seen: + if dryRun: + print("Update PR seen message to include backport PR number", backport_pr_num) + else: + new_msg = "" + for l in already_seen.body.encode("ascii", "ignore").decode().split("\n"): + if BACKPORT_STR in l: + continue + new_msg += l + "\n" + if backport_pr_num: + new_msg = "%s%s%s\n" % (new_msg, BACKPORT_STR, backport_pr_num) + already_seen.edit(body=new_msg) + elif "backport-ok" in old_labels: + extra_labels["backport"][0] = "backport-ok" + + # Add additional labels + for lab in extra_testers: + labels.append("allow-" + lab) + for lab in extra_labels: + if lab != "mtype": + labels.append(extra_labels[lab][0]) + else: + for slab in extra_labels[lab]: + labels.append(slab) + if comp_warnings: + labels.append("compilation-warnings") + + if cms_repo and issue.pull_request and (not new_bot_tests): + if comparison_done: + labels.append("comparison-available") + elif comparison_notrun: + labels.append("comparison-notrun") else: - new_msg = "" - for l in already_seen.body.encode("ascii", "ignore").decode().split("\n"): - if BACKPORT_STR in l: continue - new_msg += l+"\n" - if backport_pr_num: new_msg="%s%s%s\n" % (new_msg, BACKPORT_STR, backport_pr_num) - already_seen.edit(body=new_msg) - elif ("backport-ok" in old_labels): - extra_labels["backport"][0]="backport-ok" - - # Add additional labels - for lab in extra_testers: labels.append("allow-"+lab) - for lab in extra_labels: - if lab != "mtype": - labels.append(extra_labels[lab][0]) + labels.append("comparison-pending") + + if ("PULL_REQUESTS" in global_test_params) or cmssw_prs: + need_external = True + # Now updated the labels. 
+ xlabs = ["backport", "urgent", "backport-ok", "compilation-warnings"] + for lab in TYPE_COMMANDS: + xlabs.append(lab) + + if set(labels).intersection(set("tests-" + x for x in TEST_IGNORE_REASON)): + labels.append("tests-approved") + + missingApprovals = [ + x + for x in labels + if not x.endswith("-approved") + and not x.startswith("orp") + and not x.startswith("tests") + and not x.startswith("pending-assignment") + and not x.startswith("comparison") + and not x.startswith("code-checks") + and not x.startswith("allow-") + and not x in xlabs + ] + + if not missingApprovals: + print("The pull request is complete.") + if missingApprovals: + labels.append("pending-signatures") + elif not "pending-assignment" in labels: + labels.append("fully-signed") + if need_external: + labels.append("requires-external") + labels = set(labels) + print("New Labels:", sorted(labels)) + + new_categories = set([]) + for nc_lab in pkg_categories: + ncat = [nc_lab for oc_lab in old_labels if oc_lab.startswith(nc_lab + "-")] + if ncat: + continue + new_categories.add(nc_lab) + + if new_assign_cats: + new_l2s = [ + gh_user_char + name + for name, l2_categories in list(CMSSW_L2.items()) + for signature in new_assign_cats + if signature in l2_categories + ] + if not dryRun: + issue.create_comment( + "New categories assigned: " + + ",".join(new_assign_cats) + + "\n\n" + + ",".join(new_l2s) + + " you have been requested to review this Pull request/Issue and eventually sign? Thanks" + ) + + # update blocker massge + if new_blocker: + if not dryRun: + issue.create_comment( + HOLD_MSG + + blockers + + "\nThey need to issue an `unhold` command to remove the `hold` state or L1 can `unhold` it for all" + ) + print("Blockers:", blockers) + + print("Changed Labels:", labels - old_labels, old_labels - labels) + if old_labels == labels: + print("Labels unchanged.") + elif not dryRunOrig: + add_labels = True + try: + add_labels = repo_config.ADD_LABELS + except: + pass + if add_labels: + issue.edit(labels=list(labels)) + + # Check if it needs to be automatically closed. + if mustClose: + if issue.state == "open": + print("This pull request must be closed.") + if not dryRunOrig: + issue.edit(state="closed") + elif reOpen: + if issue.state == "closed": + print("This pull request must be reopened.") + if not dryRunOrig: + issue.edit(state="open") + + if not issue.pull_request: + issueMessage = None + if not already_seen: + backport_msg = "" + if backport_pr_num: + backport_msg = "%s%s\n" % (BACKPORT_STR, backport_pr_num) + uname = "" + if issue.user.name: + uname = issue.user.name.encode("ascii", "ignore").decode() + l2s = ", ".join([gh_user_char + name for name in CMSSW_ISSUES_TRACKERS]) + issueMessage = format( + "%(msgPrefix)s %(gh_user_char)s%(user)s" + " %(name)s.\n\n" + "%(l2s)s can you please review it and eventually sign/assign?" + " Thanks.\n\n" + 'cms-bot commands are listed here\n%(backport_msg)s', + msgPrefix=NEW_ISSUE_PREFIX, + user=requestor, + gh_user_char=gh_user_char, + name=uname, + backport_msg=backport_msg, + l2s=l2s, + ) + elif ("fully-signed" in labels) and (not "fully-signed" in old_labels): + issueMessage = "This issue is fully signed and ready to be closed." 
+ print("Issue Message:", issueMessage) + if issueMessage and not dryRun: + issue.create_comment(issueMessage) + return + + # get release managers + SUPER_USERS = read_repo_file(repo_config, "super-users.yaml", []) + releaseManagersList = ", ".join([gh_user_char + x for x in set(releaseManagers + SUPER_USERS)]) + + if cmssw_prs: + global_test_params["PULL_REQUESTS"] = cmssw_prs + if extra_wfs: + global_test_params["MATRIX_EXTRAS"] = extra_wfs + if release_queue: + global_test_params["RELEASE_FORMAT"] = release_queue + if not "PULL_REQUESTS" in global_test_params: + global_test_params["PULL_REQUESTS"] = "%s#%s" % (repository, prId) else: - for slab in extra_labels[lab]: - labels.append(slab) - if comp_warnings: labels.append("compilation-warnings") - - if cms_repo and issue.pull_request and (not new_bot_tests): - if comparison_done: - labels.append("comparison-available") - elif comparison_notrun: - labels.append("comparison-notrun") + global_test_params["PULL_REQUESTS"] = "%s#%s %s" % ( + repository, + prId, + global_test_params["PULL_REQUESTS"], + ) + if ignore_tests: + if ignore_tests == "NONE": + ignore_tests = "" + global_test_params["IGNORE_BOT_TESTS"] = ignore_tests + if enable_tests: + if enable_tests == "NONE": + enable_tests = "" + global_test_params["ENABLE_BOT_TESTS"] = enable_tests + if release_arch: + global_test_params["ARCHITECTURE_FILTER"] = release_arch + global_test_params["EXTRA_RELVALS_TESTS"] = " ".join( + [t.upper().replace("-", "_") for t in EXTRA_RELVALS_TESTS] + ) + + print("All Parameters:", global_test_params) + # For now, only trigger tests for cms-sw/cmssw and cms-sw/cmsdist + if create_test_property: + global_test_params["CONTEXT_PREFIX"] = cms_status_prefix + if trigger_test: + create_properties_file_tests( + repository, prId, global_test_params, dryRun, abort=False, repo_config=repo_config + ) + if not dryRun: + set_comment_emoji(test_comment.id, repository) + elif abort_test and bot_status and (not bot_status.description.startswith("Aborted")): + if not has_user_emoji(abort_test, repository, "+1", cmsbuild_user): + create_properties_file_tests( + repository, prId, global_test_params, dryRun, abort=True + ) + if not dryRun: + set_comment_emoji(abort_test.id, repository) + last_commit_obj.create_status( + "pending", + description="Aborted, waiting for authorized user to issue the test command.", + target_url=abort_test.html_url, + context=bot_status_name, + ) + + # Do not complain about tests + requiresTestMessage = " after it passes the integration tests" + if "tests-approved" in labels: + requiresTestMessage = " (tests are also fine)" + elif "tests-rejected" in labels: + requiresTestMessage = " (but tests are reportedly failing)" + elif labels.intersection(set("tests-" + x for x in TEST_IGNORE_REASON)): + requiresTestMessage = " (test failures were overridden)" + + autoMergeMsg = "" + if ( + ("fully-signed" in labels) + and ("tests-approved" in labels) + and ((not "orp" in signatures) or (signatures["orp"] == "approved")) + ): + autoMergeMsg = "This pull request will be automatically merged." else: - labels.append("comparison-pending") - - if ('PULL_REQUESTS' in global_test_params) or cmssw_prs: - need_external = True - # Now updated the labels. 
- xlabs = ["backport", "urgent", "backport-ok", "compilation-warnings"] - for lab in TYPE_COMMANDS: xlabs.append(lab) - missingApprovals = [x - for x in labels - if not x.endswith("-approved") - and not x.startswith("orp") - and not x.startswith("tests") - and not x.startswith("pending-assignment") - and not x.startswith("comparison") - and not x.startswith("code-checks") - and not x.startswith("allow-") - and not x in xlabs] - - if not missingApprovals: - print("The pull request is complete.") - if missingApprovals: - labels.append("pending-signatures") - elif not "pending-assignment" in labels: - labels.append("fully-signed") - if need_external: labels.append("requires-external") - labels = set(labels) - print("New Labels:", sorted(labels)) - - new_categories = set ([]) - for nc_lab in pkg_categories: - ncat = [ nc_lab for oc_lab in old_labels if oc_lab.startswith(nc_lab+'-') ] - if ncat: continue - new_categories.add(nc_lab) - - if new_assign_cats: - new_l2s = [gh_user_char + name - for name, l2_categories in list(CMSSW_L2.items()) - for signature in new_assign_cats - if signature in l2_categories] - if not dryRun: issue.create_comment("New categories assigned: "+",".join(new_assign_cats)+"\n\n"+",".join(new_l2s)+" you have been requested to review this Pull request/Issue and eventually sign? Thanks") - - #update blocker massge - if new_blocker: - if not dryRun: issue.create_comment(HOLD_MSG+blockers+'\nThey need to issue an `unhold` command to remove the `hold` state or L1 can `unhold` it for all') - print("Blockers:",blockers) - - print("Changed Labels:",labels-old_labels,old_labels-labels) - if old_labels == labels: - print("Labels unchanged.") - elif not dryRunOrig: - add_labels = True - try: add_labels = repo_config.ADD_LABELS - except: pass - if add_labels: issue.edit(labels=list(labels)) - - # Check if it needs to be automatically closed. - if mustClose: - if issue.state == "open": - print("This pull request must be closed.") - if not dryRunOrig: issue.edit(state="closed") - elif reOpen: - if issue.state == "closed": - print("This pull request must be reopened.") - if not dryRunOrig: issue.edit(state="open") - - if not issue.pull_request: - issueMessage = None - if not already_seen: - backport_msg="" - if backport_pr_num: backport_msg="%s%s\n" % (BACKPORT_STR,backport_pr_num) - uname = "" - if issue.user.name: uname = issue.user.name.encode("ascii", "ignore").decode() - l2s = ", ".join([ gh_user_char + name for name in CMSSW_ISSUES_TRACKERS ]) - issueMessage = format("%(msgPrefix)s %(gh_user_char)s%(user)s" - " %(name)s.\n\n" - "%(l2s)s can you please review it and eventually sign/assign?" - " Thanks.\n\n" - "cms-bot commands are listed here\n%(backport_msg)s", - msgPrefix=NEW_ISSUE_PREFIX, - user=requestor, - gh_user_char=gh_user_char, - name=uname, - backport_msg=backport_msg, - l2s=l2s) - elif ("fully-signed" in labels) and (not "fully-signed" in old_labels): - issueMessage = "This issue is fully signed and ready to be closed." 
- print("Issue Message:",issueMessage) - if issueMessage and not dryRun: issue.create_comment(issueMessage) - return - - # get release managers - SUPER_USERS = read_repo_file(repo_config, "super-users.yaml", []) - releaseManagersList = ", ".join([gh_user_char + x for x in set(releaseManagers + SUPER_USERS)]) - - if cmssw_prs: - global_test_params['PULL_REQUESTS'] = cmssw_prs - if extra_wfs: - global_test_params['MATRIX_EXTRAS'] = extra_wfs - if release_queue: - global_test_params['RELEASE_FORMAT'] = release_queue - if not 'PULL_REQUESTS' in global_test_params: - global_test_params['PULL_REQUESTS'] = '%s#%s' % (repository, prId) - else: - global_test_params['PULL_REQUESTS'] = '%s#%s %s' % (repository, prId, global_test_params['PULL_REQUESTS']) - if ignore_tests: - if ignore_tests == 'NONE': ignore_tests = '' - global_test_params['IGNORE_BOT_TESTS'] = ignore_tests - if enable_tests: - if enable_tests == 'NONE': enable_tests = '' - global_test_params['ENABLE_BOT_TESTS'] = enable_tests - if release_arch: - global_test_params['ARCHITECTURE_FILTER'] = release_arch - global_test_params['EXTRA_RELVALS_TESTS'] = " ".join([ t.upper().replace("-", "_") for t in EXTRA_RELVALS_TESTS]) - - print("All Parameters:",global_test_params) - #For now, only trigger tests for cms-sw/cmssw and cms-sw/cmsdist - if create_test_property: - global_test_params["CONTEXT_PREFIX"] = cms_status_prefix - if trigger_test: - create_properties_file_tests(repository, prId, global_test_params, dryRun, abort=False, repo_config=repo_config) - if not dryRun: - set_comment_emoji(test_comment.id, repository) - elif abort_test and bot_status and (not bot_status.description.startswith("Aborted")): - if not has_user_emoji(abort_test, repository, "+1", cmsbuild_user): - create_properties_file_tests(repository, prId, global_test_params, dryRun, abort=True) + if is_hold: + autoMergeMsg = format( + "This PR is put on hold by %(blockers)s. They have" + " to `unhold` to remove the `hold` state or" + " %(managers)s will have to `merge` it by" + " hand.", + blockers=blockers, + managers=releaseManagersList, + ) + elif "new-package-pending" in labels: + autoMergeMsg = format( + "This pull request requires a new package and " + " will not be merged. %(managers)s", + managers=releaseManagersList, + ) + elif ("orp" in signatures) and (signatures["orp"] != "approved"): + autoMergeMsg = format( + "This pull request will now be reviewed by the release team" + " before it's merged. %(managers)s (and backports should be raised in the release meeting by the corresponding L2)", + managers=releaseManagersList, + ) + + devReleaseRelVal = "" + if (pr.base.ref in RELEASE_BRANCH_PRODUCTION) and (pr.base.ref != "master"): + devReleaseRelVal = ( + " and once validation in the development release cycle " + + CMSSW_DEVEL_BRANCH + + " is complete" + ) + + if ("fully-signed" in labels) and (not "fully-signed" in old_labels): + messageFullySigned = format( + "This pull request is fully signed and it will be" + " integrated in one of the next %(branch)s IBs" + "%(requiresTest)s" + "%(devReleaseRelVal)s." 
+ " %(autoMerge)s", + requiresTest=requiresTestMessage, + autoMerge=autoMergeMsg, + devReleaseRelVal=devReleaseRelVal, + branch=pr.base.ref, + ) + print("Fully signed message updated") if not dryRun: - set_comment_emoji(abort_test.id, repository) - last_commit_obj.create_status("pending", description="Aborted, waiting for authorized user to issue the test command.", target_url=abort_test.html_url, context=bot_status_name) - - # Do not complain about tests - requiresTestMessage = " after it passes the integration tests" - if "tests-approved" in labels: - requiresTestMessage = " (tests are also fine)" - elif "tests-rejected" in labels: - requiresTestMessage = " (but tests are reportedly failing)" - - autoMergeMsg = "" - if (("fully-signed" in labels) and ("tests-approved" in labels) and - ((not "orp" in signatures) or (signatures["orp"] == "approved"))): - autoMergeMsg = "This pull request will be automatically merged." - else: - if is_hold: - autoMergeMsg = format("This PR is put on hold by %(blockers)s. They have" - " to `unhold` to remove the `hold` state or" - " %(managers)s will have to `merge` it by" - " hand.", - blockers=blockers, - managers=releaseManagersList) - elif "new-package-pending" in labels: - autoMergeMsg = format("This pull request requires a new package and " - " will not be merged. %(managers)s", - managers=releaseManagersList) - elif ("orp" in signatures) and (signatures["orp"] != "approved"): - autoMergeMsg = format("This pull request will now be reviewed by the release team" - " before it's merged. %(managers)s (and backports should be raised in the release meeting by the corresponding L2)", - managers=releaseManagersList) - - devReleaseRelVal = "" - if (pr.base.ref in RELEASE_BRANCH_PRODUCTION) and (pr.base.ref != "master"): - devReleaseRelVal = " and once validation in the development release cycle "+CMSSW_DEVEL_BRANCH+" is complete" - - if ("fully-signed" in labels) and (not "fully-signed" in old_labels): - messageFullySigned = format("This pull request is fully signed and it will be" - " integrated in one of the next %(branch)s IBs" - "%(requiresTest)s" - "%(devReleaseRelVal)s." - " %(autoMerge)s", - requiresTest=requiresTestMessage, - autoMerge = autoMergeMsg, - devReleaseRelVal=devReleaseRelVal, - branch=pr.base.ref) - print("Fully signed message updated") - if not dryRun: issue.create_comment(messageFullySigned) - - unsigned = [k for (k, v) in list(signatures.items()) if v == "pending"] - missing_notifications = [gh_user_char + name - for name, l2_categories in list(CMSSW_L2.items()) - for signature in signing_categories - if signature in l2_categories - and signature in unsigned and signature not in ["orp"] ] - - missing_notifications = set(missing_notifications) - # Construct message for the watchers - watchersMsg = "" - if watchers: - watchersMsg = format("%(watchers)s this is something you requested to" - " watch as well.\n", - watchers=", ".join(watchers)) - # Construct message for the release managers. 
- managers = ", ".join([gh_user_char + x for x in releaseManagers]) - - releaseManagersMsg = "" - if releaseManagers: - releaseManagersMsg = format("%(managers)s you are the release manager for this.\n", - managers = managers) - - # Add a Warning if the pull request was done against a patch branch - if cmssw_repo: - warning_msg = '' - if 'patchX' in pr.base.ref: - print('Must warn that this is a patch branch') - base_release = pr.base.ref.replace( '_patchX', '' ) - base_release_branch = re.sub( '[0-9]+$', 'X', base_release ) - warning_msg = format("Note that this branch is designed for requested bug " - "fixes specific to the %(base_rel)s release.\nIf you " - "wish to make a pull request for the %(base_branch)s " - "release cycle, please use the %(base_branch)s branch instead\n", - base_rel=base_release, - base_branch=base_release_branch) - - # We do not want to spam people for the old pull requests. - pkg_msg = [] - for pkg in packages: - if pkg in package_categories: - pkg_msg.append("- %s (**%s**)" % (pkg, ", ".join(package_categories[pkg]))) + issue.create_comment(messageFullySigned) + + unsigned = [k for (k, v) in list(signatures.items()) if v == "pending"] + missing_notifications = [ + gh_user_char + name + for name, l2_categories in list(CMSSW_L2.items()) + for signature in signing_categories + if signature in l2_categories and signature in unsigned and signature not in ["orp"] + ] + + missing_notifications = set(missing_notifications) + # Construct message for the watchers + watchersMsg = "" + if watchers: + watchersMsg = format( + "%(watchers)s this is something you requested to" " watch as well.\n", + watchers=", ".join(watchers), + ) + # Construct message for the release managers. + managers = ", ".join([gh_user_char + x for x in releaseManagers]) + + releaseManagersMsg = "" + if releaseManagers: + releaseManagersMsg = format( + "%(managers)s you are the release manager for this.\n", managers=managers + ) + + # Add a Warning if the pull request was done against a patch branch + if cmssw_repo: + warning_msg = "" + if "patchX" in pr.base.ref: + print("Must warn that this is a patch branch") + base_release = pr.base.ref.replace("_patchX", "") + base_release_branch = re.sub("[0-9]+$", "X", base_release) + warning_msg = format( + "Note that this branch is designed for requested bug " + "fixes specific to the %(base_rel)s release.\nIf you " + "wish to make a pull request for the %(base_branch)s " + "release cycle, please use the %(base_branch)s branch instead\n", + base_rel=base_release, + base_branch=base_release_branch, + ) + + # We do not want to spam people for the old pull requests. + pkg_msg = [] + for pkg in packages: + if pkg in package_categories: + pkg_msg.append("- %s (**%s**)" % (pkg, ", ".join(package_categories[pkg]))) + else: + pkg_msg.append("- %s (**new**)" % pkg) + messageNewPR = format( + "%(msgPrefix)s %(gh_user_char)s%(user)s" + " %(name)s for %(branch)s.\n\n" + "It involves the following packages:\n\n" + "%(packages)s\n\n" + "%(new_package_message)s\n" + "%(l2s)s can you please review it and eventually sign?" 
+ " Thanks.\n" + "%(watchers)s" + "%(releaseManagers)s" + "%(patch_branch_warning)s\n" + 'cms-bot commands are listed here\n', + msgPrefix=NEW_PR_PREFIX, + user=pr.user.login, + gh_user_char=gh_user_char, + name=pr.user.name and "(%s)" % pr.user.name or "", + branch=pr.base.ref, + l2s=", ".join(missing_notifications), + packages="\n".join(pkg_msg), + new_package_message=new_package_message, + watchers=watchersMsg, + releaseManagers=releaseManagersMsg, + patch_branch_warning=warning_msg, + ) + + messageUpdatedPR = format( + "Pull request #%(pr)s was updated." + " %(signers)s can you please check and sign again.\n", + pr=pr.number, + signers=", ".join(missing_notifications), + ) + else: + messageNewPR = format( + "%(msgPrefix)s %(gh_user_char)s%(user)s" + " %(name)s for branch %(branch)s.\n\n" + "%(l2s)s can you please review it and eventually sign?" + " Thanks.\n" + "%(watchers)s" + "%(releaseManagers)s" + 'cms-bot commands are listed here\n', + msgPrefix=NEW_PR_PREFIX, + user=pr.user.login, + gh_user_char=gh_user_char, + name=pr.user.name and "(%s)" % pr.user.name or "", + branch=pr.base.ref, + l2s=", ".join(missing_notifications), + releaseManagers=releaseManagersMsg, + watchers=watchersMsg, + ) + + messageUpdatedPR = format("Pull request #%(pr)s was updated.", pr=pr.number) + + # Finally decide whether or not we should close the pull request: + messageBranchClosed = format( + "This branch is closed for updates." + " Closing this pull request.\n" + " Please bring this up in the ORP" + " meeting if really needed.\n" + ) + + commentMsg = "" + print("Status: Not see= %s, Updated: %s" % (already_seen, pull_request_updated)) + if is_closed_branch(pr.base.ref) and (pr.state != "closed"): + commentMsg = messageBranchClosed + elif (not already_seen) or pull_request_updated: + if not already_seen: + commentMsg = messageNewPR else: - pkg_msg.append("- %s (**new**)" % pkg) - messageNewPR = format("%(msgPrefix)s %(gh_user_char)s%(user)s" - " %(name)s for %(branch)s.\n\n" - "It involves the following packages:\n\n" - "%(packages)s\n\n" - "%(new_package_message)s\n" - "%(l2s)s can you please review it and eventually sign?" - " Thanks.\n" - "%(watchers)s" - "%(releaseManagers)s" - "%(patch_branch_warning)s\n" - "cms-bot commands are listed here\n", - msgPrefix=NEW_PR_PREFIX, - user=pr.user.login, - gh_user_char=gh_user_char, - name=pr.user.name and "(%s)" % pr.user.name or "", - branch=pr.base.ref, - l2s=", ".join(missing_notifications), - packages="\n".join(pkg_msg), - new_package_message=new_package_message, - watchers=watchersMsg, - releaseManagers=releaseManagersMsg, - patch_branch_warning=warning_msg) - - messageUpdatedPR = format("Pull request #%(pr)s was updated." - " %(signers)s can you please check and sign again.\n", - pr=pr.number, - signers=", ".join(missing_notifications)) - else: - messageNewPR = format("%(msgPrefix)s %(gh_user_char)s%(user)s" - " %(name)s for branch %(branch)s.\n\n" - "%(l2s)s can you please review it and eventually sign?" 
- " Thanks.\n" - "%(watchers)s" - "%(releaseManagers)s" - "cms-bot commands are listed here\n", - msgPrefix=NEW_PR_PREFIX, - user=pr.user.login, - gh_user_char=gh_user_char, - name=pr.user.name and "(%s)" % pr.user.name or "", - branch=pr.base.ref, - l2s=", ".join(missing_notifications), - releaseManagers=releaseManagersMsg, - watchers=watchersMsg) - - messageUpdatedPR = format("Pull request #%(pr)s was updated.", - pr=pr.number) - - # Finally decide whether or not we should close the pull request: - messageBranchClosed = format("This branch is closed for updates." - " Closing this pull request.\n" - " Please bring this up in the ORP" - " meeting if really needed.\n") - - commentMsg = "" - print("Status: Not see= %s, Updated: %s" % (already_seen, pull_request_updated)) - if is_closed_branch(pr.base.ref) and (pr.state != "closed"): - commentMsg = messageBranchClosed - elif (not already_seen) or pull_request_updated: - if not already_seen: commentMsg = messageNewPR - else: commentMsg = messageUpdatedPR - elif new_categories: - commentMsg = messageUpdatedPR - elif not missingApprovals: - print("Pull request is already fully signed. Not sending message.") - else: - print("Already notified L2 about " + str(pr.number)) - if commentMsg and not dryRun: - print("The following comment will be made:") - try: - print(commentMsg.decode("ascii", "replace")) - except: - pass - for pre_check in pre_checks+extra_pre_checks: - if pre_check not in signatures: signatures[pre_check] = "pending" - print("PRE CHECK: %s,%s,%s" % (pre_check, signatures[pre_check], pre_checks_state[pre_check])) - if signatures[pre_check]!="pending": - if pre_checks_state[pre_check] in ["pending", ""]: - state = "success" if signatures[pre_check]=="approved" else "error" - url = pre_checks_url[pre_check] - print("Setting status: %s,%s,%s" % (pre_check, state, url)) - if not dryRunOrig: - last_commit_obj.create_status(state, target_url=url, description="Check details", context="%s/%s" % (cms_status_prefix, pre_check)) - continue - if (not dryRunOrig) and (pre_checks_state[pre_check]==""): - params = {"PULL_REQUEST" : "%s" % (prId), "CONTEXT_PREFIX": cms_status_prefix} - if pre_check=="code-checks": - params["CMSSW_TOOL_CONF"] = code_checks_tools - params["APPLY_PATCH"] = str(code_check_apply_patch).lower() - create_properties_file_tests(repository, prId, params, dryRunOrig, abort=False, req_type=pre_check) - last_commit_obj.create_status("pending", description="%s requested" % pre_check, context="%s/%s" % (cms_status_prefix, pre_check)) + commentMsg = messageUpdatedPR + elif new_categories: + commentMsg = messageUpdatedPR + elif not missingApprovals: + print("Pull request is already fully signed. 
Not sending message.") + else: + print("Already notified L2 about " + str(pr.number)) + if commentMsg and not dryRun: + print("The following comment will be made:") + try: + print(commentMsg.decode("ascii", "replace")) + except: + pass + for pre_check in pre_checks + extra_pre_checks: + if pre_check not in signatures: + signatures[pre_check] = "pending" + print( + "PRE CHECK: %s,%s,%s" % (pre_check, signatures[pre_check], pre_checks_state[pre_check]) + ) + if signatures[pre_check] != "pending": + if pre_checks_state[pre_check] in ["pending", ""]: + state = "success" if signatures[pre_check] == "approved" else "error" + url = pre_checks_url[pre_check] + print("Setting status: %s,%s,%s" % (pre_check, state, url)) + if not dryRunOrig: + last_commit_obj.create_status( + state, + target_url=url, + description="Check details", + context="%s/%s" % (cms_status_prefix, pre_check), + ) + continue + if (not dryRunOrig) and (pre_checks_state[pre_check] == ""): + params = {"PULL_REQUEST": "%s" % (prId), "CONTEXT_PREFIX": cms_status_prefix} + if pre_check == "code-checks": + params["CMSSW_TOOL_CONF"] = code_checks_tools + params["APPLY_PATCH"] = str(code_check_apply_patch).lower() + create_properties_file_tests( + repository, prId, params, dryRunOrig, abort=False, req_type=pre_check + ) + last_commit_obj.create_status( + "pending", + description="%s requested" % pre_check, + context="%s/%s" % (cms_status_prefix, pre_check), + ) + else: + print("Dryrun: Setting pending status for %s" % pre_check) + + if commentMsg and not dryRun: + issue.create_comment(commentMsg) + + # Check if it needs to be automatically merged. + if all( + [ + "fully-signed" in labels, + "tests-approved" in labels, + "orp-approved" in labels, + not "hold" in labels, + not "new-package-pending" in labels, + ] + ): + print("This pull request can be automatically merged") + mustMerge = True else: - print("Dryrun: Setting pending status for %s" % pre_check) - - if commentMsg and not dryRun: - issue.create_comment(commentMsg) - - # Check if it needs to be automatically merged. - if all(["fully-signed" in labels, - "tests-approved" in labels, - "orp-approved" in labels, - not "hold" in labels, - not "new-package-pending" in labels]): - print("This pull request can be automatically merged") - mustMerge = True - else: - print("This pull request will not be automatically merged.") - if mustMerge == True: - print("This pull request must be merged.") - if not dryRun and (pr.state == "open"): pr.merge() - - state = get_status(bot_test_param_name, commit_statuses) - if len(test_params_msg)>140: test_params_msg=test_params_msg[:135]+"..." - if ((not state) and (test_params_msg!="")) or (state and state.description != test_params_msg): - if test_params_msg=="": test_params_msg="No special test parameter set." 
- print("Test params:",test_params_msg) - url = "" - if test_params_comment: - e = get_user_emoji(test_params_comment, repository, cmsbuild_user) - print(e) - if not dryRun: - emoji = "-1" if 'ERRORS: ' in test_params_msg else "+1" - if e and (e['content']!=emoji): - delete_comment_emoji(str(e['id']), test_params_comment.id, repository) - state = "success" if emoji=="+1" else "error" - last_commit_obj.create_status(state, description=test_params_msg, target_url=test_params_comment.html_url, context=bot_test_param_name) - if (not e) or (e['content']!=emoji): - set_comment_emoji(test_params_comment.id, repository, emoji=emoji) - if ack_comment: - state = get_status(bot_ack_name, commit_statuses) - if (not state) or (state.target_url != ack_comment.html_url): - desc = "Comment by %s at %s UTC processed." % (ack_comment.user.login.encode("ascii", "ignore").decode(), ack_comment.created_at) - print(desc) - if not dryRun: - last_commit_obj.create_status("success", description=desc, target_url=ack_comment.html_url, context=bot_ack_name) + print("This pull request will not be automatically merged.") + if mustMerge == True: + print("This pull request must be merged.") + if not dryRun and (pr.state == "open"): + pr.merge() + + state = get_status(bot_test_param_name, commit_statuses) + if len(test_params_msg) > 140: + test_params_msg = test_params_msg[:135] + "..." + if ((not state) and (test_params_msg != "")) or ( + state and state.description != test_params_msg + ): + if test_params_msg == "": + test_params_msg = "No special test parameter set." + print("Test params:", test_params_msg) + url = "" + if test_params_comment: + e = get_user_emoji(test_params_comment, repository, cmsbuild_user) + print(e) + if not dryRun: + emoji = "-1" if "ERRORS: " in test_params_msg else "+1" + if e and (e["content"] != emoji): + delete_comment_emoji(str(e["id"]), test_params_comment.id, repository) + state = "success" if emoji == "+1" else "error" + last_commit_obj.create_status( + state, + description=test_params_msg, + target_url=test_params_comment.html_url, + context=bot_test_param_name, + ) + if (not e) or (e["content"] != emoji): + set_comment_emoji(test_params_comment.id, repository, emoji=emoji) + if ack_comment: + state = get_status(bot_ack_name, commit_statuses) + if (not state) or (state.target_url != ack_comment.html_url): + desc = "Comment by %s at %s UTC processed." % ( + ack_comment.user.login.encode("ascii", "ignore").decode(), + ack_comment.created_at, + ) + print(desc) + if not dryRun: + last_commit_obj.create_status( + "success", + description=desc, + target_url=ack_comment.html_url, + context=bot_ack_name, + ) diff --git a/python/archived_argparse.py b/python/archived_argparse.py index bcea63c77a5a..10f057c0d611 100644 --- a/python/archived_argparse.py +++ b/python/archived_argparse.py @@ -64,29 +64,29 @@ still considered an implementation detail.) """ -__version__ = '1.4.0' # we use our own version number independant of the - # one in stdlib and we release this on pypi. +__version__ = "1.4.0" # we use our own version number independant of the +# one in stdlib and we release this on pypi. 
__external_lib__ = True # to make sure the tests really test THIS lib, - # not the builtin one in Python stdlib +# not the builtin one in Python stdlib __all__ = [ - 'ArgumentParser', - 'ArgumentError', - 'ArgumentTypeError', - 'FileType', - 'HelpFormatter', - 'ArgumentDefaultsHelpFormatter', - 'RawDescriptionHelpFormatter', - 'RawTextHelpFormatter', - 'Namespace', - 'Action', - 'ONE_OR_MORE', - 'OPTIONAL', - 'PARSER', - 'REMAINDER', - 'SUPPRESS', - 'ZERO_OR_MORE', + "ArgumentParser", + "ArgumentError", + "ArgumentTypeError", + "FileType", + "HelpFormatter", + "ArgumentDefaultsHelpFormatter", + "RawDescriptionHelpFormatter", + "RawTextHelpFormatter", + "Namespace", + "Action", + "ONE_OR_MORE", + "OPTIONAL", + "PARSER", + "REMAINDER", + "SUPPRESS", + "ZERO_OR_MORE", ] @@ -122,22 +122,23 @@ def sorted(iterable, reverse=False): def _callable(obj): - return hasattr(obj, '__call__') or hasattr(obj, '__bases__') + return hasattr(obj, "__call__") or hasattr(obj, "__bases__") -SUPPRESS = '==SUPPRESS==' +SUPPRESS = "==SUPPRESS==" -OPTIONAL = '?' -ZERO_OR_MORE = '*' -ONE_OR_MORE = '+' -PARSER = 'A...' -REMAINDER = '...' -_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' +OPTIONAL = "?" +ZERO_OR_MORE = "*" +ONE_OR_MORE = "+" +PARSER = "A..." +REMAINDER = "..." +_UNRECOGNIZED_ARGS_ATTR = "_unrecognized_args" # ============================= # Utility functions and classes # ============================= + class _AttributeHolder(object): """Abstract base class that provides __repr__. @@ -153,8 +154,8 @@ def __repr__(self): for arg in self._get_args(): arg_strings.append(repr(arg)) for name, value in self._get_kwargs(): - arg_strings.append('%s=%r' % (name, value)) - return '%s(%s)' % (type_name, ', '.join(arg_strings)) + arg_strings.append("%s=%r" % (name, value)) + return "%s(%s)" % (type_name, ", ".join(arg_strings)) def _get_kwargs(self): return sorted(self.__dict__.items()) @@ -173,6 +174,7 @@ def _ensure_value(namespace, name, value): # Formatting Help # =============== + class HelpFormatter(object): """Formatter for generating usage messages and argument help strings. @@ -180,16 +182,11 @@ class HelpFormatter(object): provided by the class are considered an implementation detail. """ - def __init__(self, - prog, - indent_increment=2, - max_help_position=24, - width=None): - + def __init__(self, prog, indent_increment=2, max_help_position=24, width=None): # default setting for width if width is None: try: - width = int(_os.environ['COLUMNS']) + width = int(_os.environ["COLUMNS"]) except (KeyError, ValueError): width = 80 width -= 2 @@ -206,8 +203,8 @@ def __init__(self, self._root_section = self._Section(self, None) self._current_section = self._root_section - self._whitespace_matcher = _re.compile(r'\s+') - self._long_break_matcher = _re.compile(r'\n\n\n+') + self._whitespace_matcher = _re.compile(r"\s+") + self._long_break_matcher = _re.compile(r"\n\n\n+") # =============================== # Section and indentation methods @@ -218,11 +215,10 @@ def _indent(self): def _dedent(self): self._current_indent -= self._indent_increment - assert self._current_indent >= 0, 'Indent decreased below 0.' + assert self._current_indent >= 0, "Indent decreased below 0." 
self._level -= 1 class _Section(object): - def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent @@ -242,17 +238,17 @@ def format_help(self): # return nothing if the section was empty if not item_help: - return '' + return "" # add the heading if the section was non-empty if self.heading is not SUPPRESS and self.heading is not None: current_indent = self.formatter._current_indent - heading = '%*s%s:\n' % (current_indent, '', self.heading) + heading = "%*s%s:\n" % (current_indent, "", self.heading) else: - heading = '' + heading = "" # join the section-initial newline, the heading and the help - return join(['\n', heading, item_help, '\n']) + return join(["\n", heading, item_help, "\n"]) def _add_item(self, func, args): self._current_section.items.append((func, args)) @@ -281,7 +277,6 @@ def add_usage(self, usage, actions, groups, prefix=None): def add_argument(self, action): if action.help is not SUPPRESS: - # find all invocations get_invocation = self._format_action_invocation invocations = [get_invocation(action)] @@ -291,8 +286,7 @@ def add_argument(self, action): # update the maximum item length invocation_length = max([len(s) for s in invocations]) action_length = invocation_length + self._current_indent - self._action_max_length = max(self._action_max_length, - action_length) + self._action_max_length = max(self._action_max_length, action_length) # add the item to the list self._add_item(self._format_action, [action]) @@ -307,18 +301,16 @@ def add_arguments(self, actions): def format_help(self): help = self._root_section.format_help() if help: - help = self._long_break_matcher.sub('\n\n', help) - help = help.strip('\n') + '\n' + help = self._long_break_matcher.sub("\n\n", help) + help = help.strip("\n") + "\n" return help def _join_parts(self, part_strings): - return ''.join([part - for part in part_strings - if part and part is not SUPPRESS]) + return "".join([part for part in part_strings if part and part is not SUPPRESS]) def _format_usage(self, usage, actions, groups, prefix): if prefix is None: - prefix = _('usage: ') + prefix = _("usage: ") # if usage is specified, use that if usage is not None: @@ -326,11 +318,11 @@ def _format_usage(self, usage, actions, groups, prefix): # if no optionals or positionals are available, usage is just prog elif usage is None and not actions: - usage = '%(prog)s' % dict(prog=self._prog) + usage = "%(prog)s" % dict(prog=self._prog) # if optionals and positionals are available, calculate usage elif usage is None: - prog = '%(prog)s' % dict(prog=self._prog) + prog = "%(prog)s" % dict(prog=self._prog) # split optionals from positionals optionals = [] @@ -344,20 +336,19 @@ def _format_usage(self, usage, actions, groups, prefix): # build full usage string format = self._format_actions_usage action_usage = format(optionals + positionals, groups) - usage = ' '.join([s for s in [prog, action_usage] if s]) + usage = " ".join([s for s in [prog, action_usage] if s]) # wrap the usage parts if it's too long text_width = self._width - self._current_indent if len(prefix) + len(usage) > text_width: - # break usage into wrappable parts - part_regexp = r'\(.*?\)+|\[.*?\]+|\S+' + part_regexp = r"\(.*?\)+|\[.*?\]+|\S+" opt_usage = format(optionals, groups) pos_usage = format(positionals, groups) opt_parts = _re.findall(part_regexp, opt_usage) pos_parts = _re.findall(part_regexp, pos_usage) - assert ' '.join(opt_parts) == opt_usage - assert ' '.join(pos_parts) == pos_usage + assert " ".join(opt_parts) == opt_usage + 
assert " ".join(pos_parts) == pos_usage # helper for wrapping lines def get_lines(parts, indent, prefix=None): @@ -369,20 +360,20 @@ def get_lines(parts, indent, prefix=None): line_len = len(indent) - 1 for part in parts: if line_len + 1 + len(part) > text_width: - lines.append(indent + ' '.join(line)) + lines.append(indent + " ".join(line)) line = [] line_len = len(indent) - 1 line.append(part) line_len += len(part) + 1 if line: - lines.append(indent + ' '.join(line)) + lines.append(indent + " ".join(line)) if prefix is not None: - lines[0] = lines[0][len(indent):] + lines[0] = lines[0][len(indent) :] return lines # if prog is short, follow it with optionals or positionals if len(prefix) + len(prog) <= 0.75 * text_width: - indent = ' ' * (len(prefix) + len(prog) + 1) + indent = " " * (len(prefix) + len(prog) + 1) if opt_parts: lines = get_lines([prog] + opt_parts, indent, prefix) lines.extend(get_lines(pos_parts, indent)) @@ -393,7 +384,7 @@ def get_lines(parts, indent, prefix=None): # if prog is long, put it on its own line else: - indent = ' ' * len(prefix) + indent = " " * len(prefix) parts = opt_parts + pos_parts lines = get_lines(parts, indent) if len(lines) > 1: @@ -403,10 +394,10 @@ def get_lines(parts, indent, prefix=None): lines = [prog] + lines # join lines into usage - usage = '\n'.join(lines) + usage = "\n".join(lines) # prefix with 'usage:' - return '%s%s\n\n' % (prefix, usage) + return "%s%s\n\n" % (prefix, usage) def _format_actions_usage(self, actions, groups): # find group indices and identify actions in groups @@ -424,30 +415,29 @@ def _format_actions_usage(self, actions, groups): group_actions.add(action) if not group.required: if start in inserts: - inserts[start] += ' [' + inserts[start] += " [" else: - inserts[start] = '[' - inserts[end] = ']' + inserts[start] = "[" + inserts[end] = "]" else: if start in inserts: - inserts[start] += ' (' + inserts[start] += " (" else: - inserts[start] = '(' - inserts[end] = ')' + inserts[start] = "(" + inserts[end] = ")" for i in range(start + 1, end): - inserts[i] = '|' + inserts[i] = "|" # collect all actions format strings parts = [] for i, action in enumerate(actions): - # suppressed arguments are marked with None # remove | separators for suppressed arguments if action.help is SUPPRESS: parts.append(None) - if inserts.get(i) == '|': + if inserts.get(i) == "|": inserts.pop(i) - elif inserts.get(i + 1) == '|': + elif inserts.get(i + 1) == "|": inserts.pop(i + 1) # produce all arg strings @@ -456,7 +446,7 @@ def _format_actions_usage(self, actions, groups): # if it's in a group, strip the outer [] if action in group_actions: - if part[0] == '[' and part[-1] == ']': + if part[0] == "[" and part[-1] == "]": part = part[1:-1] # add the action string to the list @@ -469,18 +459,18 @@ def _format_actions_usage(self, actions, groups): # if the Optional doesn't take a value, format is: # -s or --long if action.nargs == 0: - part = '%s' % option_string + part = "%s" % option_string # if the Optional takes a value, format is: # -s ARGS or --long ARGS else: default = action.dest.upper() args_string = self._format_args(action, default) - part = '%s %s' % (option_string, args_string) + part = "%s %s" % (option_string, args_string) # make it look optional if it's not required or in a group if not action.required and action not in group_actions: - part = '[%s]' % part + part = "[%s]" % part # add the action string to the list parts.append(part) @@ -490,50 +480,49 @@ def _format_actions_usage(self, actions, groups): parts[i:i] = [inserts[i]] # 
join all the action items with spaces - text = ' '.join([item for item in parts if item is not None]) + text = " ".join([item for item in parts if item is not None]) # clean up separators for mutually exclusive groups - open = r'[\[(]' - close = r'[\])]' - text = _re.sub(r'(%s) ' % open, r'\1', text) - text = _re.sub(r' (%s)' % close, r'\1', text) - text = _re.sub(r'%s *%s' % (open, close), r'', text) - text = _re.sub(r'\(([^|]*)\)', r'\1', text) + open = r"[\[(]" + close = r"[\])]" + text = _re.sub(r"(%s) " % open, r"\1", text) + text = _re.sub(r" (%s)" % close, r"\1", text) + text = _re.sub(r"%s *%s" % (open, close), r"", text) + text = _re.sub(r"\(([^|]*)\)", r"\1", text) text = text.strip() # return the text return text def _format_text(self, text): - if '%(prog)' in text: + if "%(prog)" in text: text = text % dict(prog=self._prog) text_width = self._width - self._current_indent - indent = ' ' * self._current_indent - return self._fill_text(text, text_width, indent) + '\n\n' + indent = " " * self._current_indent + return self._fill_text(text, text_width, indent) + "\n\n" def _format_action(self, action): # determine the required width and the entry label - help_position = min(self._action_max_length + 2, - self._max_help_position) + help_position = min(self._action_max_length + 2, self._max_help_position) help_width = self._width - help_position action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) # ho nelp; start on same line and add a final newline if not action.help: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup + tup = self._current_indent, "", action_header + action_header = "%*s%s\n" % tup # short action name; start on the same line and pad two spaces elif len(action_header) <= action_width: - tup = self._current_indent, '', action_width, action_header - action_header = '%*s%-*s ' % tup + tup = self._current_indent, "", action_width, action_header + action_header = "%*s%-*s " % tup indent_first = 0 # long action name; start on the next line else: - tup = self._current_indent, '', action_header - action_header = '%*s%s\n' % tup + tup = self._current_indent, "", action_header + action_header = "%*s%s\n" % tup indent_first = help_position # collect the pieces of the action help @@ -543,13 +532,13 @@ def _format_action(self, action): if action.help: help_text = self._expand_help(action) help_lines = self._split_lines(help_text, help_width) - parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) + parts.append("%*s%s\n" % (indent_first, "", help_lines[0])) for line in help_lines[1:]: - parts.append('%*s%s\n' % (help_position, '', line)) + parts.append("%*s%s\n" % (help_position, "", line)) # or add a newline if the description doesn't end with one - elif not action_header.endswith('\n'): - parts.append('\n') + elif not action_header.endswith("\n"): + parts.append("\n") # if there are any sub-actions, add their help as well for subaction in self._iter_indented_subactions(action): @@ -560,7 +549,7 @@ def _format_action(self, action): def _format_action_invocation(self, action): if not action.option_strings: - metavar, = self._metavar_formatter(action, action.dest)(1) + (metavar,) = self._metavar_formatter(action, action.dest)(1) return metavar else: @@ -577,16 +566,16 @@ def _format_action_invocation(self, action): default = action.dest.upper() args_string = self._format_args(action, default) for option_string in action.option_strings: - parts.append('%s %s' % (option_string, 
args_string)) + parts.append("%s %s" % (option_string, args_string)) - return ', '.join(parts) + return ", ".join(parts) def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: choice_strs = [str(choice) for choice in action.choices] - result = '{%s}' % ','.join(choice_strs) + result = "{%s}" % ",".join(choice_strs) else: result = default_metavar @@ -594,26 +583,27 @@ def format(tuple_size): if isinstance(result, tuple): return result else: - return (result, ) * tuple_size + return (result,) * tuple_size + return format def _format_args(self, action, default_metavar): get_metavar = self._metavar_formatter(action, default_metavar) if action.nargs is None: - result = '%s' % get_metavar(1) + result = "%s" % get_metavar(1) elif action.nargs == OPTIONAL: - result = '[%s]' % get_metavar(1) + result = "[%s]" % get_metavar(1) elif action.nargs == ZERO_OR_MORE: - result = '[%s [%s ...]]' % get_metavar(2) + result = "[%s [%s ...]]" % get_metavar(2) elif action.nargs == ONE_OR_MORE: - result = '%s [%s ...]' % get_metavar(2) + result = "%s [%s ...]" % get_metavar(2) elif action.nargs == REMAINDER: - result = '...' + result = "..." elif action.nargs == PARSER: - result = '%s ...' % get_metavar(1) + result = "%s ..." % get_metavar(1) else: - formats = ['%s' for _ in range(action.nargs)] - result = ' '.join(formats) % get_metavar(action.nargs) + formats = ["%s" for _ in range(action.nargs)] + result = " ".join(formats) % get_metavar(action.nargs) return result def _expand_help(self, action): @@ -622,11 +612,11 @@ def _expand_help(self, action): if params[name] is SUPPRESS: del params[name] for name in list(params): - if hasattr(params[name], '__name__'): + if hasattr(params[name], "__name__"): params[name] = params[name].__name__ - if params.get('choices') is not None: - choices_str = ', '.join([str(c) for c in params['choices']]) - params['choices'] = choices_str + if params.get("choices") is not None: + choices_str = ", ".join([str(c) for c in params["choices"]]) + params["choices"] = choices_str return self._get_help_string(action) % params def _iter_indented_subactions(self, action): @@ -641,13 +631,12 @@ def _iter_indented_subactions(self, action): self._dedent() def _split_lines(self, text, width): - text = self._whitespace_matcher.sub(' ', text).strip() + text = self._whitespace_matcher.sub(" ", text).strip() return _textwrap.wrap(text, width) def _fill_text(self, text, width, indent): - text = self._whitespace_matcher.sub(' ', text).strip() - return _textwrap.fill(text, width, initial_indent=indent, - subsequent_indent=indent) + text = self._whitespace_matcher.sub(" ", text).strip() + return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent) def _get_help_string(self, action): return action.help @@ -661,7 +650,7 @@ class RawDescriptionHelpFormatter(HelpFormatter): """ def _fill_text(self, text, width, indent): - return ''.join([indent + line for line in text.splitlines(True)]) + return "".join([indent + line for line in text.splitlines(True)]) class RawTextHelpFormatter(RawDescriptionHelpFormatter): @@ -684,11 +673,11 @@ class ArgumentDefaultsHelpFormatter(HelpFormatter): def _get_help_string(self, action): help = action.help - if '%(default)' not in action.help: + if "%(default)" not in action.help: if action.default is not SUPPRESS: defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: - help += ' (default: %(default)s)' + help += " 
(default: %(default)s)" return help @@ -696,11 +685,12 @@ def _get_help_string(self, action): # Options and Arguments # ===================== + def _get_action_name(argument): if argument is None: return None elif argument.option_strings: - return '/'.join(argument.option_strings) + return "/".join(argument.option_strings) elif argument.metavar not in (None, SUPPRESS): return argument.metavar elif argument.dest not in (None, SUPPRESS): @@ -722,15 +712,15 @@ def __init__(self, argument, message): def __str__(self): if self.argument_name is None: - format = '%(message)s' + format = "%(message)s" else: - format = 'argument %(argument_name)s: %(message)s' - return format % dict(message=self.message, - argument_name=self.argument_name) + format = "argument %(argument_name)s: %(message)s" + return format % dict(message=self.message, argument_name=self.argument_name) class ArgumentTypeError(Exception): """An error from trying to convert a command line string to a type.""" + pass @@ -738,6 +728,7 @@ class ArgumentTypeError(Exception): # Action classes # ============== + class Action(_AttributeHolder): """Information about how to convert command line strings to Python objects. @@ -789,17 +780,19 @@ class Action(_AttributeHolder): help string. If None, the 'dest' value will be used as the name. """ - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): + def __init__( + self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None, + ): self.option_strings = option_strings self.dest = dest self.nargs = nargs @@ -813,41 +806,44 @@ def __init__(self, def _get_kwargs(self): names = [ - 'option_strings', - 'dest', - 'nargs', - 'const', - 'default', - 'type', - 'choices', - 'help', - 'metavar', + "option_strings", + "dest", + "nargs", + "const", + "default", + "type", + "choices", + "help", + "metavar", ] return [(name, getattr(self, name)) for name in names] def __call__(self, parser, namespace, values, option_string=None): - raise NotImplementedError(_('.__call__() not defined')) + raise NotImplementedError(_(".__call__() not defined")) class _StoreAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): + def __init__( + self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None, + ): if nargs == 0: - raise ValueError('nargs for store actions must be > 0; if you ' - 'have nothing to store, actions such as store ' - 'true or store const may be more appropriate') + raise ValueError( + "nargs for store actions must be > 0; if you " + "have nothing to store, actions such as store " + "true or store const may be more appropriate" + ) if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) + raise ValueError("nargs must be %r to supply const" % OPTIONAL) super(_StoreAction, self).__init__( option_strings=option_strings, dest=dest, @@ -858,22 +854,17 @@ def __init__(self, choices=choices, required=required, help=help, - metavar=metavar) + metavar=metavar, + ) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, values) class _StoreConstAction(Action): - - def __init__(self, - option_strings, - 
dest, - const, - default=None, - required=False, - help=None, - metavar=None): + def __init__( + self, option_strings, dest, const, default=None, required=False, help=None, metavar=None + ): super(_StoreConstAction, self).__init__( option_strings=option_strings, dest=dest, @@ -881,65 +872,59 @@ def __init__(self, const=const, default=default, required=required, - help=help) + help=help, + ) def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.const) class _StoreTrueAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=False, - required=False, - help=None): + def __init__(self, option_strings, dest, default=False, required=False, help=None): super(_StoreTrueAction, self).__init__( option_strings=option_strings, dest=dest, const=True, default=default, required=required, - help=help) + help=help, + ) class _StoreFalseAction(_StoreConstAction): - - def __init__(self, - option_strings, - dest, - default=True, - required=False, - help=None): + def __init__(self, option_strings, dest, default=True, required=False, help=None): super(_StoreFalseAction, self).__init__( option_strings=option_strings, dest=dest, const=False, default=default, required=required, - help=help) + help=help, + ) class _AppendAction(Action): - - def __init__(self, - option_strings, - dest, - nargs=None, - const=None, - default=None, - type=None, - choices=None, - required=False, - help=None, - metavar=None): + def __init__( + self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None, + ): if nargs == 0: - raise ValueError('nargs for append actions must be > 0; if arg ' - 'strings are not supplying the value to append, ' - 'the append const action may be more appropriate') + raise ValueError( + "nargs for append actions must be > 0; if arg " + "strings are not supplying the value to append, " + "the append const action may be more appropriate" + ) if const is not None and nargs != OPTIONAL: - raise ValueError('nargs must be %r to supply const' % OPTIONAL) + raise ValueError("nargs must be %r to supply const" % OPTIONAL) super(_AppendAction, self).__init__( option_strings=option_strings, dest=dest, @@ -950,7 +935,8 @@ def __init__(self, choices=choices, required=required, help=help, - metavar=metavar) + metavar=metavar, + ) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) @@ -959,15 +945,9 @@ def __call__(self, parser, namespace, values, option_string=None): class _AppendConstAction(Action): - - def __init__(self, - option_strings, - dest, - const, - default=None, - required=False, - help=None, - metavar=None): + def __init__( + self, option_strings, dest, const, default=None, required=False, help=None, metavar=None + ): super(_AppendConstAction, self).__init__( option_strings=option_strings, dest=dest, @@ -976,7 +956,8 @@ def __init__(self, default=default, required=required, help=help, - metavar=metavar) + metavar=metavar, + ) def __call__(self, parser, namespace, values, option_string=None): items = _copy.copy(_ensure_value(namespace, self.dest, [])) @@ -985,20 +966,15 @@ def __call__(self, parser, namespace, values, option_string=None): class _CountAction(Action): - - def __init__(self, - option_strings, - dest, - default=None, - required=False, - help=None): + def __init__(self, option_strings, dest, default=None, required=False, help=None): super(_CountAction, 
self).__init__( option_strings=option_strings, dest=dest, nargs=0, default=default, required=required, - help=help) + help=help, + ) def __call__(self, parser, namespace, values, option_string=None): new_count = _ensure_value(namespace, self.dest, 0) + 1 @@ -1006,18 +982,10 @@ def __call__(self, parser, namespace, values, option_string=None): class _HelpAction(Action): - - def __init__(self, - option_strings, - dest=SUPPRESS, - default=SUPPRESS, - help=None): + def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None): super(_HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) + option_strings=option_strings, dest=dest, default=default, nargs=0, help=help + ) def __call__(self, parser, namespace, values, option_string=None): parser.print_help() @@ -1025,19 +993,17 @@ def __call__(self, parser, namespace, values, option_string=None): class _VersionAction(Action): - - def __init__(self, - option_strings, - version=None, - dest=SUPPRESS, - default=SUPPRESS, - help="show program's version number and exit"): + def __init__( + self, + option_strings, + version=None, + dest=SUPPRESS, + default=SUPPRESS, + help="show program's version number and exit", + ): super(_VersionAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help=help) + option_strings=option_strings, dest=dest, default=default, nargs=0, help=help + ) self.version = version def __call__(self, parser, namespace, values, option_string=None): @@ -1050,25 +1016,15 @@ def __call__(self, parser, namespace, values, option_string=None): class _SubParsersAction(Action): - class _ChoicesPseudoAction(Action): - def __init__(self, name, aliases, help): metavar = dest = name if aliases: - metavar += ' (%s)' % ', '.join(aliases) + metavar += " (%s)" % ", ".join(aliases) sup = super(_SubParsersAction._ChoicesPseudoAction, self) - sup.__init__(option_strings=[], dest=dest, help=help, - metavar=metavar) - - def __init__(self, - option_strings, - prog, - parser_class, - dest=SUPPRESS, - help=None, - metavar=None): + sup.__init__(option_strings=[], dest=dest, help=help, metavar=metavar) + def __init__(self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None): self._prog_prefix = prog self._parser_class = parser_class self._name_parser_map = {} @@ -1080,18 +1036,19 @@ def __init__(self, nargs=PARSER, choices=self._name_parser_map, help=help, - metavar=metavar) + metavar=metavar, + ) def add_parser(self, name, **kwargs): # set prog from the existing prefix - if kwargs.get('prog') is None: - kwargs['prog'] = '%s %s' % (self._prog_prefix, name) + if kwargs.get("prog") is None: + kwargs["prog"] = "%s %s" % (self._prog_prefix, name) - aliases = kwargs.pop('aliases', ()) + aliases = kwargs.pop("aliases", ()) # create a pseudo-action to hold the choice help - if 'help' in kwargs: - help = kwargs.pop('help') + if "help" in kwargs: + help = kwargs.pop("help") choice_action = self._ChoicesPseudoAction(name, aliases, help) self._choices_actions.append(choice_action) @@ -1120,8 +1077,8 @@ def __call__(self, parser, namespace, values, option_string=None): try: parser = self._name_parser_map[parser_name] except KeyError: - tup = parser_name, ', '.join(self._name_parser_map) - msg = _('unknown parser %r (choices: %s)' % tup) + tup = parser_name, ", ".join(self._name_parser_map) + msg = _("unknown parser %r (choices: %s)" % tup) raise ArgumentError(self, msg) # parse all the remaining options into the namespace 
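[Editor's note, hedged example] The archived_argparse.py hunks in this patch, including the ones above, only change string quoting, trailing commas, blank lines and line wrapping; the underlying logic appears unchanged, and the resulting style is consistent with the black formatter. A minimal sketch of reproducing that kind of reformatting programmatically, assuming black is installed and assuming a 99-character line length (both are assumptions; neither is stated in the hunks themselves):

    import black

    # Assumptions: black is available and the target line length is 99.
    mode = black.Mode(line_length=99)

    with open("python/archived_argparse.py") as fh:
        source = fh.read()

    # format_str returns the reformatted source; code behaviour is unchanged.
    formatted = black.format_str(source, mode=mode)

    with open("python/archived_argparse.py", "w") as fh:
        fh.write(formatted)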
@@ -1137,6 +1094,7 @@ def __call__(self, parser, namespace, values, option_string=None): # Type classes # ============== + class FileType(object): """Factory for creating file object types @@ -1150,16 +1108,16 @@ class FileType(object): the builtin open() function. """ - def __init__(self, mode='r', bufsize=None): + def __init__(self, mode="r", bufsize=None): self._mode = mode self._bufsize = bufsize def __call__(self, string): # the special argument "-" means sys.std{in,out} - if string == '-': - if 'r' in self._mode: + if string == "-": + if "r" in self._mode: return _sys.stdin - elif 'w' in self._mode: + elif "w" in self._mode: return _sys.stdout else: msg = _('argument "-" with mode %r' % self._mode) @@ -1178,13 +1136,15 @@ def __call__(self, string): def __repr__(self): args = [self._mode, self._bufsize] - args_str = ', '.join([repr(arg) for arg in args if arg is not None]) - return '%s(%s)' % (type(self).__name__, args_str) + args_str = ", ".join([repr(arg) for arg in args if arg is not None]) + return "%s(%s)" % (type(self).__name__, args_str) + # =========================== # Optional and Positional Parsing # =========================== + class Namespace(_AttributeHolder): """Simple object for storing attributes. @@ -1209,12 +1169,7 @@ def __contains__(self, key): class _ActionsContainer(object): - - def __init__(self, - description, - prefix_chars, - argument_default, - conflict_handler): + def __init__(self, description, prefix_chars, argument_default, conflict_handler): super(_ActionsContainer, self).__init__() self.description = description @@ -1226,17 +1181,17 @@ def __init__(self, self._registries = {} # register actions - self.register('action', None, _StoreAction) - self.register('action', 'store', _StoreAction) - self.register('action', 'store_const', _StoreConstAction) - self.register('action', 'store_true', _StoreTrueAction) - self.register('action', 'store_false', _StoreFalseAction) - self.register('action', 'append', _AppendAction) - self.register('action', 'append_const', _AppendConstAction) - self.register('action', 'count', _CountAction) - self.register('action', 'help', _HelpAction) - self.register('action', 'version', _VersionAction) - self.register('action', 'parsers', _SubParsersAction) + self.register("action", None, _StoreAction) + self.register("action", "store", _StoreAction) + self.register("action", "store_const", _StoreConstAction) + self.register("action", "store_true", _StoreTrueAction) + self.register("action", "store_false", _StoreFalseAction) + self.register("action", "append", _AppendAction) + self.register("action", "append_const", _AppendConstAction) + self.register("action", "count", _CountAction) + self.register("action", "help", _HelpAction) + self.register("action", "version", _VersionAction) + self.register("action", "parsers", _SubParsersAction) # raise an exception if the conflict handler is invalid self._get_handler() @@ -1253,7 +1208,7 @@ def __init__(self, self._defaults = {} # determines whether an "option" looks like a negative number - self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') + self._negative_number_matcher = _re.compile(r"^-\d+$|^-\d*\.\d+$") # whether or not there are any optionals that look like negative # numbers -- uses a list so it can be shared and edited @@ -1287,7 +1242,6 @@ def get_default(self, dest): return action.default return self._defaults.get(dest, None) - # ======================= # Adding argument actions # ======================= @@ -1302,8 +1256,8 @@ def add_argument(self, *args, 
**kwargs): # argument chars = self.prefix_chars if not args or len(args) == 1 and args[0][0] not in chars: - if args and 'dest' in kwargs: - raise ValueError('dest supplied twice for positional argument') + if args and "dest" in kwargs: + raise ValueError("dest supplied twice for positional argument") kwargs = self._get_positional_kwargs(*args, **kwargs) # otherwise, we're adding an optional argument @@ -1311,12 +1265,12 @@ def add_argument(self, *args, **kwargs): kwargs = self._get_optional_kwargs(*args, **kwargs) # if no default was supplied, use the parser-level default - if 'default' not in kwargs: - dest = kwargs['dest'] + if "default" not in kwargs: + dest = kwargs["dest"] if dest in self._defaults: - kwargs['default'] = self._defaults[dest] + kwargs["default"] = self._defaults[dest] elif self.argument_default is not None: - kwargs['default'] = self.argument_default + kwargs["default"] = self.argument_default # create the action object, and add it to the parser action_class = self._pop_action_class(kwargs) @@ -1325,9 +1279,9 @@ def add_argument(self, *args, **kwargs): action = action_class(**kwargs) # raise an error if the action type is not callable - type_func = self._registry_get('type', action.type, action.type) + type_func = self._registry_get("type", action.type, action.type) if not _callable(type_func): - raise ValueError('%r is not callable' % type_func) + raise ValueError("%r is not callable" % type_func) return self._add_action(action) @@ -1370,21 +1324,21 @@ def _add_container_actions(self, container): title_group_map = {} for group in self._action_groups: if group.title in title_group_map: - msg = _('cannot merge actions - two groups are named %r') + msg = _("cannot merge actions - two groups are named %r") raise ValueError(msg % (group.title)) title_group_map[group.title] = group # map each action to its group group_map = {} for group in container._action_groups: - # if a group with the title exists, use that, otherwise # create a new group matching the container's group if group.title not in title_group_map: title_group_map[group.title] = self.add_argument_group( title=group.title, description=group.description, - conflict_handler=group.conflict_handler) + conflict_handler=group.conflict_handler, + ) # map the actions to their new group for action in group._group_actions: @@ -1394,8 +1348,7 @@ def _add_container_actions(self, container): # NOTE: if add_mutually_exclusive_group ever gains title= and # description= then this code will need to be expanded as above for group in container._mutually_exclusive_groups: - mutex_group = self.add_mutually_exclusive_group( - required=group.required) + mutex_group = self.add_mutually_exclusive_group(required=group.required) # map the actions to their new mutex group for action in group._group_actions: @@ -1407,16 +1360,16 @@ def _add_container_actions(self, container): def _get_positional_kwargs(self, dest, **kwargs): # make sure required is not specified - if 'required' in kwargs: + if "required" in kwargs: msg = _("'required' is an invalid argument for positionals") raise TypeError(msg) # mark positional arguments as required if at least one is # always required - if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: - kwargs['required'] = True - if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: - kwargs['required'] = True + if kwargs.get("nargs") not in [OPTIONAL, ZERO_OR_MORE]: + kwargs["required"] = True + if kwargs.get("nargs") == ZERO_OR_MORE and "default" not in kwargs: + kwargs["required"] = True # 
return the keyword arguments with no option strings return dict(kwargs, dest=dest, option_strings=[]) @@ -1428,8 +1381,7 @@ def _get_optional_kwargs(self, *args, **kwargs): for option_string in args: # error on strings that don't start with an appropriate prefix if not option_string[0] in self.prefix_chars: - msg = _('invalid option string %r: ' - 'must start with a character %r') + msg = _("invalid option string %r: " "must start with a character %r") tup = option_string, self.prefix_chars raise ValueError(msg % tup) @@ -1441,7 +1393,7 @@ def _get_optional_kwargs(self, *args, **kwargs): long_option_strings.append(option_string) # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' - dest = kwargs.pop('dest', None) + dest = kwargs.pop("dest", None) if dest is None: if long_option_strings: dest_option_string = long_option_strings[0] @@ -1449,28 +1401,27 @@ def _get_optional_kwargs(self, *args, **kwargs): dest_option_string = option_strings[0] dest = dest_option_string.lstrip(self.prefix_chars) if not dest: - msg = _('dest= is required for options like %r') + msg = _("dest= is required for options like %r") raise ValueError(msg % option_string) - dest = dest.replace('-', '_') + dest = dest.replace("-", "_") # return the updated keyword arguments return dict(kwargs, dest=dest, option_strings=option_strings) def _pop_action_class(self, kwargs, default=None): - action = kwargs.pop('action', default) - return self._registry_get('action', action, action) + action = kwargs.pop("action", default) + return self._registry_get("action", action, action) def _get_handler(self): # determine function from conflict handler string - handler_func_name = '_handle_conflict_%s' % self.conflict_handler + handler_func_name = "_handle_conflict_%s" % self.conflict_handler try: return getattr(self, handler_func_name) except AttributeError: - msg = _('invalid conflict_resolution value: %r') + msg = _("invalid conflict_resolution value: %r") raise ValueError(msg % self.conflict_handler) def _check_conflict(self, action): - # find all options that conflict with this option confl_optionals = [] for option_string in action.option_strings: @@ -1484,17 +1435,15 @@ def _check_conflict(self, action): conflict_handler(action, confl_optionals) def _handle_conflict_error(self, action, conflicting_actions): - message = _('conflicting option string(s): %s') - conflict_string = ', '.join([option_string - for option_string, action - in conflicting_actions]) + message = _("conflicting option string(s): %s") + conflict_string = ", ".join( + [option_string for option_string, action in conflicting_actions] + ) raise ArgumentError(action, message % conflict_string) def _handle_conflict_resolve(self, action, conflicting_actions): - # remove all conflicting options for option_string, action in conflicting_actions: - # remove the conflicting option action.option_strings.remove(option_string) self._option_string_actions.pop(option_string, None) @@ -1506,13 +1455,12 @@ def _handle_conflict_resolve(self, action, conflicting_actions): class _ArgumentGroup(_ActionsContainer): - def __init__(self, container, title=None, description=None, **kwargs): # add any missing keyword arguments by checking the container update = kwargs.setdefault - update('conflict_handler', container.conflict_handler) - update('prefix_chars', container.prefix_chars) - update('argument_default', container.argument_default) + update("conflict_handler", container.conflict_handler) + update("prefix_chars", container.prefix_chars) + update("argument_default", 
container.argument_default) super_init = super(_ArgumentGroup, self).__init__ super_init(description=description, **kwargs) @@ -1525,8 +1473,7 @@ def __init__(self, container, title=None, description=None, **kwargs): self._actions = container._actions self._option_string_actions = container._option_string_actions self._defaults = container._defaults - self._has_negative_number_optionals = \ - container._has_negative_number_optionals + self._has_negative_number_optionals = container._has_negative_number_optionals def _add_action(self, action): action = super(_ArgumentGroup, self)._add_action(action) @@ -1539,7 +1486,6 @@ def _remove_action(self, action): class _MutuallyExclusiveGroup(_ArgumentGroup): - def __init__(self, container, required=False): super(_MutuallyExclusiveGroup, self).__init__(container) self.required = required @@ -1547,7 +1493,7 @@ def __init__(self, container, required=False): def _add_action(self, action): if action.required: - msg = _('mutually exclusive arguments must be optional') + msg = _("mutually exclusive arguments must be optional") raise ValueError(msg) action = self._container._add_action(action) self._group_actions.append(action) @@ -1576,33 +1522,39 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): - add_help -- Add a -h/-help option """ - def __init__(self, - prog=None, - usage=None, - description=None, - epilog=None, - version=None, - parents=[], - formatter_class=HelpFormatter, - prefix_chars='-', - fromfile_prefix_chars=None, - argument_default=None, - conflict_handler='error', - add_help=True): - + def __init__( + self, + prog=None, + usage=None, + description=None, + epilog=None, + version=None, + parents=[], + formatter_class=HelpFormatter, + prefix_chars="-", + fromfile_prefix_chars=None, + argument_default=None, + conflict_handler="error", + add_help=True, + ): if version is not None: import warnings + warnings.warn( """The "version" argument to ArgumentParser is deprecated. 
""" """Please use """ """"add_argument(..., action='version', version="N", ...)" """ - """instead""", DeprecationWarning) + """instead""", + DeprecationWarning, + ) superinit = super(ArgumentParser, self).__init__ - superinit(description=description, - prefix_chars=prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler) + superinit( + description=description, + prefix_chars=prefix_chars, + argument_default=argument_default, + conflict_handler=conflict_handler, + ) # default setting for prog if prog is None: @@ -1617,32 +1569,39 @@ def __init__(self, self.add_help = add_help add_group = self.add_argument_group - self._positionals = add_group(_('positional arguments')) - self._optionals = add_group(_('optional arguments')) + self._positionals = add_group(_("positional arguments")) + self._optionals = add_group(_("optional arguments")) self._subparsers = None # register types def identity(string): return string - self.register('type', None, identity) + + self.register("type", None, identity) # add help and version arguments if necessary # (using explicit default to override global argument_default) - if '-' in prefix_chars: - default_prefix = '-' + if "-" in prefix_chars: + default_prefix = "-" else: default_prefix = prefix_chars[0] if self.add_help: self.add_argument( - default_prefix+'h', default_prefix*2+'help', - action='help', default=SUPPRESS, - help=_('show this help message and exit')) + default_prefix + "h", + default_prefix * 2 + "help", + action="help", + default=SUPPRESS, + help=_("show this help message and exit"), + ) if self.version: self.add_argument( - default_prefix+'v', default_prefix*2+'version', - action='version', default=SUPPRESS, + default_prefix + "v", + default_prefix * 2 + "version", + action="version", + default=SUPPRESS, version=self.version, - help=_("show program's version number and exit")) + help=_("show program's version number and exit"), + ) # add parent arguments and defaults for parent in parents: @@ -1659,13 +1618,13 @@ def identity(string): # ======================= def _get_kwargs(self): names = [ - 'prog', - 'usage', - 'description', - 'version', - 'formatter_class', - 'conflict_handler', - 'add_help', + "prog", + "usage", + "description", + "version", + "formatter_class", + "conflict_handler", + "add_help", ] return [(name, getattr(self, name)) for name in names] @@ -1674,29 +1633,29 @@ def _get_kwargs(self): # ================================== def add_subparsers(self, **kwargs): if self._subparsers is not None: - self.error(_('cannot have multiple subparser arguments')) + self.error(_("cannot have multiple subparser arguments")) # add the parser class to the arguments if it's not present - kwargs.setdefault('parser_class', type(self)) + kwargs.setdefault("parser_class", type(self)) - if 'title' in kwargs or 'description' in kwargs: - title = _(kwargs.pop('title', 'subcommands')) - description = _(kwargs.pop('description', None)) + if "title" in kwargs or "description" in kwargs: + title = _(kwargs.pop("title", "subcommands")) + description = _(kwargs.pop("description", None)) self._subparsers = self.add_argument_group(title, description) else: self._subparsers = self._positionals # prog defaults to the usage message of this parser, skipping # optional arguments and with no "usage:" prefix - if kwargs.get('prog') is None: + if kwargs.get("prog") is None: formatter = self._get_formatter() positionals = self._get_positional_actions() groups = self._mutually_exclusive_groups - formatter.add_usage(self.usage, positionals, 
groups, '') - kwargs['prog'] = formatter.format_help().strip() + formatter.add_usage(self.usage, positionals, groups, "") + kwargs["prog"] = formatter.format_help().strip() # create the parsers action and add it to the positionals list - parsers_class = self._pop_action_class(kwargs, 'parsers') + parsers_class = self._pop_action_class(kwargs, "parsers") action = parsers_class(option_strings=[], **kwargs) self._subparsers._add_action(action) @@ -1711,14 +1670,10 @@ def _add_action(self, action): return action def _get_optional_actions(self): - return [action - for action in self._actions - if action.option_strings] + return [action for action in self._actions if action.option_strings] def _get_positional_actions(self): - return [action - for action in self._actions - if not action.option_strings] + return [action for action in self._actions if not action.option_strings] # ===================================== # Command line argument parsing methods @@ -1726,8 +1681,8 @@ def _get_positional_actions(self): def parse_args(self, args=None, namespace=None): args, argv = self.parse_known_args(args, namespace) if argv: - msg = _('unrecognized arguments: %s') - self.error(msg % ' '.join(argv)) + msg = _("unrecognized arguments: %s") + self.error(msg % " ".join(argv)) return args def parse_known_args(self, args=None, namespace=None): @@ -1775,7 +1730,7 @@ def _parse_known_args(self, arg_strings, namespace): for i, mutex_action in enumerate(mutex_group._group_actions): conflicts = action_conflicts.setdefault(mutex_action, []) conflicts.extend(group_actions[:i]) - conflicts.extend(group_actions[i + 1:]) + conflicts.extend(group_actions[i + 1 :]) # find all option indices, and determine the arg_string_pattern # which has an 'O' if there is an option at an index, @@ -1784,26 +1739,25 @@ def _parse_known_args(self, arg_strings, namespace): arg_string_pattern_parts = [] arg_strings_iter = iter(arg_strings) for i, arg_string in enumerate(arg_strings_iter): - # all args after -- are non-options - if arg_string == '--': - arg_string_pattern_parts.append('-') + if arg_string == "--": + arg_string_pattern_parts.append("-") for arg_string in arg_strings_iter: - arg_string_pattern_parts.append('A') + arg_string_pattern_parts.append("A") # otherwise, add the arg to the arg strings # and note the index if it was an option else: option_tuple = self._parse_optional(arg_string) if option_tuple is None: - pattern = 'A' + pattern = "A" else: option_string_indices[i] = option_tuple - pattern = 'O' + pattern = "O" arg_string_pattern_parts.append(pattern) # join the pieces together to form the pattern - arg_strings_pattern = ''.join(arg_string_pattern_parts) + arg_strings_pattern = "".join(arg_string_pattern_parts) # converts arg strings to the appropriate and then takes the action seen_actions = set() @@ -1820,7 +1774,7 @@ def take_action(action, argument_strings, option_string=None): seen_non_default_actions.add(action) for conflict_action in action_conflicts.get(action, []): if conflict_action in seen_non_default_actions: - msg = _('not allowed with argument %s') + msg = _("not allowed with argument %s") action_name = _get_action_name(conflict_action) raise ArgumentError(action, msg % action_name) @@ -1831,7 +1785,6 @@ def take_action(action, argument_strings, option_string=None): # function to convert arg_strings into an optional action def consume_optional(start_index): - # get the optional identified at this index option_tuple = option_string_indices[start_index] action, option_string, explicit_arg = option_tuple @@ 
-1841,7 +1794,6 @@ def consume_optional(start_index): match_argument = self._match_argument action_tuples = [] while True: - # if we found no optional action, skip it if action is None: extras.append(arg_strings[start_index]) @@ -1850,7 +1802,7 @@ def consume_optional(start_index): # if there is an explicit argument, try to match the # optional's string arguments to only this if explicit_arg is not None: - arg_count = match_argument(action, 'A') + arg_count = match_argument(action, "A") # if the action is a single-dash option and takes no # arguments, try to parse more single-dash options out @@ -1866,7 +1818,7 @@ def consume_optional(start_index): action = optionals_map[option_string] explicit_arg = new_explicit_arg else: - msg = _('ignored explicit argument %r') + msg = _("ignored explicit argument %r") raise ArgumentError(action, msg % explicit_arg) # if the action expect exactly one argument, we've @@ -1880,7 +1832,7 @@ def consume_optional(start_index): # error if a double-dash option did not use the # explicit argument else: - msg = _('ignored explicit argument %r') + msg = _("ignored explicit argument %r") raise ArgumentError(action, msg % explicit_arg) # if there is no explicit argument, try to match the @@ -1916,13 +1868,13 @@ def consume_positionals(start_index): # slice off the appropriate arg strings for each Positional # and add the Positional and its args to the list for action, arg_count in zip(positionals, arg_counts): - args = arg_strings[start_index: start_index + arg_count] + args = arg_strings[start_index : start_index + arg_count] start_index += arg_count take_action(action, args) # slice off the Positionals that we just parsed and return the # index at which the Positionals' string args stopped - positionals[:] = positionals[len(arg_counts):] + positionals[:] = positionals[len(arg_counts) :] return start_index # consume Positionals and Optionals alternately, until we have @@ -1934,12 +1886,10 @@ def consume_positionals(start_index): else: max_option_string_index = -1 while start_index <= max_option_string_index: - # consume any Positionals preceding the next option - next_option_string_index = min([ - index - for index in option_string_indices - if index >= start_index]) + next_option_string_index = min( + [index for index in option_string_indices if index >= start_index] + ) if start_index != next_option_string_index: positionals_end_index = consume_positionals(start_index) @@ -1970,25 +1920,26 @@ def consume_positionals(start_index): # if we didn't use all the Positional objects, there were too few # arg strings supplied. if positionals: - self.error(_('too few arguments')) + self.error(_("too few arguments")) # make sure all required actions were present, and convert defaults. 
for action in self._actions: if action not in seen_actions: if action.required: name = _get_action_name(action) - self.error(_('argument %s is required') % name) + self.error(_("argument %s is required") % name) else: # Convert action default now instead of doing it before # parsing arguments to avoid calling convert functions # twice (which may fail) if the argument was given, but # only if it was defined already in the namespace - if (action.default is not None and - isinstance(action.default, basestring) and - hasattr(namespace, action.dest) and - action.default is getattr(namespace, action.dest)): - setattr(namespace, action.dest, - self._get_value(action, action.default)) + if ( + action.default is not None + and isinstance(action.default, basestring) + and hasattr(namespace, action.dest) + and action.default is getattr(namespace, action.dest) + ): + setattr(namespace, action.dest, self._get_value(action, action.default)) # make sure all required groups had one option present for group in self._mutually_exclusive_groups: @@ -1999,11 +1950,13 @@ def consume_positionals(start_index): # if no actions were used, report the error else: - names = [_get_action_name(action) - for action in group._group_actions - if action.help is not SUPPRESS] - msg = _('one of the arguments %s is required') - self.error(msg % ' '.join(names)) + names = [ + _get_action_name(action) + for action in group._group_actions + if action.help is not SUPPRESS + ] + msg = _("one of the arguments %s is required") + self.error(msg % " ".join(names)) # return the updated namespace and the extra arguments return namespace, extras @@ -2012,7 +1965,6 @@ def _read_args_from_files(self, arg_strings): # expand arguments referencing files new_arg_strings = [] for arg_string in arg_strings: - # for regular arguments, just add them back into the list if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: new_arg_strings.append(arg_string) @@ -2048,11 +2000,11 @@ def _match_argument(self, action, arg_strings_pattern): # raise an exception if we weren't able to find a match if match is None: nargs_errors = { - None: _('expected one argument'), - OPTIONAL: _('expected at most one argument'), - ONE_OR_MORE: _('expected at least one argument'), + None: _("expected one argument"), + OPTIONAL: _("expected at most one argument"), + ONE_OR_MORE: _("expected at least one argument"), } - default = _('expected %s argument(s)') % action.nargs + default = _("expected %s argument(s)") % action.nargs msg = nargs_errors.get(action.nargs, default) raise ArgumentError(action, msg) @@ -2065,8 +2017,7 @@ def _match_arguments_partial(self, actions, arg_strings_pattern): result = [] for i in range(len(actions), 0, -1): actions_slice = actions[:i] - pattern = ''.join([self._get_nargs_pattern(action) - for action in actions_slice]) + pattern = "".join([self._get_nargs_pattern(action) for action in actions_slice]) match = _re.match(pattern, arg_strings_pattern) if match is not None: result.extend([len(string) for string in match.groups()]) @@ -2094,8 +2045,8 @@ def _parse_optional(self, arg_string): return None # if the option string before the "=" is present, return the action - if '=' in arg_string: - option_string, explicit_arg = arg_string.split('=', 1) + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) if option_string in self._option_string_actions: action = self._option_string_actions[option_string] return action, option_string, explicit_arg @@ -2106,15 +2057,16 @@ def _parse_optional(self, arg_string): # 
if multiple actions match, the option string was ambiguous if len(option_tuples) > 1: - options = ', '.join([option_string - for action, option_string, explicit_arg in option_tuples]) + options = ", ".join( + [option_string for action, option_string, explicit_arg in option_tuples] + ) tup = arg_string, options - self.error(_('ambiguous option: %s could match %s') % tup) + self.error(_("ambiguous option: %s could match %s") % tup) # if exactly one action matched, this segmentation is good, # so return the parsed action elif len(option_tuples) == 1: - option_tuple, = option_tuples + (option_tuple,) = option_tuples return option_tuple # if it was not found as an option, but it looks like a negative @@ -2125,7 +2077,7 @@ def _parse_optional(self, arg_string): return None # if it contains a space, it was meant to be a positional - if ' ' in arg_string: + if " " in arg_string: return None # it was meant to be an optional but there is no such option @@ -2139,8 +2091,8 @@ def _get_option_tuples(self, option_string): # split at the '=' chars = self.prefix_chars if option_string[0] in chars and option_string[1] in chars: - if '=' in option_string: - option_prefix, explicit_arg = option_string.split('=', 1) + if "=" in option_string: + option_prefix, explicit_arg = option_string.split("=", 1) else: option_prefix = option_string explicit_arg = None @@ -2171,7 +2123,7 @@ def _get_option_tuples(self, option_string): # shouldn't ever get here else: - self.error(_('unexpected option string: %s') % option_string) + self.error(_("unexpected option string: %s") % option_string) # return the collected option tuples return result @@ -2183,36 +2135,36 @@ def _get_nargs_pattern(self, action): # the default (None) is assumed to be a single argument if nargs is None: - nargs_pattern = '(-*A-*)' + nargs_pattern = "(-*A-*)" # allow zero or one arguments elif nargs == OPTIONAL: - nargs_pattern = '(-*A?-*)' + nargs_pattern = "(-*A?-*)" # allow zero or more arguments elif nargs == ZERO_OR_MORE: - nargs_pattern = '(-*[A-]*)' + nargs_pattern = "(-*[A-]*)" # allow one or more arguments elif nargs == ONE_OR_MORE: - nargs_pattern = '(-*A[A-]*)' + nargs_pattern = "(-*A[A-]*)" # allow any number of options or arguments elif nargs == REMAINDER: - nargs_pattern = '([-AO]*)' + nargs_pattern = "([-AO]*)" # allow one argument followed by any number of options or arguments elif nargs == PARSER: - nargs_pattern = '(-*A[-AO]*)' + nargs_pattern = "(-*A[-AO]*)" # all others should be integers else: - nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) + nargs_pattern = "(-*%s-*)" % "-*".join("A" * nargs) # if this is an optional action, -- is not allowed if action.option_strings: - nargs_pattern = nargs_pattern.replace('-*', '') - nargs_pattern = nargs_pattern.replace('-', '') + nargs_pattern = nargs_pattern.replace("-*", "") + nargs_pattern = nargs_pattern.replace("-", "") # return the pattern return nargs_pattern @@ -2223,7 +2175,7 @@ def _get_nargs_pattern(self, action): def _get_values(self, action, arg_strings): # for everything but PARSER args, strip out '--' if action.nargs not in [PARSER, REMAINDER]: - arg_strings = [s for s in arg_strings if s != '--'] + arg_strings = [s for s in arg_strings if s != "--"] # optional argument produces a default when not present if not arg_strings and action.nargs == OPTIONAL: @@ -2237,8 +2189,7 @@ def _get_values(self, action, arg_strings): # when nargs='*' on a positional, if there were no command-line # args, use the default if it is anything other than None - elif (not arg_strings and 
action.nargs == ZERO_OR_MORE and - not action.option_strings): + elif not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings: if action.default is not None: value = action.default else: @@ -2247,7 +2198,7 @@ def _get_values(self, action, arg_strings): # single argument or optional argument produces a single value elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: - arg_string, = arg_strings + (arg_string,) = arg_strings value = self._get_value(action, arg_string) self._check_value(action, value) @@ -2270,9 +2221,9 @@ def _get_values(self, action, arg_strings): return value def _get_value(self, action, arg_string): - type_func = self._registry_get('type', action.type, action.type) + type_func = self._registry_get("type", action.type, action.type) if not _callable(type_func): - msg = _('%r is not callable') + msg = _("%r is not callable") raise ArgumentError(action, msg % type_func) # convert the value to the appropriate type @@ -2281,14 +2232,14 @@ def _get_value(self, action, arg_string): # ArgumentTypeErrors indicate errors except ArgumentTypeError: - name = getattr(action.type, '__name__', repr(action.type)) + name = getattr(action.type, "__name__", repr(action.type)) msg = str(_sys.exc_info()[1]) raise ArgumentError(action, msg) # TypeErrors or ValueErrors also indicate errors except (TypeError, ValueError): - name = getattr(action.type, '__name__', repr(action.type)) - msg = _('invalid %s value: %r') + name = getattr(action.type, "__name__", repr(action.type)) + msg = _("invalid %s value: %r") raise ArgumentError(action, msg % (name, arg_string)) # return the converted value @@ -2297,8 +2248,8 @@ def _get_value(self, action, arg_string): def _check_value(self, action, value): # converted value must be one of the choices (if specified) if action.choices is not None and value not in action.choices: - tup = value, ', '.join(map(repr, action.choices)) - msg = _('invalid choice: %r (choose from %s)') % tup + tup = value, ", ".join(map(repr, action.choices)) + msg = _("invalid choice: %r (choose from %s)") % tup raise ArgumentError(action, msg) # ======================= @@ -2306,16 +2257,14 @@ def _check_value(self, action, value): # ======================= def format_usage(self): formatter = self._get_formatter() - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) + formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) return formatter.format_help() def format_help(self): formatter = self._get_formatter() # usage - formatter.add_usage(self.usage, self._actions, - self._mutually_exclusive_groups) + formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups) # description formatter.add_text(self.description) @@ -2335,10 +2284,12 @@ def format_help(self): def format_version(self): import warnings + warnings.warn( 'The format_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) + "argument to ArgumentParser is no longer supported.", + DeprecationWarning, + ) formatter = self._get_formatter() formatter.add_text(self.version) return formatter.format_help() @@ -2361,10 +2312,12 @@ def print_help(self, file=None): def print_version(self, file=None): import warnings + warnings.warn( 'The print_version method is deprecated -- the "version" ' - 'argument to ArgumentParser is no longer supported.', - DeprecationWarning) + "argument to ArgumentParser is no longer supported.", + DeprecationWarning, + ) 
self._print_message(self.format_version(), file) def _print_message(self, message, file=None): @@ -2391,4 +2344,4 @@ def error(self, message): should either exit or raise an exception. """ self.print_usage(_sys.stderr) - self.exit(2, _('%s: error: %s\n') % (self.prog, message)) + self.exit(2, _("%s: error: %s\n") % (self.prog, message)) diff --git a/query-and-process-prs b/query-and-process-prs deleted file mode 100755 index d1f2121d83a6..000000000000 --- a/query-and-process-prs +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from github import Github -from os.path import expanduser, dirname, abspath, join, exists -from optparse import OptionParser -from datetime import datetime, timedelta -from socket import setdefaulttimeout -from github_utils import api_rate_limits -from github_hooks_config import get_repository_hooks -import sys -setdefaulttimeout(None) -SCRIPT_DIR = dirname(abspath(sys.argv[0])) - -def check_prs(gh, repo, since, process_issue, dryRun): - #if repo.full_name in ["cms-sw/cmsdist", "cms-sw/cmssw"]: return - if not get_repository_hooks(repo.full_name,"Jenkins_Github_Hook"): return - print("Working on Repository: ",repo.full_name) - if since: - issues = repo.get_issues(state="open", sort="updated", since=since) - else: - issues = repo.get_issues(state="open", sort="updated") - err=0 - for issue in issues: - if not process_issue and not issue.pull_request: - print("Only processing PRs, skipped issue: ",issue.number) - continue - try: - process_pr(repo_config, gh, repo, issue, dryRun) - except Exception as e : - print("ERROR: Failed to process",repo.full_name,issue.number) - print(e) - err=1 - return err - -if __name__ == "__main__": - parser = OptionParser(usage="%prog [-r|--repository ] [-i|--issue] [-s|--since ] [-n|--dry-run]") - parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False) - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name, default is cms-sw/cmssw. 
Use 'externals' to process all external repos.", type=str, default="cms-sw/cmssw") - parser.add_option("-s", "--since", dest="since", help="Pull request updated since time in sec", type="int", default=3600) - parser.add_option("-i", "--issue", dest="issue", action="store_true", help="Process github issues", default=False) - opts, args = parser.parse_args() - - since=None - if opts.since>0: - since = datetime.utcnow() - timedelta(seconds=opts.since) - - repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_")) - if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir) - import repo_config - from process_pr import process_pr - from categories import EXTERNAL_REPOS - gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) - api_rate_limits(gh) - repos = [] - if opts.repository != "externals": - repos.append(opts.repository) - else: - repos = EXTERNAL_REPOS - err=0 - for repo_name in repos: - if not "/" in repo_name: - user = gh.get_user(repo_name) - for repo in user.get_repos(): - err+=check_prs(gh, repo, since, opts.issue, opts.dryRun) - else: - err+=check_prs(gh, gh.get_repo(repo_name), since, opts.issue, opts.dryRun) - sys.exit(err) diff --git a/query-and-process-prs b/query-and-process-prs new file mode 120000 index 000000000000..c0514b68c8ab --- /dev/null +++ b/query-and-process-prs @@ -0,0 +1 @@ +query-and-process-prs.py \ No newline at end of file diff --git a/query-and-process-prs.py b/query-and-process-prs.py new file mode 100755 index 000000000000..1cfe1481fe0c --- /dev/null +++ b/query-and-process-prs.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +from __future__ import print_function +from github import Github +from os.path import expanduser, dirname, abspath, join, exists +from optparse import OptionParser +from datetime import datetime, timedelta +from socket import setdefaulttimeout +from github_utils import api_rate_limits +from github_hooks_config import get_repository_hooks +import sys + +setdefaulttimeout(None) +SCRIPT_DIR = dirname(abspath(sys.argv[0])) + + +def check_prs(gh, repo, since, process_issue, dryRun): + # if repo.full_name in ["cms-sw/cmsdist", "cms-sw/cmssw"]: return + if not get_repository_hooks(repo.full_name, "Jenkins_Github_Hook"): + return + print("Working on Repository: ", repo.full_name) + if since: + issues = repo.get_issues(state="open", sort="updated", since=since) + else: + issues = repo.get_issues(state="open", sort="updated") + err = 0 + for issue in issues: + if not process_issue and not issue.pull_request: + print("Only processing PRs, skipped issue: ", issue.number) + continue + try: + process_pr(repo_config, gh, repo, issue, dryRun) + except Exception as e: + print("ERROR: Failed to process", repo.full_name, issue.number) + print(e) + err = 1 + return err + + +if __name__ == "__main__": + parser = OptionParser( + usage="%prog [-r|--repository ] [-i|--issue] [-s|--since ] [-n|--dry-run]" + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not modify Github", + default=False, + ) + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name, default is cms-sw/cmssw. 
Use 'externals' to process all external repos.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-s", + "--since", + dest="since", + help="Pull request updated since time in sec", + type="int", + default=3600, + ) + parser.add_option( + "-i", + "--issue", + dest="issue", + action="store_true", + help="Process github issues", + default=False, + ) + opts, args = parser.parse_args() + + since = None + if opts.since > 0: + since = datetime.utcnow() - timedelta(seconds=opts.since) + + repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_")) + if exists(join(repo_dir, "repo_config.py")): + sys.path.insert(0, repo_dir) + import repo_config + from process_pr import process_pr + from categories import EXTERNAL_REPOS + + gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip()) + api_rate_limits(gh) + repos = [] + if opts.repository != "externals": + repos.append(opts.repository) + else: + repos = EXTERNAL_REPOS + err = 0 + for repo_name in repos: + if not "/" in repo_name: + user = gh.get_user(repo_name) + for repo in user.get_repos(): + err += check_prs(gh, repo, since, opts.issue, opts.dryRun) + else: + err += check_prs(gh, gh.get_repo(repo_name), since, opts.issue, opts.dryRun) + sys.exit(err) diff --git a/query-new-pull-requests b/query-new-pull-requests deleted file mode 100755 index ff48e6a4628a..000000000000 --- a/query-new-pull-requests +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -from github import Github -from os.path import expanduser -from optparse import OptionParser -from datetime import datetime, timedelta -import re - -if __name__ == "__main__": - parser = OptionParser(usage="%prog ") - parser.add_option("--repository", "-r", dest="repository", type=str, default="cms-sw/cmssw") - parser.add_option("--tests-pending", "-t", action="store_true", dest="only_tests_pending" , \ - help="Only show the pull requests that are pending for tests") - parser.add_option("--only-issues", "-i", action="store_true", dest="only_issues" , \ - help="Only show actual issues") - - opts, args = parser.parse_args() - if not len(args): - parser.error("Please specify the number of seconds since you want updates") - - since = datetime.utcnow() - timedelta(seconds=int(args[0])) - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - repo = gh.get_repo( opts.repository ) - - if opts.only_tests_pending: - queried_labels = [] - queried_labels.append( repo.get_label( "tests-pending" ) ) - issues = repo.get_issues( state="open" , labels=queried_labels , sort="updated" , since=since ) - else: - label = [ repo.get_label("release-build-request") ] - issues = repo.get_issues(state="open", sort="updated", since=since, labels=label) - - if opts.only_issues: - issues = [ i for i in issues if not i.pull_request ] - - print(" ".join([str(x.number) for x in issues])) diff --git a/query-new-pull-requests b/query-new-pull-requests new file mode 120000 index 000000000000..fd239fb56dae --- /dev/null +++ b/query-new-pull-requests @@ -0,0 +1 @@ +query-new-pull-requests.py \ No newline at end of file diff --git a/query-new-pull-requests.py b/query-new-pull-requests.py new file mode 100755 index 000000000000..7b0e921aa680 --- /dev/null +++ b/query-new-pull-requests.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +from __future__ import print_function +from github import Github +from os.path import expanduser +from optparse import OptionParser +from datetime import datetime, timedelta +import re + +if 
__name__ == "__main__": + parser = OptionParser(usage="%prog ") + parser.add_option("--repository", "-r", dest="repository", type=str, default="cms-sw/cmssw") + parser.add_option( + "--tests-pending", + "-t", + action="store_true", + dest="only_tests_pending", + help="Only show the pull requests that are pending for tests", + ) + parser.add_option( + "--only-issues", + "-i", + action="store_true", + dest="only_issues", + help="Only show actual issues", + ) + + opts, args = parser.parse_args() + if not len(args): + parser.error("Please specify the number of seconds since you want updates") + + since = datetime.utcnow() - timedelta(seconds=int(args[0])) + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + repo = gh.get_repo(opts.repository) + + if opts.only_tests_pending: + queried_labels = [] + queried_labels.append(repo.get_label("tests-pending")) + issues = repo.get_issues(state="open", labels=queried_labels, sort="updated", since=since) + else: + label = [repo.get_label("release-build-request")] + issues = repo.get_issues(state="open", sort="updated", since=since, labels=label) + + if opts.only_issues: + issues = [i for i in issues if not i.pull_request] + + print(" ".join([str(x.number) for x in issues])) diff --git a/reco_profiling/profileRunner.py b/reco_profiling/profileRunner.py index 0dad27a7d718..bb8888b36804 100644 --- a/reco_profiling/profileRunner.py +++ b/reco_profiling/profileRunner.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -#Profile runner for reco releases -#maintained by the CMS reco group +# Profile runner for reco releases +# maintained by the CMS reco group import subprocess import glob import sys @@ -8,7 +8,7 @@ import shutil workflow_configs = { - #Run3 HI workflow + # Run3 HI workflow "159.03": { "num_events": 100, "steps": { @@ -22,11 +22,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "standard" + "matrix": "standard", }, - #Run3 workflow + # Run3 workflow "11834.21": { "num_events": 400, "steps": { @@ -45,11 +45,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" + "matrix": "upgrade", }, - #Phase2 workflow used in mid-2021 + # Phase2 workflow used in mid-2021 "23434.21": { "num_events": 100, "steps": { @@ -68,11 +68,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" + "matrix": "upgrade", }, - #Phase2 workflow used in late-2021 + # Phase2 workflow used in late-2021 "34834.21": { "num_events": 100, "steps": { @@ -86,11 +86,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #Phase2 workflow used in early-2022 + "matrix": "upgrade", + }, + # Phase2 workflow used in early-2022 "35234.21": { "num_events": 100, "steps": { @@ -104,11 +104,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #Phase2 workflow used in mid-2022 + "matrix": "upgrade", + }, + # Phase2 workflow used in mid-2022 "39634.21": { "num_events": 100, "steps": { @@ -122,11 +122,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #Phase2 workflow renumbered in late-2022 + "matrix": "upgrade", + }, + # Phase2 workflow renumbered in late-2022 "21034.21": { "num_events": 100, "steps": { @@ -140,11 +140,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #Phase2 workflow used begin-2023 + "matrix": "upgrade", + }, + # Phase2 workflow used begin-2023 "23834.21": { "num_events": 100, 
"steps": { @@ -158,11 +158,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #Phase2 workflow used late-2023 + "matrix": "upgrade", + }, + # Phase2 workflow used late-2023 "25034.21": { "num_events": 100, "steps": { @@ -176,11 +176,11 @@ "FastTimer": True, "igprof": True, }, - }, + }, "nThreads": 1, - "matrix": "upgrade" - } , - #8-thread T0-like promptreco workflow + "matrix": "upgrade", + }, + # 8-thread T0-like promptreco workflow "136.889": { "num_events": 5000, "steps": { @@ -189,11 +189,11 @@ "FastTimer": False, "igprof": False, }, - }, + }, "nThreads": 8, - "matrix": "standard" + "matrix": "standard", }, - #2018 HI T0-like workflow + # 2018 HI T0-like workflow "140.56": { "num_events": 1000, "steps": { @@ -202,29 +202,31 @@ "FastTimer": False, "igprof": False, }, - }, + }, "nThreads": 8, - "matrix": "standard" + "matrix": "standard", }, } -#Prepare cmdLog and execute the workflow steps to get e.g. DAS entries, but call cmsRun with --no_exec + +# Prepare cmdLog and execute the workflow steps to get e.g. DAS entries, but call cmsRun with --no_exec def prepareMatrixWF(workflow_number, num_events, matrix="upgrade", nthreads=1): cmd = [ - "runTheMatrix.py", - "-w", - matrix, - "-l", - str(workflow_number), - "--command=\"--no_exec\"", - "--ibeos", - "--nThreads", - str(nthreads), + "runTheMatrix.py", + "-w", + matrix, + "-l", + str(workflow_number), + '--command="--no_exec"', + "--ibeos", + "--nThreads", + str(nthreads), ] cmd = " ".join(cmd) os.system(cmd) -#extracts the cmsdriver lines from the cmdLog + +# extracts the cmsdriver lines from the cmdLog def parseCmdLog(filename): cmsdriver_lines = [] with open(filename) as fi: @@ -235,8 +237,10 @@ def parseCmdLog(filename): cmsdriver_lines.append(stripPipe(line)) return cmsdriver_lines + def stripPipe(cmsdriver_line): - return cmsdriver_line[:cmsdriver_line.index(">")] + return cmsdriver_line[: cmsdriver_line.index(">")] + def getWFDir(workflow_number): dirs = list(glob.glob("{}_*".format(workflow_number))) @@ -244,6 +248,7 @@ def getWFDir(workflow_number): return None return dirs[0] + def wrapInRetry(cmd): s = """n=0 until [ "$n" -ge 10 ] @@ -251,27 +256,51 @@ def wrapInRetry(cmd): echo "attempt $n" {} && break n=$((n+1)) -done""".format(cmd) +done""".format( + cmd + ) return s + def echoBefore(cmd, msg): s = """ echo "{}" {} -""".format(msg, cmd) +""".format( + msg, cmd + ) return s + def prepTimeMemoryInfo(cmd, istep): - cmd_tmi = cmd + " --customise=Validation/Performance/TimeMemoryInfo.py &> step{}_TimeMemoryInfo.log".format(istep) + cmd_tmi = ( + cmd + + " --customise=Validation/Performance/TimeMemoryInfo.py &> step{}_TimeMemoryInfo.log".format( + istep + ) + ) return cmd_tmi + def prepFastTimer(cmd, istep): - cmd_ft = cmd + " --customise HLTrigger/Timer/FastTimer.customise_timer_service_singlejob --customise_commands \"process.FastTimerService.writeJSONSummary=True;process.FastTimerService.jsonFileName=\\\"step{istep}_circles.json\\\"\" &> step{istep}_FastTimerService.log".format(istep=istep) + cmd_ft = ( + cmd + + ' --customise HLTrigger/Timer/FastTimer.customise_timer_service_singlejob --customise_commands "process.FastTimerService.writeJSONSummary=True;process.FastTimerService.jsonFileName=\\"step{istep}_circles.json\\"" &> step{istep}_FastTimerService.log'.format( + istep=istep + ) + ) return cmd_ft + def prepIgprof(cmd, istep): - cmd_ig = cmd + " --customise Validation/Performance/IgProfInfo.customise --no_exec --python_filename step{istep}_igprof.py &> 
step{istep}_igprof_conf.txt".format(istep=istep) - return cmd_ig + cmd_ig = ( + cmd + + " --customise Validation/Performance/IgProfInfo.customise --no_exec --python_filename step{istep}_igprof.py &> step{istep}_igprof_conf.txt".format( + istep=istep + ) + ) + return cmd_ig + def configureProfilingSteps(cmsdriver_lines, num_events, steps_config): igprof_exe = "igprof" @@ -279,125 +308,145 @@ def configureProfilingSteps(cmsdriver_lines, num_events, steps_config): steps = {} for line in cmsdriver_lines: spl = line.split()[1] - #step1 has the format `cmsDriver.py fragment`, otherwise it's `cmsDriver.py stepN` + # step1 has the format `cmsDriver.py fragment`, otherwise it's `cmsDriver.py stepN` if "step" in spl: istep = int(spl.replace("step", "")) else: istep = 1 - steps[istep] = line + " -n {num_events} --suffix \"-j step{istep}_JobReport.xml\"".format(istep=istep, num_events=num_events) + steps[istep] = line + ' -n {num_events} --suffix "-j step{istep}_JobReport.xml"'.format( + istep=istep, num_events=num_events + ) steps_to_run = list(sorted(steps.keys())) - outfiles = [ - "step{}_JobReport.xml".format(istep) for istep in steps_to_run - ] - outfiles += [ - "step{}.root".format(istep) for istep in steps_to_run - ] - outfiles += [ - "step{}.log".format(istep) for istep in steps_to_run - ] + outfiles = ["step{}_JobReport.xml".format(istep) for istep in steps_to_run] + outfiles += ["step{}.root".format(istep) for istep in steps_to_run] + outfiles += ["step{}.log".format(istep) for istep in steps_to_run] - #First run all the steps without any special options - new_cmdlist = [steps[istep]+"&>step{istep}.log".format(istep=istep) for istep in steps_to_run] + # First run all the steps without any special options + new_cmdlist = [ + steps[istep] + "&>step{istep}.log".format(istep=istep) for istep in steps_to_run + ] igprof_commands = [] for step_name in steps_config.keys(): istep = int(step_name.replace("step", "")) step = steps[istep] - #strip the JobReport from the step command - step = step[:step.index("--suffix")-1] + # strip the JobReport from the step command + step = step[: step.index("--suffix") - 1] if steps_config[step_name]["TimeMemoryInfo"]: step_tmi = prepTimeMemoryInfo(step, istep) outfiles += ["step{}_TimeMemoryInfo.log".format(istep)] - new_cmdlist += [ - echoBefore(step_tmi, "step{istep} TimeMemoryInfo".format(istep=istep)) - ] + new_cmdlist += [echoBefore(step_tmi, "step{istep} TimeMemoryInfo".format(istep=istep))] if steps_config[step_name]["FastTimer"]: step_ft = prepFastTimer(step, istep) - outfiles += ["step{}_FastTimerService.log".format(istep), "step{}_circles.json".format(istep)] + outfiles += [ + "step{}_FastTimerService.log".format(istep), + "step{}_circles.json".format(istep), + ] new_cmdlist += [ echoBefore(step_ft, "step{istep} FastTimer".format(istep=istep)), ] if steps_config[step_name]["igprof"]: step_ig = prepIgprof(step, istep) - new_cmdlist += [ - echoBefore(step_ig, "step{istep} IgProf conf".format(istep=istep)) - ] - - igprof_pp = wrapInRetry(igprof_exe + " -d -pp -z -o step{istep}_igprofCPU.gz -t cmsRun cmsRun step{istep}_igprof.py &> step{istep}_igprof_cpu.txt".format(istep=istep)) - igprof_mp = wrapInRetry(igprof_exe + " -d -mp -z -o step{istep}_igprofMEM.gz -t cmsRunGlibC cmsRunGlibC step{istep}_igprof.py &> step{istep}_igprof_mem.txt".format(istep=istep)) + new_cmdlist += [echoBefore(step_ig, "step{istep} IgProf conf".format(istep=istep))] + + igprof_pp = wrapInRetry( + igprof_exe + + " -d -pp -z -o step{istep}_igprofCPU.gz -t cmsRun cmsRun 
step{istep}_igprof.py &> step{istep}_igprof_cpu.txt".format( + istep=istep + ) + ) + igprof_mp = wrapInRetry( + igprof_exe + + " -d -mp -z -o step{istep}_igprofMEM.gz -t cmsRunGlibC cmsRunGlibC step{istep}_igprof.py &> step{istep}_igprof_mem.txt".format( + istep=istep + ) + ) outfiles += [ - "step{istep}_igprof_cpu.txt".format(istep=istep), - "step{istep}_igprof_mem.txt".format(istep=istep) + "step{istep}_igprof_cpu.txt".format(istep=istep), + "step{istep}_igprof_mem.txt".format(istep=istep), ] - + igprof_commands += [ echoBefore(igprof_pp, "step{istep} IgProf pp".format(istep=istep)), "mv IgProf.1.gz step{istep}_igprofCPU.1.gz".format(istep=istep), - "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(nev=int(num_events/2), istep=istep), - "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(nev=int(num_events-1), istep=istep), + "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format( + nev=int(num_events / 2), istep=istep + ), + "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format( + nev=int(num_events - 1), istep=istep + ), echoBefore(igprof_mp, "step{istep} IgProf mp".format(istep=istep)), "mv IgProf.1.gz step{istep}_igprofMEM.1.gz".format(istep=istep), - "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(nev=int(num_events/2), istep=istep), - "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(nev=int(num_events-1), istep=istep), + "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format( + nev=int(num_events / 2), istep=istep + ), + "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format( + nev=int(num_events - 1), istep=istep + ), ] outfiles += [ - "step{istep}_igprofCPU.{nev}.gz".format(istep=istep, nev=nev) for nev in [1,int(num_events/2), int(num_events-1)] + "step{istep}_igprofCPU.{nev}.gz".format(istep=istep, nev=nev) + for nev in [1, int(num_events / 2), int(num_events - 1)] ] outfiles += [ - "step{istep}_igprofMEM.{nev}.gz".format(istep=istep, nev=nev) for nev in [1,int(num_events/2), int(num_events-1)] - ] - outfiles += [ - "step{istep}_igprofCPU.gz".format(istep=istep) + "step{istep}_igprofMEM.{nev}.gz".format(istep=istep, nev=nev) + for nev in [1, int(num_events / 2), int(num_events - 1)] ] + outfiles += ["step{istep}_igprofCPU.gz".format(istep=istep)] new_cmdlist = new_cmdlist + igprof_commands return new_cmdlist, outfiles + def writeProfilingScript(wfdir, runscript, cmdlist): runscript_path = "{}/{}".format(wfdir, runscript) with open(runscript_path, "w") as fi: fi.write("#!/bin/sh\n") - #for 12_3_0_pre3,pre4 - #fi.write("scram setup /cvmfs/cms.cern.ch/slc7_amd64_gcc10/cms/cmssw-tool-conf/52.0-904e6a6e16dcc9bdba60a5fd496e4237/tools/selected/libunwind.xml\n") + # for 12_3_0_pre3,pre4 + # fi.write("scram setup /cvmfs/cms.cern.ch/slc7_amd64_gcc10/cms/cmssw-tool-conf/52.0-904e6a6e16dcc9bdba60a5fd496e4237/tools/selected/libunwind.xml\n") - #this is required for igprof + # this is required for igprof fi.write("ulimit -a\n") - #don't abort on error - #fi.write("set -e\n") - - #print commands verbosely + # don't abort on error + # fi.write("set -e\n") + + # print commands verbosely fi.write("set -x\n") - # ensure that compiler include paths are added to ROOT_INCLUDE_PATH - fi.write("for path in $(LC_ALL=C g++ -xc++ -E -v /dev/null 2>&1 | sed -n -e '/^.include/,${' -e '/^ \/.*++/p' -e '}');do ROOT_INCLUDE_PATH=$path:$ROOT_INCLUDE_PATH; done") + # ensure that compiler include paths are added to ROOT_INCLUDE_PATH + fi.write( + "for path in $(LC_ALL=C g++ -xc++ -E -v /dev/null 2>&1 | sed -n -e '/^.include/,${' -e '/^ \/.*++/p' -e '}');do 
ROOT_INCLUDE_PATH=$path:$ROOT_INCLUDE_PATH; done" + ) fi.write("\n") fi.write("") fi.write("\n") for cmd in cmdlist: - fi.write(cmd + '\n') + fi.write(cmd + "\n") return + def runProfiling(wfdir, runscript): os.chdir(wfdir) os.system("chmod +x {}".format(runscript)) os.system("bash {}".format(runscript)) os.chdir("..") + def copyProfilingOutputs(wfdir, out_dir, outfiles): for output in outfiles: path = "{}/{}".format(wfdir, output) - #check that all outputs exists and are of nonzero size + # check that all outputs exists and are of nonzero size if os.path.isfile(path) and os.stat(path).st_size > 0: print("copying {} to {}".format(path, out_dir)) shutil.copy(path, out_dir) @@ -405,17 +454,25 @@ def copyProfilingOutputs(wfdir, out_dir, outfiles): print("ERROR: Output {} not found or is broken, skipping".format(path)) return + def main(wf, num_events, out_dir): wfdir = getWFDir(wf) - + if not (wfdir is None): print("Output directory {} exists, aborting".format(wfdir)) sys.exit(1) - prepareMatrixWF(wf, num_events, matrix=workflow_configs[wf]["matrix"], nthreads=workflow_configs[wf]["nThreads"]) + prepareMatrixWF( + wf, + num_events, + matrix=workflow_configs[wf]["matrix"], + nthreads=workflow_configs[wf]["nThreads"], + ) wfdir = getWFDir(wf) cmsdriver_lines = parseCmdLog("{}/cmdLog".format(wfdir)) - new_cmdlist, outfiles = configureProfilingSteps(cmsdriver_lines, num_events, workflow_configs[wf]["steps"]) + new_cmdlist, outfiles = configureProfilingSteps( + cmsdriver_lines, num_events, workflow_configs[wf]["steps"] + ) runscript = "cmdLog_profiling.sh" outfiles += ["cmdLog_profiling.sh"] @@ -423,20 +480,32 @@ def main(wf, num_events, out_dir): runProfiling(wfdir, runscript) copyProfilingOutputs(wfdir, out_dir, outfiles) + def parse_args(): import argparse + parser = argparse.ArgumentParser() - parser.add_argument("--workflow", type=str, default="35234.21", help="The workflow to use for profiling") - parser.add_argument("--num-events", type=int, default=-1, help="Number of events to use, -1 to use the default") - parser.add_argument("--out-dir", type=str, help="The output directory where to copy the profiling results", required=True) + parser.add_argument( + "--workflow", type=str, default="35234.21", help="The workflow to use for profiling" + ) + parser.add_argument( + "--num-events", type=int, default=-1, help="Number of events to use, -1 to use the default" + ) + parser.add_argument( + "--out-dir", + type=str, + help="The output directory where to copy the profiling results", + required=True, + ) args = parser.parse_args() - if args.num_events==-1: + if args.num_events == -1: args.num_events = workflow_configs[args.workflow]["num_events"] return args + if __name__ == "__main__": args = parse_args() - + os.makedirs(args.out_dir) main(args.workflow, args.num_events, args.out_dir) diff --git a/release-notes b/release-notes deleted file mode 100755 index e041540a2770..000000000000 --- a/release-notes +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python3 -from optparse import OptionParser -from os.path import exists,expanduser,join -from _py2with3compatibility import run_cmd, Request, urlopen, HTTPError -from github import Github -import json -from sys import exit -import re -from cms_static import GH_CMSSW_REPO, GH_CMSDIST_REPO, GH_CMSSW_ORGANIZATION -from github_utils import prs2relnotes, get_merge_prs, get_release_by_tag -from socket import setdefaulttimeout -from categories import get_dpg_pog -setdefaulttimeout(120) -CMSDIST_REPO_NAME=join(GH_CMSSW_ORGANIZATION, GH_CMSDIST_REPO) 
-CMSSW_REPO_NAME=join(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO) - - -def format(s, **kwds): return s % kwds -#--------------------------------------------------------- -# pyGithub -#-------------------------------------------------------- - -# -#defines the categories for each pr in the release notes -# -def add_categories_notes(notes, cache): - dpg_pog_labels = get_dpg_pog() - for pr_number in notes: - categories = [ l.split('-')[0] for l in cache[pr_number]['pr']['labels'] - if (re.match("^[a-zA-Z0-9]+[-](approved|pending|hold|rejected)$", l) - and not re.match('^(tests|orp)-' , l)) or l in dpg_pog_labels ] - if len(categories) == 0: - print("no categories for:", pr_number) - else: - print("Labels for %s: %s" % (pr_number, categories)) - note = notes[pr_number] - for cat in categories: - note += " `%s`" % cat - - if 'release-notes' in cache[pr_number]['pr']: - rel_notes = "\n".join(cache[pr_number]['pr']['release-notes']) - note = note + rel_notes - notes[pr_number] = note - return notes - -def get_cmssw_notes( previous_release , this_release, cache ): - if not exists("cmssw.git"): - error, out = run_cmd("git clone --bare --reference /cvmfs/cms-ib.cern.ch/git/cms-sw/cmssw.git git@github.com:cms-sw/cmssw.git") - if error: parser.error("Error while checking out the repository:\n" + out) - run_cmd("GIT_DIR=cmssw.git git fetch --all --tags") - return prs2relnotes(get_merge_prs(previous_release, this_release, "cmssw.git", "cms-prs", cache)) - -# -# gets the changes in cmsdist, production architecture is the production architecture of the release -# -def get_cmsdist_notes( prev_cmsdist_tag , curr_cmsdist_tag, cache ): - if not exists("cmsdist.git"): - error, out = run_cmd("git clone --bare git@github.com:cms-sw/cmsdist.git") - if error: parser.error("Error while checking out the cmsdist repository:\n" + out) - run_cmd("GIT_DIR=cmsdist.git git fetch --all --tags") - return prs2relnotes(get_merge_prs(prev_cmsdist_tag, curr_cmsdist_tag, "cmsdist.git", "cms-prs", cache), "cms-sw/cmsdist") - -# -# returns the comparison url to include in the notes -# -def get_comparison_url( previous_tag , current_tag , repo ): - return COMPARISON_URL % ( repo , previous_tag , current_tag ) - -#-------------------------------------------------------------------------------- -# Start of Execution -#-------------------------------------------------------------------------------- - -COMPARISON_URL = 'https://github.com/cms-sw/%s/compare/%s...%s' - -if __name__ == "__main__": - parser = OptionParser(usage="%(progname) ") - parser.add_option("-n", "--dry-run", help="Only print out release notes. 
Do not execute.", - dest="dryRun", default=False, action="store_true") - opts, args = parser.parse_args() - - if len(args) != 4: - parser.error("Wrong number or arguments") - prev_release = args[0] - curr_release = args[1] - prev_cmsdist_tag = args[2] - curr_cmsdist_tag = args[3] - - - #--------------------------------- - # pyGithub intialization - #--------------------------------- - - token = open(expanduser("~/.github-token")).read().strip() - github = Github( login_or_token = token ) - CMSSW_REPO = github.get_repo(CMSSW_REPO_NAME) - CMSDIST_REPO = github.get_repo(CMSDIST_REPO_NAME) - - if not exists("cms-prs"): - error, out = run_cmd("git clone --depth 1 git@github.com:cms-sw/cms-prs") - if error: parser.error("Error while checking out cms-sw/cms-prs repository:\n" + out) - cmssw_cache = {} - cmsdist_cache = {} - cmssw_notes = get_cmssw_notes( prev_release , curr_release, cmssw_cache) - cmsdist_notes = get_cmsdist_notes( prev_cmsdist_tag , curr_cmsdist_tag, cmsdist_cache) - - cmssw_notes = add_categories_notes(cmssw_notes, cmssw_cache) - cmssw_notes_str = "" - cmsdist_notes_str = "" - for pr in sorted(list(cmssw_notes.keys()), reverse=True): - cmssw_notes_str += cmssw_notes[pr]+'\n' - for pr in sorted(list(cmsdist_notes.keys()), reverse=True): - cmsdist_notes_str += cmsdist_notes[pr]+'\n' - header = "#### Changes since %s:\n%s\n" % \ - ( prev_release , get_comparison_url( prev_release, curr_release , 'cmssw' ) ) - cmsdist_header = "\n#### CMSDIST Changes between Tags %s and %s:\n%s\n" % \ - ( prev_cmsdist_tag , curr_cmsdist_tag , get_comparison_url( prev_cmsdist_tag, curr_cmsdist_tag , 'cmsdist' ) ) - - try: - release = get_release_by_tag("cms-sw/cmssw", curr_release) - url = "https://api.github.com/repos/cms-sw/cmssw/releases/%s" % release['id'] - if not opts.dryRun: - request = Request(url, headers={"Authorization" : "token " + token}) - request.get_method = lambda: 'PATCH' - print("Modifying release notes for %s at %s" % (curr_release, url)) - print(urlopen(request, json.dumps({"body": header + cmssw_notes_str + cmsdist_header + cmsdist_notes_str }).encode()).read()) - else: - print(header) - print(cmssw_notes_str) - print(cmsdist_header) - print(cmsdist_notes_str) - print("--dry-run specified, quitting without modifying release.") - print('ALL_OK') - exit(0) - except HTTPError as e: - print(e) - print("Release %s not found." 
% curr_release) - exit (1) diff --git a/release-notes b/release-notes new file mode 120000 index 000000000000..b0707129c09e --- /dev/null +++ b/release-notes @@ -0,0 +1 @@ +release-notes.py \ No newline at end of file diff --git a/release-notes.py b/release-notes.py new file mode 100755 index 000000000000..4f171efd9274 --- /dev/null +++ b/release-notes.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3 +from optparse import OptionParser +from os.path import exists, expanduser, join +from _py2with3compatibility import run_cmd, Request, urlopen, HTTPError +from github import Github +import json +from sys import exit +import re +from cms_static import GH_CMSSW_REPO, GH_CMSDIST_REPO, GH_CMSSW_ORGANIZATION +from github_utils import prs2relnotes, get_merge_prs, get_release_by_tag +from socket import setdefaulttimeout +from categories import get_dpg_pog + +setdefaulttimeout(120) +CMSDIST_REPO_NAME = join(GH_CMSSW_ORGANIZATION, GH_CMSDIST_REPO) +CMSSW_REPO_NAME = join(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO) + + +def format(s, **kwds): + return s % kwds + + +# --------------------------------------------------------- +# pyGithub +# -------------------------------------------------------- + + +# +# defines the categories for each pr in the release notes +# +def add_categories_notes(notes, cache): + dpg_pog_labels = get_dpg_pog() + for pr_number in notes: + categories = [ + l.split("-")[0] + for l in cache[pr_number]["pr"]["labels"] + if ( + re.match("^[a-zA-Z0-9]+[-](approved|pending|hold|rejected)$", l) + and not re.match("^(tests|orp)-", l) + ) + or l in dpg_pog_labels + ] + if len(categories) == 0: + print("no categories for:", pr_number) + else: + print("Labels for %s: %s" % (pr_number, categories)) + note = notes[pr_number] + for cat in categories: + note += " `%s`" % cat + + if "release-notes" in cache[pr_number]["pr"]: + rel_notes = "\n".join(cache[pr_number]["pr"]["release-notes"]) + note = note + rel_notes + notes[pr_number] = note + return notes + + +def get_cmssw_notes(previous_release, this_release, cache): + if not exists("cmssw.git"): + error, out = run_cmd( + "git clone --bare --reference /cvmfs/cms-ib.cern.ch/git/cms-sw/cmssw.git git@github.com:cms-sw/cmssw.git" + ) + if error: + parser.error("Error while checking out the repository:\n" + out) + run_cmd("GIT_DIR=cmssw.git git fetch --all --tags") + return prs2relnotes( + get_merge_prs(previous_release, this_release, "cmssw.git", "cms-prs", cache) + ) + + +# +# gets the changes in cmsdist, production architecture is the production architecture of the release +# +def get_cmsdist_notes(prev_cmsdist_tag, curr_cmsdist_tag, cache): + if not exists("cmsdist.git"): + error, out = run_cmd("git clone --bare git@github.com:cms-sw/cmsdist.git") + if error: + parser.error("Error while checking out the cmsdist repository:\n" + out) + run_cmd("GIT_DIR=cmsdist.git git fetch --all --tags") + return prs2relnotes( + get_merge_prs(prev_cmsdist_tag, curr_cmsdist_tag, "cmsdist.git", "cms-prs", cache), + "cms-sw/cmsdist", + ) + + +# +# returns the comparison url to include in the notes +# +def get_comparison_url(previous_tag, current_tag, repo): + return COMPARISON_URL % (repo, previous_tag, current_tag) + + +# -------------------------------------------------------------------------------- +# Start of Execution +# -------------------------------------------------------------------------------- + +COMPARISON_URL = "https://github.com/cms-sw/%s/compare/%s...%s" + +if __name__ == "__main__": + parser = OptionParser( + usage="%(progname) " + ) + parser.add_option( + 
"-n", + "--dry-run", + help="Only print out release notes. Do not execute.", + dest="dryRun", + default=False, + action="store_true", + ) + opts, args = parser.parse_args() + + if len(args) != 4: + parser.error("Wrong number or arguments") + prev_release = args[0] + curr_release = args[1] + prev_cmsdist_tag = args[2] + curr_cmsdist_tag = args[3] + + # --------------------------------- + # pyGithub intialization + # --------------------------------- + + token = open(expanduser("~/.github-token")).read().strip() + github = Github(login_or_token=token) + CMSSW_REPO = github.get_repo(CMSSW_REPO_NAME) + CMSDIST_REPO = github.get_repo(CMSDIST_REPO_NAME) + + if not exists("cms-prs"): + error, out = run_cmd("git clone --depth 1 git@github.com:cms-sw/cms-prs") + if error: + parser.error("Error while checking out cms-sw/cms-prs repository:\n" + out) + cmssw_cache = {} + cmsdist_cache = {} + cmssw_notes = get_cmssw_notes(prev_release, curr_release, cmssw_cache) + cmsdist_notes = get_cmsdist_notes(prev_cmsdist_tag, curr_cmsdist_tag, cmsdist_cache) + + cmssw_notes = add_categories_notes(cmssw_notes, cmssw_cache) + cmssw_notes_str = "" + cmsdist_notes_str = "" + for pr in sorted(list(cmssw_notes.keys()), reverse=True): + cmssw_notes_str += cmssw_notes[pr] + "\n" + for pr in sorted(list(cmsdist_notes.keys()), reverse=True): + cmsdist_notes_str += cmsdist_notes[pr] + "\n" + header = "#### Changes since %s:\n%s\n" % ( + prev_release, + get_comparison_url(prev_release, curr_release, "cmssw"), + ) + cmsdist_header = "\n#### CMSDIST Changes between Tags %s and %s:\n%s\n" % ( + prev_cmsdist_tag, + curr_cmsdist_tag, + get_comparison_url(prev_cmsdist_tag, curr_cmsdist_tag, "cmsdist"), + ) + + try: + release = get_release_by_tag("cms-sw/cmssw", curr_release) + url = "https://api.github.com/repos/cms-sw/cmssw/releases/%s" % release["id"] + if not opts.dryRun: + request = Request(url, headers={"Authorization": "token " + token}) + request.get_method = lambda: "PATCH" + print("Modifying release notes for %s at %s" % (curr_release, url)) + print( + urlopen( + request, + json.dumps( + {"body": header + cmssw_notes_str + cmsdist_header + cmsdist_notes_str} + ).encode(), + ).read() + ) + else: + print(header) + print(cmssw_notes_str) + print(cmsdist_header) + print(cmsdist_notes_str) + print("--dry-run specified, quitting without modifying release.") + print("ALL_OK") + exit(0) + except HTTPError as e: + print(e) + print("Release %s not found." 
% curr_release) + exit(1) diff --git a/release_notes_collection.py b/release_notes_collection.py index 05cc85687fc9..1e73d2479c8f 100755 --- a/release_notes_collection.py +++ b/release_notes_collection.py @@ -8,118 +8,190 @@ from hashlib import md5 import time -RX_RELEASE = re.compile('CMSSW_(\d+)_(\d+)_(\d+)(_pre[0-9]+)*(_cand[0-9]+)*(_patch[0-9]+)*') +RX_RELEASE = re.compile("CMSSW_(\d+)_(\d+)_(\d+)(_pre[0-9]+)*(_cand[0-9]+)*(_patch[0-9]+)*") RX_AUTHOR = re.compile("(.*)(@[a-zA-Z-_0-9]+)") RX_COMPARE = re.compile("(https://github.*compare.*\.\.\..*)") -RX_COMMIT = re.compile("^-\s+(:arrow_right:\s*|)([^/]+\/[^/]+|)\#(\d{0,5})( from.*)") +RX_COMMIT = re.compile("^-\s+(:arrow_right:\s*|)([^/]+\/[^/]+|)\#(\d{0,5})( from.*)") -Release = namedtuple("Release", ["major", "minor", "subminor", "pre", "cand", "patch","published_at"]) +Release = namedtuple( + "Release", ["major", "minor", "subminor", "pre", "cand", "patch", "published_at"] +) DEBUG = True + def head(title, release): - rel_link=title.replace("CMSSW_","") - ret = "---\n" - ret += "layout: post\n" - ret += 'rel_link: "{rel_link}"\n'.format(rel_link=rel_link) - ret += 'title: "{title}"\n'.format(title=title) - ret += "date: {published_at}\n".format(published_at=time.strftime("%Y-%m-%d %H:%M:%S",time.strptime(release.published_at,"%Y-%m-%dT%H:%M:%SZ"))) - ret += "categories: cmssw\n" - ret += "relmajor: {major}\n".format(major=release.major) - ret += "relminor: {minor}\n".format(minor=release.minor) - ret += "relsubminor: {subminor}\n".format(subminor=release.subminor) - if release.pre: - ret += "relpre: {pre}\n".format(pre=release.pre) - if release.cand: - ret += "relcand: {cand}\n".format(cand=release.cand) - if release.patch: - ret += "relpatch: {patch}\n".format(patch=release.patch) - ret += "---\n\n" - return ret + rel_link = title.replace("CMSSW_", "") + ret = "---\n" + ret += "layout: post\n" + ret += 'rel_link: "{rel_link}"\n'.format(rel_link=rel_link) + ret += 'title: "{title}"\n'.format(title=title) + ret += "date: {published_at}\n".format( + published_at=time.strftime( + "%Y-%m-%d %H:%M:%S", time.strptime(release.published_at, "%Y-%m-%dT%H:%M:%SZ") + ) + ) + ret += "categories: cmssw\n" + ret += "relmajor: {major}\n".format(major=release.major) + ret += "relminor: {minor}\n".format(minor=release.minor) + ret += "relsubminor: {subminor}\n".format(subminor=release.subminor) + if release.pre: + ret += "relpre: {pre}\n".format(pre=release.pre) + if release.cand: + ret += "relcand: {cand}\n".format(cand=release.cand) + if release.patch: + ret += "relpatch: {patch}\n".format(patch=release.patch) + ret += "---\n\n" + return ret + def get_pr(pr, repo, cmsprs): - pr_md5 = md5((pr+"\n").encode()).hexdigest() - pr_cache = join(cmsprs,repo,pr_md5[0:2],pr_md5[2:]+".json") - if exists(pr_cache): return json.load(open(pr_cache)) - return {} + pr_md5 = md5((pr + "\n").encode()).hexdigest() + pr_cache = join(cmsprs, repo, pr_md5[0:2], pr_md5[2:] + ".json") + if exists(pr_cache): + return json.load(open(pr_cache)) + return {} + def getReleasesNotes(opts): - get_gh_token(token_file=expanduser("~/.github-token-cmsbot")) - notes = [] - error_releases = {} - print("Reading releases page") - rel_opt="" - if opts.release: rel_opt="/tags/%s" % opts.release - releases=github_api("/repos/%s/releases%s" % (opts.repository, rel_opt), method="GET") - if opts.release: releases = [releases] - for release in releases: - rel_name = release['name'] - rel_id = str(release['id']) - print("Checking release", rel_name) - if " " in rel_name: - 
error_releases[rel_name]="Space in name:"+rel_id - print(" Skipping release (contains space in name):",rel_name) - continue - rel_cyc = "_".join(rel_name.split("_")[0:2]) - rel_numbers = re.match(RX_RELEASE, rel_name) - if not rel_numbers: - error_releases[rel_name]="Does not match release regexp:"+rel_id - print(" Skipping release (does not match release regexp):",rel_name) - continue - if (not 'body' in release) or (not release['body']): - error_releases[rel_name]="Empty release body message:"+rel_id - print(" Skipping release (empty release body message):",rel_name) - continue - if not re.match('^%s$' % opts.release_filter, rel_name): - print(" Skipping release (release does not match filter):",rel_name) - continue - rel_file = join(opts.release_notes_dir,rel_cyc,"%s.md" % rel_name) - if (not opts.force) and exists(rel_file): - print(" Skipping release (already exists):",rel_name) - continue - release_notes = [] - prepo = "" - count = 0 - forward_port_sym = '' - for line in release['body'].encode("ascii", "ignore").decode().split('\n'): - line = re.sub(RX_AUTHOR, '\\1**\\2**', line) - m = RX_COMMIT.match(line) - if m: - repo = opts.repository - forward_port = "" - if m.group(1): forward_port = forward_port_sym - if m.group(2): repo = m.group(2) - if repo != prepo: count = 0 - prepo=repo - count+=1 - line = '\n{count}. {forward_port}[{pr}](http://github.com/{repo}/pull/{pr})'.format(forward_port=forward_port,count=count,repo=repo,pr=m.group(3))+'{:target="_blank"} '+m.group(4) - pr = get_pr(m.group(3), repo, opts.prs_dir) - print(" PR found: "+repo+"#"+m.group(3)) - if 'created_at' in pr:line+=" created: "+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(pr['created_at']))) - if 'merged_at' in pr:line+=" merged: "+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(pr['merged_at']))) - elif RX_COMPARE.match(line): - line = re.sub(RX_COMPARE, '[compare to previous](\\1)\n\n', line) + get_gh_token(token_file=expanduser("~/.github-token-cmsbot")) + notes = [] + error_releases = {} + print("Reading releases page") + rel_opt = "" + if opts.release: + rel_opt = "/tags/%s" % opts.release + releases = github_api("/repos/%s/releases%s" % (opts.repository, rel_opt), method="GET") + if opts.release: + releases = [releases] + for release in releases: + rel_name = release["name"] + rel_id = str(release["id"]) + print("Checking release", rel_name) + if " " in rel_name: + error_releases[rel_name] = "Space in name:" + rel_id + print(" Skipping release (contains space in name):", rel_name) + continue + rel_cyc = "_".join(rel_name.split("_")[0:2]) + rel_numbers = re.match(RX_RELEASE, rel_name) + if not rel_numbers: + error_releases[rel_name] = "Does not match release regexp:" + rel_id + print(" Skipping release (does not match release regexp):", rel_name) + continue + if (not "body" in release) or (not release["body"]): + error_releases[rel_name] = "Empty release body message:" + rel_id + print(" Skipping release (empty release body message):", rel_name) + continue + if not re.match("^%s$" % opts.release_filter, rel_name): + print(" Skipping release (release does not match filter):", rel_name) + continue + rel_file = join(opts.release_notes_dir, rel_cyc, "%s.md" % rel_name) + if (not opts.force) and exists(rel_file): + print(" Skipping release (already exists):", rel_name) + continue + release_notes = [] + prepo = "" + count = 0 + forward_port_sym = '' + for line in release["body"].encode("ascii", "ignore").decode().split("\n"): + line = re.sub(RX_AUTHOR, "\\1**\\2**", line) + m = 
RX_COMMIT.match(line) + if m: + repo = opts.repository + forward_port = "" + if m.group(1): + forward_port = forward_port_sym + if m.group(2): + repo = m.group(2) + if repo != prepo: + count = 0 + prepo = repo + count += 1 + line = ( + "\n{count}. {forward_port}[{pr}](http://github.com/{repo}/pull/{pr})".format( + forward_port=forward_port, count=count, repo=repo, pr=m.group(3) + ) + + '{:target="_blank"} ' + + m.group(4) + ) + pr = get_pr(m.group(3), repo, opts.prs_dir) + print(" PR found: " + repo + "#" + m.group(3)) + if "created_at" in pr: + line += " created: " + time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(float(pr["created_at"])) + ) + if "merged_at" in pr: + line += " merged: " + time.strftime( + "%Y-%m-%d %H:%M:%S", time.localtime(float(pr["merged_at"])) + ) + elif RX_COMPARE.match(line): + line = re.sub(RX_COMPARE, "[compare to previous](\\1)\n\n", line) - release_notes.append(line.replace(':arrow_right:',forward_port_sym)) - r = Release(int(rel_numbers.group(1)), int(rel_numbers.group(2)), - int(rel_numbers.group(3)),rel_numbers.group(4), - rel_numbers.group(5),rel_numbers.group(6),release['published_at']) - out_rel = open(rel_file, 'w') - out_rel.write(head(rel_name, r)) - out_rel.write('# %s\n%s' % (rel_name, "\n".join(release_notes))) - out_rel.close() - print(" Created release notes:",rel_name) - if error_releases: print("Releases with errors:",error_releases) + release_notes.append(line.replace(":arrow_right:", forward_port_sym)) + r = Release( + int(rel_numbers.group(1)), + int(rel_numbers.group(2)), + int(rel_numbers.group(3)), + rel_numbers.group(4), + rel_numbers.group(5), + rel_numbers.group(6), + release["published_at"], + ) + out_rel = open(rel_file, "w") + out_rel.write(head(rel_name, r)) + out_rel.write("# %s\n%s" % (rel_name, "\n".join(release_notes))) + out_rel.close() + print(" Created release notes:", rel_name) + if error_releases: + print("Releases with errors:", error_releases) -if __name__ == '__main__': - parser = OptionParser(usage="%prog") - parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw") - parser.add_option("-p", "--prs-path", dest="prs_dir", help="Directory with Pull request", type=str, default="cms-prs") - parser.add_option("-N", "--release-notes", dest="release_notes_dir", help="Directory where to store release notes", type=str, default="ReleaseNotes/_releases") - parser.add_option("-R", "--release", dest="release", help="Release name", type=str, default=None) - parser.add_option("-F", "--release-filter", dest="release_filter", help="Release filter", type=str, default="CMSSW_.*") - parser.add_option("-f", "--force", dest="force", action="store_true", help="Force re-creation of release notes", default=False) - opts, args = parser.parse_args() - if opts.release: opts.force=True - getReleasesNotes(opts) +if __name__ == "__main__": + parser = OptionParser(usage="%prog") + parser.add_option( + "-r", + "--repository", + dest="repository", + help="Github Repositoy name e.g. 
cms-sw/cmssw.", + type=str, + default="cms-sw/cmssw", + ) + parser.add_option( + "-p", + "--prs-path", + dest="prs_dir", + help="Directory with Pull request", + type=str, + default="cms-prs", + ) + parser.add_option( + "-N", + "--release-notes", + dest="release_notes_dir", + help="Directory where to store release notes", + type=str, + default="ReleaseNotes/_releases", + ) + parser.add_option( + "-R", "--release", dest="release", help="Release name", type=str, default=None + ) + parser.add_option( + "-F", + "--release-filter", + dest="release_filter", + help="Release filter", + type=str, + default="CMSSW_.*", + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + help="Force re-creation of release notes", + default=False, + ) + opts, args = parser.parse_args() + if opts.release: + opts.force = True + getReleasesNotes(opts) diff --git a/releases.map b/releases.map index 34ba873540e5..30dc37be36f2 100644 --- a/releases.map +++ b/releases.map @@ -1,4 +1,4 @@ -architecture=el8_amd64_gcc11;label=CMSSW_13_3_X;type=Development;state=IB;prodarch=1; +architecture=el8_amd64_gcc12;label=CMSSW_13_3_X;type=Development;state=IB;prodarch=1; architecture=el8_amd64_gcc11;label=CMSSW_13_2_X;type=Development;state=IB;prodarch=1; architecture=el8_amd64_gcc11;label=CMSSW_13_1_X;type=Development;state=IB;prodarch=1; architecture=el8_amd64_gcc11;label=CMSSW_13_0_X;type=Development;state=IB;prodarch=1; @@ -3938,3 +3938,9 @@ architecture=el8_ppc64le_gcc11;label=CMSSW_13_1_3;type=Production;state=Announce architecture=el8_amd64_gcc11;label=CMSSW_13_1_3;type=Production;state=Announced;prodarch=1; architecture=slc7_amd64_gcc11;label=CMSSW_13_1_3;type=Production;state=Announced;prodarch=0; architecture=el9_amd64_gcc11;label=CMSSW_13_1_3;type=Production;state=Announced;prodarch=0; +architecture=el9_aarch64_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=0; +architecture=el8_amd64_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=1; +architecture=el8_aarch64_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=0; +architecture=el8_ppc64le_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=0; +architecture=el9_amd64_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=0; +architecture=slc7_amd64_gcc12;label=CMSSW_13_3_0_pre5;type=Development;state=Announced;prodarch=0; diff --git a/releases.py b/releases.py index 78d92e62877f..e36b38ef2185 100644 --- a/releases.py +++ b/releases.py @@ -1,25 +1,32 @@ from milestones import * import re -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_13_3_X" -RELEASE_BRANCH_MILESTONE["master"]=RELEASE_BRANCH_MILESTONE[CMSSW_DEVEL_BRANCH] +RELEASE_BRANCH_MILESTONE["master"] = RELEASE_BRANCH_MILESTONE[CMSSW_DEVEL_BRANCH] RELEASE_BRANCH_PRODUCTION.append("master") -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) + def get_release_managers(branch): - if branch in RELEASE_MANAGERS: return RELEASE_MANAGERS[branch] - for exp in RELEASE_MANAGERS: - if re.match(exp, branch): return RELEASE_MANAGERS[exp] - return [] 
+ if branch in RELEASE_MANAGERS: + return RELEASE_MANAGERS[branch] + for exp in RELEASE_MANAGERS: + if re.match(exp, branch): + return RELEASE_MANAGERS[exp] + return [] -def is_closed_branch(branch): - if branch in RELEASE_BRANCH_CLOSED: return True - for exp in RELEASE_BRANCH_CLOSED: - if re.match(exp, branch): return True - return False +def is_closed_branch(branch): + if branch in RELEASE_BRANCH_CLOSED: + return True + for exp in RELEASE_BRANCH_CLOSED: + if re.match(exp, branch): + return True + return False diff --git a/repo_config.py b/repo_config.py index 165ce3ce02fc..938d434c2835 100644 --- a/repo_config.py +++ b/repo_config.py @@ -1,12 +1,13 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import dirname,abspath -GH_TOKEN="~/.github-token" -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER=CMSBUILD_GH_USER -GH_REPO_ORGANIZATION=GH_CMSSW_ORGANIZATION -CREATE_EXTERNAL_ISSUE=True -JENKINS_SERVER="http://cmsjenkins03.cern.ch:8080/jenkins" +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import dirname, abspath + +GH_TOKEN = "~/.github-token" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = CMSBUILD_GH_USER +GH_REPO_ORGANIZATION = GH_CMSSW_ORGANIZATION +CREATE_EXTERNAL_ISSUE = True +JENKINS_SERVER = "http://cmsjenkins03.cern.ch:8080/jenkins" IGNORE_ISSUES = { - GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO : [12368], + GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO: [12368], } diff --git a/report-build-release-status.py b/report-build-release-status.py index 30c848e364b3..cbd5e59e32a1 100755 --- a/report-build-release-status.py +++ b/report-build-release-status.py @@ -3,16 +3,25 @@ from __future__ import print_function from _py2with3compatibility import getoutput from optparse import OptionParser -from github_utils import create_issue_comment, get_issue_labels, remove_issue_label, add_issue_labels, remove_issue_labels_all +from github_utils import ( + create_issue_comment, + get_issue_labels, + remove_issue_label, + add_issue_labels, + remove_issue_labels_all, +) from os.path import expanduser from datetime import datetime from socket import setdefaulttimeout from os import environ import re + setdefaulttimeout(120) -JENKINS_PREFIX="jenkins" -try: JENKINS_PREFIX=environ['JENKINS_URL'].strip("/").split("/")[-1] -except: JENKINS_PREFIX="jenkins" +JENKINS_PREFIX = "jenkins" +try: + JENKINS_PREFIX = environ["JENKINS_URL"].strip("/").split("/")[-1] +except: + JENKINS_PREFIX = "jenkins" # # Posts a message in the github issue that triggered the build # The structure of the message depends on the option used @@ -21,297 +30,350 @@ # ------------------------------------------------------------------------------- # Global Variables # -------------------------------------------------------------------------------- -GH_CMSSW_ORGANIZATION = 'cms-sw' -GH_CMSSW_REPO = 'cmssw' -POST_BUILDING='BUILDING' -POST_TOOL_CONF_BUILDING='TOOL_CONF_BUILDING' -BUILD_OK='BUILD_OK' -TOOL_CONF_OK='TOOL_CONF_OK' -TOOL_CONF_ERROR = 'TOOL_CONF_ERROR' -BUILD_ERROR='BUILD_ERROR' -UPLOADING='UPLOADING' -UPLOAD_OK='UPLOAD_OK' -UPLOAD_ERROR='UPLOAD_ERROR' -CLEANUP_OK='CLEANUP_OK' -CLEANUP_ERROR='CLEANUP_ERROR' -TESTS_OK='TESTS_OK' -RELEASE_NOTES_OK='RELEASE_NOTES_OK' -RELEASE_NOTES_ERROR='RELEASE_NOTES_ERROR' -INSTALLATION_OK='INSTALLATION_OK' -INSTALLATION_SKIP='INSTALLATION_SKIP' -INSTALLATION_ERROR='INSTALLATION_ERROR' 
+GH_CMSSW_ORGANIZATION = "cms-sw" +GH_CMSSW_REPO = "cmssw" +POST_BUILDING = "BUILDING" +POST_TOOL_CONF_BUILDING = "TOOL_CONF_BUILDING" +BUILD_OK = "BUILD_OK" +TOOL_CONF_OK = "TOOL_CONF_OK" +TOOL_CONF_ERROR = "TOOL_CONF_ERROR" +BUILD_ERROR = "BUILD_ERROR" +UPLOADING = "UPLOADING" +UPLOAD_OK = "UPLOAD_OK" +UPLOAD_ERROR = "UPLOAD_ERROR" +CLEANUP_OK = "CLEANUP_OK" +CLEANUP_ERROR = "CLEANUP_ERROR" +TESTS_OK = "TESTS_OK" +RELEASE_NOTES_OK = "RELEASE_NOTES_OK" +RELEASE_NOTES_ERROR = "RELEASE_NOTES_ERROR" +INSTALLATION_OK = "INSTALLATION_OK" +INSTALLATION_SKIP = "INSTALLATION_SKIP" +INSTALLATION_ERROR = "INSTALLATION_ERROR" # this means that there was an error in the script that excecutes the tests, # it is independent from the tests results -TESTS_ERROR='TESTS_ERROR' -BUILDING_MSG='The build has started for {architecture} in {machine}. \n' \ - 'You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n' \ - '{details}' % JENKINS_PREFIX -BUILDING_TOOL_CONF_MSG='The cmssw-tool-conf build has started for {architecture} in {machine}. \n' \ - 'You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n' \ - '{details}' % JENKINS_PREFIX -BUILD_OK_MSG='The build has finished sucessfully for the architecture {architecture} and is ready to be uploaded. \n' \ - 'You can start the uploads by writing the comment: "upload all". I will upload all the architectures as soon as the build finishes successfully.\n' \ - 'You can see the log for the build here: \n' \ - '{log_url} \n' \ - 'Some tests ( runTheMatrix.py -s ) are being run, the results will be posted when done.' -TOOL_CONF_OK_MSG='The cmssw-tool-conf build has finished sucessfully for the architecture {architecture} and it was automatically uploaded. \n' \ - 'Remember that if you write "+1" I will start to build this and all the architectures as soon as their cmssw-tool-conf finish.\n' \ - 'You can see the log for the build here: \n' \ - '{log_url} \n' -TOOL_CONF_ERROR_MSG='There was an error building cmssw-tool-conf for {architecture} \n' \ - 'You can see the log for the build here: \n' \ - '{log_url} \n' -UPLOADING_MSG='The upload has started for {architecture} in {machine}. \n' \ - 'You can see the progress here: https://cmssdt.cern.ch/%s/job/upload-release/{jk_build_number}/console' % JENKINS_PREFIX -UPLOAD_OK_MSG='The upload has successfully finished for {architecture} \n You can see the log here: \n {log_url}' -INSTALLATION_OK_MSG='The installation has successfully finished for {architecture} \n You can see the log here: \n {log_url} \n' \ - 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' \ - 'I will generate the release notes based on the release that you provide. You don\'t need to provide the architecture ' \ - 'I will use the production architecture to infer the cmsdist tag.\n' \ - 'Alternatively, you can just write "release-notes", I will try to guess the previous release.' -INSTALLATION_SKIP_MSG='CERN AFS installation skipped for {architecture} as no CMSSW releases are now deployed on AFS. \n' \ - 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' \ - 'I will generate the release notes based on the release that you provide. 
You don\'t need to provide the architecture ' \ - 'I will use the production architecture to infer the cmsdist tag.\n' \ - 'Alternatively, you can just write "release-notes", I will try to guess the previous release.' -UPLOAD_ERROR_MSG='The was error uploading {architecture}. \n You can see the log here: \n {log_url}' -INSTALLATION_ERROR_MSG='The was error installing {architecture}. \n You can see the log here: \n {log_url}' -CLEANUP_OK_MSG='The workspace for {architecture} has been deleted \n You can see the log here: \n {log_url} \n' -CLEANUP_ERROR_MSG='There was an error deletng the workspace for {architecture} \n You can see the log here: \n {log_url} \n' -TESTS_OK_MSG='The tests have finished for {architecture} \n You can see the log here: \n {log_url} \n' -TESTS_ERROR_MSG='There was an error when running the tests for {architecture} \n You can see the log here: \n {log_url} \n' -BUILD_ERROR_MSG='The was an error for {architecture}. \n You can see the log here: \n {log_url}' -RELEASE_NOTES_OK_MSG='The release notes are ready: https://github.com/cms-sw/cmssw/releases/tag/{rel_name}' -RELEASE_NOTES_ERROR_MSG='There was an error generating the release notes, please look into the logs' -BUILD_QUEUED_LABEL = 'build-release-queued' -BUILD_STARTED = 'build-release-started' -BASE_BUILD_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/auto-build-release/%s-%s/%d' -BASE_UPLOAD_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/auto-upload-release/%s-%s/%d' -BASE_CLEANUP_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/cleanup-auto-build/%s-%s/%d' -BASE_INSTALLATION_URL = 'https://cmssdt.cern.ch/SDT/%s-artifacts/deploy-release-afs/{rel_name}/{architecture}/{job_id}/' % JENKINS_PREFIX +TESTS_ERROR = "TESTS_ERROR" +BUILDING_MSG = ( + "The build has started for {architecture} in {machine}. \n" + "You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n" + "{details}" % JENKINS_PREFIX +) +BUILDING_TOOL_CONF_MSG = ( + "The cmssw-tool-conf build has started for {architecture} in {machine}. \n" + "You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n" + "{details}" % JENKINS_PREFIX +) +BUILD_OK_MSG = ( + "The build has finished sucessfully for the architecture {architecture} and is ready to be uploaded. \n" + 'You can start the uploads by writing the comment: "upload all". I will upload all the architectures as soon as the build finishes successfully.\n' + "You can see the log for the build here: \n" + "{log_url} \n" + "Some tests ( runTheMatrix.py -s ) are being run, the results will be posted when done." +) +TOOL_CONF_OK_MSG = ( + "The cmssw-tool-conf build has finished sucessfully for the architecture {architecture} and it was automatically uploaded. \n" + 'Remember that if you write "+1" I will start to build this and all the architectures as soon as their cmssw-tool-conf finish.\n' + "You can see the log for the build here: \n" + "{log_url} \n" +) +TOOL_CONF_ERROR_MSG = ( + "There was an error building cmssw-tool-conf for {architecture} \n" + "You can see the log for the build here: \n" + "{log_url} \n" +) +UPLOADING_MSG = ( + "The upload has started for {architecture} in {machine}. 
\n" + "You can see the progress here: https://cmssdt.cern.ch/%s/job/upload-release/{jk_build_number}/console" + % JENKINS_PREFIX +) +UPLOAD_OK_MSG = "The upload has successfully finished for {architecture} \n You can see the log here: \n {log_url}" +INSTALLATION_OK_MSG = ( + "The installation has successfully finished for {architecture} \n You can see the log here: \n {log_url} \n" + 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' + "I will generate the release notes based on the release that you provide. You don't need to provide the architecture " + "I will use the production architecture to infer the cmsdist tag.\n" + 'Alternatively, you can just write "release-notes", I will try to guess the previous release.' +) +INSTALLATION_SKIP_MSG = ( + "CERN AFS installation skipped for {architecture} as no CMSSW releases are now deployed on AFS. \n" + 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' + "I will generate the release notes based on the release that you provide. You don't need to provide the architecture " + "I will use the production architecture to infer the cmsdist tag.\n" + 'Alternatively, you can just write "release-notes", I will try to guess the previous release.' +) +UPLOAD_ERROR_MSG = ( + "The was error uploading {architecture}. \n You can see the log here: \n {log_url}" +) +INSTALLATION_ERROR_MSG = ( + "The was error installing {architecture}. \n You can see the log here: \n {log_url}" +) +CLEANUP_OK_MSG = "The workspace for {architecture} has been deleted \n You can see the log here: \n {log_url} \n" +CLEANUP_ERROR_MSG = "There was an error deletng the workspace for {architecture} \n You can see the log here: \n {log_url} \n" +TESTS_OK_MSG = ( + "The tests have finished for {architecture} \n You can see the log here: \n {log_url} \n" +) +TESTS_ERROR_MSG = "There was an error when running the tests for {architecture} \n You can see the log here: \n {log_url} \n" +BUILD_ERROR_MSG = "The was an error for {architecture}. 
\n You can see the log here: \n {log_url}" +RELEASE_NOTES_OK_MSG = ( + "The release notes are ready: https://github.com/cms-sw/cmssw/releases/tag/{rel_name}" +) +RELEASE_NOTES_ERROR_MSG = ( + "There was an error generating the release notes, please look into the logs" +) +BUILD_QUEUED_LABEL = "build-release-queued" +BUILD_STARTED = "build-release-started" +BASE_BUILD_LOG_URL = ( + "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/auto-build-release/%s-%s/%d" +) +BASE_UPLOAD_LOG_URL = ( + "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/auto-upload-release/%s-%s/%d" +) +BASE_CLEANUP_LOG_URL = ( + "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/cleanup-auto-build/%s-%s/%d" +) +BASE_INSTALLATION_URL = ( + "https://cmssdt.cern.ch/SDT/%s-artifacts/deploy-release-afs/{rel_name}/{architecture}/{job_id}/" + % JENKINS_PREFIX +) # ------------------------------------------------------------------------------- # Functions # -------------------------------------------------------------------------------- + # # posts a message to the issue in github # if dry-run is selected it doesn't post the message and just prints it # -def post_message(repo, issue, msg ): - if opts.dryRun: - print('Not posting message (dry-run):\n %s' % msg) - else: - print('Posting message:\n %s' % msg) - create_issue_comment(repo, issue, msg) - +def post_message(repo, issue, msg): + if opts.dryRun: + print("Not posting message (dry-run):\n %s" % msg) + else: + print("Posting message:\n %s" % msg) + create_issue_comment(repo, issue, msg) # Adds a label to the issue in github # if dry-run is selected it doesn't add the label and just prints it -def add_label(repo, issue, label ): - if opts.dryRun: - print('Not adding label (dry-run):\n %s' % label) - return - print('Adding label:\n %s' % label) - add_issue_labels(repo, issue, [label] ) +def add_label(repo, issue, label): + if opts.dryRun: + print("Not adding label (dry-run):\n %s" % label) + return + print("Adding label:\n %s" % label) + add_issue_labels(repo, issue, [label]) + # Removes a label form the issue -def remove_label( repo, issue, label ): - if opts.dryRun: - print('Not removing label (dry-run):\n %s' % label) - return - - reM = re.compile ("^%s$" % label) - for l in ALL_LABELS: - if not reM.match(l): continue - print('Removing label: %s' % l) - try: - remove_issue_label(repo, issue, l) - except Exception as e: - pass +def remove_label(repo, issue, label): + if opts.dryRun: + print("Not removing label (dry-run):\n %s" % label) + return + + reM = re.compile("^%s$" % label) + for l in ALL_LABELS: + if not reM.match(l): + continue + print("Removing label: %s" % l) + try: + remove_issue_label(repo, issue, l) + except Exception as e: + pass + # # removes the labels of the issue # -def remove_labels( repo, issue ): - if opts.dryRun: - print('Not removing issue labels (dry-run)') - return - remove_issue_labels_all(repo, issue) +def remove_labels(repo, issue): + if opts.dryRun: + print("Not removing issue labels (dry-run)") + return + remove_issue_labels_all(repo, issue) # # Get tests log output # def get_test_log(logfile): - from os import getenv - from os.path import join,exists - logmsg='' - try: - logfile = join(getenv('WORKSPACE'),logfile) + from os import getenv + from os.path import join, exists + + logmsg = "" try: - logmsg = '\n\nTests results:\n'+getoutput("grep 'ERROR\| tests passed' "+logfile) + logfile = join(getenv("WORKSPACE"), logfile) + try: + logmsg = "\n\nTests results:\n" + getoutput("grep 'ERROR\| tests passed' " + 
logfile) + except: + logmsg = "\n\nUnable to read tests log: No such file " + logfile except: - logmsg = '\n\nUnable to read tests log: No such file '+logfile - except: - logmsg = '\n\nUnable to read tests log: WORKSPACE variable not set.' - return logmsg - + logmsg = "\n\nUnable to read tests log: WORKSPACE variable not set." + return logmsg + + # Start of execution # -------------------------------------------------------------------------------- if __name__ == "__main__": - parser = OptionParser( usage="%prog [ options ] \n " - "message-type = BUILDING | BUILD_OK | BUILD_ERROR | UPLOADING | UPLOAD_OK | UPLOAD_ERROR | CLEANUP_OK | CLEANUP_ERROR | TESTS_OK | TESTS_ERROR " - "| RELEASE_NOTES_OK | RELEASE_NOTES_ERROR | INSTALLATION_OK | INSTALLATION_ERROR | INSTALLATION_SKIP") - parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False ) - parser.add_option( "-d" , "--details" , dest="details" , action="store", help="Add aditional details to the message", default=False ) - - opts, args = parser.parse_args( ) - if len( args ) != 6: - parser.error( "Not enough arguments" ) - - jenkins_build_number = int( args[ 0 ] ) - hostname = args[ 1 ] - issue = int( args[ 2 ] ) - arch = args[ 3 ] - release_name = args[ 4 ] - action = args[ 5 ] - - repo = GH_CMSSW_ORGANIZATION + '/' + GH_CMSSW_REPO - ALL_LABELS = [ l["name"] for l in get_issue_labels(repo, issue) ] - test_logfile = "build/"+release_name+"-tests/matrixTests/runall-report-step123-.log" - - if action == POST_BUILDING: - msg_details = '' - if opts.details: - msg_details = opts.details - msg = BUILDING_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number, details=msg_details ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - new_label = arch+'-building' - add_label( repo, issue, new_label ) - - elif action == POST_TOOL_CONF_BUILDING: - - msg_details = '' - if opts.details: - msg_details = opts.details - msg = BUILDING_TOOL_CONF_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number, details=msg_details ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - new_label = arch+'-tool-conf-building' - add_label( repo, issue, new_label ) - - elif action == BUILD_OK: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = BUILD_OK_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-build-ok' ) - - elif action == TOOL_CONF_OK: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = TOOL_CONF_OK_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-tool-conf-ok' ) - - elif action == BUILD_ERROR: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = BUILD_ERROR_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-build-error' ) - - elif action == TOOL_CONF_ERROR: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = TOOL_CONF_ERROR_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-tool-conf-error' ) - - 
elif action == UPLOADING: - - msg = UPLOADING_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number) - post_message( repo, issue , msg ) - - elif action == UPLOAD_OK: - - results_url = BASE_UPLOAD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = UPLOAD_OK_MSG.format( architecture=arch , log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-upload-ok' ) - - elif action == UPLOAD_ERROR: - - results_url = BASE_UPLOAD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = UPLOAD_ERROR_MSG.format( architecture=arch , log_url=results_url ) - post_message( repo, issue , msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-upload-error' ) - - elif action == CLEANUP_OK: - - results_url = BASE_CLEANUP_LOG_URL % (release_name,arch,jenkins_build_number) - msg = CLEANUP_OK_MSG.format( architecture=arch , log_url=results_url ) - post_message( repo, issue , msg ) - - elif action == CLEANUP_ERROR: - - results_url = BASE_CLEANUP_LOG_URL % (release_name,arch,jenkins_build_number) - msg = CLEANUP_ERROR_MSG.format( architecture=arch , log_url=results_url ) - post_message( repo, issue , msg ) - - elif action == TESTS_OK: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = TESTS_OK_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg + get_test_log(test_logfile)) - - elif action == TESTS_ERROR: - - results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number) - msg = TESTS_ERROR_MSG.format( architecture=arch, log_url=results_url ) - post_message( repo, issue , msg + get_test_log(test_logfile)) - - elif action == RELEASE_NOTES_OK: - - msg = RELEASE_NOTES_OK_MSG.format( rel_name=release_name ) - post_message( repo, issue, msg) - - elif action == RELEASE_NOTES_ERROR: - - msg = RELEASE_NOTES_ERROR_MSG.format( rel_name=release_name ) - post_message( repo, issue, msg) - - elif action in [ INSTALLATION_OK, INSTALLATION_SKIP ]: - - results_url = BASE_INSTALLATION_URL.format( rel_name=release_name, - architecture=arch, - job_id=jenkins_build_number ) - #msg = INSTALLATION_OK_MSG.format( architecture=arch , log_url=results_url ) - #if action == INSTALLATION_SKIP: - # msg = INSTALLATION_SKIP_MSG.format( architecture=arch , log_url=results_url ) - #post_message( repo, issue, msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-installation-ok' ) - - elif action == INSTALLATION_ERROR: - - results_url = BASE_INSTALLATION_URL.format( rel_name=release_name, - architecture=arch, - job_id=jenkins_build_number ) - msg = INSTALLATION_ERROR_MSG.format( architecture=arch , log_url=results_url ) - post_message( repo, issue, msg ) - remove_label( repo, issue, arch+'-.*' ) - add_label( repo, issue, arch+'-installation-error' ) - - else: - parser.error( "Message type not recognized" ) + parser = OptionParser( + usage="%prog [ options ] \n " + "message-type = BUILDING | BUILD_OK | BUILD_ERROR | UPLOADING | UPLOAD_OK | UPLOAD_ERROR | CLEANUP_OK | CLEANUP_ERROR | TESTS_OK | TESTS_ERROR " + "| RELEASE_NOTES_OK | RELEASE_NOTES_ERROR | INSTALLATION_OK | INSTALLATION_ERROR | INSTALLATION_SKIP" + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not post on Github", + default=False, + ) + parser.add_option( + "-d", + "--details", + dest="details", + action="store", + help="Add aditional details to the message", + default=False, + ) + + 
opts, args = parser.parse_args() + if len(args) != 6: + parser.error("Not enough arguments") + + jenkins_build_number = int(args[0]) + hostname = args[1] + issue = int(args[2]) + arch = args[3] + release_name = args[4] + action = args[5] + + repo = GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO + ALL_LABELS = [l["name"] for l in get_issue_labels(repo, issue)] + test_logfile = "build/" + release_name + "-tests/matrixTests/runall-report-step123-.log" + + if action == POST_BUILDING: + msg_details = "" + if opts.details: + msg_details = opts.details + msg = BUILDING_MSG.format( + architecture=arch, + machine=hostname, + jk_build_number=jenkins_build_number, + details=msg_details, + ) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + new_label = arch + "-building" + add_label(repo, issue, new_label) + + elif action == POST_TOOL_CONF_BUILDING: + msg_details = "" + if opts.details: + msg_details = opts.details + msg = BUILDING_TOOL_CONF_MSG.format( + architecture=arch, + machine=hostname, + jk_build_number=jenkins_build_number, + details=msg_details, + ) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + new_label = arch + "-tool-conf-building" + add_label(repo, issue, new_label) + + elif action == BUILD_OK: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = BUILD_OK_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-build-ok") + + elif action == TOOL_CONF_OK: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = TOOL_CONF_OK_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-tool-conf-ok") + + elif action == BUILD_ERROR: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = BUILD_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-build-error") + + elif action == TOOL_CONF_ERROR: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = TOOL_CONF_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-tool-conf-error") + + elif action == UPLOADING: + msg = UPLOADING_MSG.format( + architecture=arch, machine=hostname, jk_build_number=jenkins_build_number + ) + post_message(repo, issue, msg) + + elif action == UPLOAD_OK: + results_url = BASE_UPLOAD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = UPLOAD_OK_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-upload-ok") + + elif action == UPLOAD_ERROR: + results_url = BASE_UPLOAD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = UPLOAD_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-upload-error") + + elif action == CLEANUP_OK: + results_url = BASE_CLEANUP_LOG_URL % (release_name, arch, jenkins_build_number) + msg = CLEANUP_OK_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + + elif action == CLEANUP_ERROR: + results_url = BASE_CLEANUP_LOG_URL % 
(release_name, arch, jenkins_build_number) + msg = CLEANUP_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + + elif action == TESTS_OK: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = TESTS_OK_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg + get_test_log(test_logfile)) + + elif action == TESTS_ERROR: + results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number) + msg = TESTS_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg + get_test_log(test_logfile)) + + elif action == RELEASE_NOTES_OK: + msg = RELEASE_NOTES_OK_MSG.format(rel_name=release_name) + post_message(repo, issue, msg) + + elif action == RELEASE_NOTES_ERROR: + msg = RELEASE_NOTES_ERROR_MSG.format(rel_name=release_name) + post_message(repo, issue, msg) + + elif action in [INSTALLATION_OK, INSTALLATION_SKIP]: + results_url = BASE_INSTALLATION_URL.format( + rel_name=release_name, architecture=arch, job_id=jenkins_build_number + ) + # msg = INSTALLATION_OK_MSG.format( architecture=arch , log_url=results_url ) + # if action == INSTALLATION_SKIP: + # msg = INSTALLATION_SKIP_MSG.format( architecture=arch , log_url=results_url ) + # post_message( repo, issue, msg ) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-installation-ok") + + elif action == INSTALLATION_ERROR: + results_url = BASE_INSTALLATION_URL.format( + rel_name=release_name, architecture=arch, job_id=jenkins_build_number + ) + msg = INSTALLATION_ERROR_MSG.format(architecture=arch, log_url=results_url) + post_message(repo, issue, msg) + remove_label(repo, issue, arch + "-.*") + add_label(repo, issue, arch + "-installation-error") + + else: + parser.error("Message type not recognized") diff --git a/report-cmsdist-pull-request-results b/report-cmsdist-pull-request-results deleted file mode 100755 index 16150e70cdbb..000000000000 --- a/report-cmsdist-pull-request-results +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python - -from __future__ import print_function -from optparse import OptionParser -from github import Github -from os.path import expanduser -import requests -import json -from socket import setdefaulttimeout -setdefaulttimeout(120) -from os import environ -JENKINS_PREFIX="jenkins" -try: JENKINS_PREFIX=environ['JENKINS_URL'].strip("/").split("/")[-1] -except: JENKINS_PREFIX="jenkins" -# -# Posts a message in the github issue that triggered the build -# The structure of the message depends on the option used -# - -# ------------------------------------------------------------------------------- -# Global Variables -# -------------------------------------------------------------------------------- -GH_CMSSW_ORGANIZATION = 'cms-sw' -GH_CMSSW_REPO = 'cmssw' -GH_CMSDIST_REPO = 'cmsdist' -POST_TESTS_OK = 'TESTS_OK' -POST_TESTS_FAILED = 'TESTS_FAIL' -POST_TESTING = 'TESTING' -CMSDIST_TESTS_OK_MSG = '+1\nTested compilation until {package}.\nYou can see the log here: {tests_location}' -CMSDIST_TESTS_FAIL_MSG = '-1\nBuild failed ( compiled until {package} ).\n You can see the log here: {tests_location}' -CMSDIST_COMMIT_STATUS_BASE_URL = 'https://api.github.com/repos/cms-sw/cmsdist/statuses/%s' -COMMIT_STATES_DESCRIPTION = { POST_TESTS_OK : [ 'success' , 'Tests OK' ], - POST_TESTS_FAILED : [ 'failure', 'Tests Failed' ], - POST_TESTING : [ 'pending', 'cms-bot is testing this pull request' ] } 
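The mark_commit helper defined just below (in both the removed script and its reformatted .py replacement) reports the test state by POSTing a JSON payload to the GitHub commit-status endpoint, authenticating with the token stored in ~/.github-token. A self-contained sketch of that call under the same assumptions; the SHA, state and URLs here are placeholders, not values used by the script:

import json
from os.path import expanduser
import requests

def set_cmsdist_commit_status(sha, state, target_url, description):
    # POST https://api.github.com/repos/cms-sw/cmsdist/statuses/<sha>
    url = "https://api.github.com/repos/cms-sw/cmsdist/statuses/%s" % sha
    token = open(expanduser("~/.github-token")).read().strip()
    headers = {"Authorization": "token " + token}
    payload = {"state": state, "target_url": target_url, "description": description}
    r = requests.post(url, data=json.dumps(payload), headers=headers)
    return r.status_code

# Hypothetical usage:
# set_cmsdist_commit_status("0123abcd", "pending",
#                           "https://cmssdt.cern.ch/jenkins/job/test-externals-prs/1/",
#                           "cms-bot is testing this pull request")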
-BASE_TESTS_URL='https://cmssdt.cern.ch/SDT/%s-artifacts/cms-externals-pr-integration/{jk_build_number}/results/build.log' % JENKINS_PREFIX -BASE_TESTING_URL='https://cmssdt.cern.ch/%s/job/test-externals-prs/{jk_build_number}/' % JENKINS_PREFIX - -# ------------------------------------------------------------------------------- -# Functions -# -------------------------------------------------------------------------------- - -# -# mars the commit with the result of the tests (success or failure) -# -def mark_commit( action, commit_hash, tests_url ): - if opts.dryRun: - print('Not adding status to commit %s (dry-run):\n %s' % ( commit_hash, action )) - return - - url = CMSDIST_COMMIT_STATUS_BASE_URL % commit_hash - headers = {"Authorization" : "token " + TOKEN } - params = {} - params[ 'state' ] = COMMIT_STATES_DESCRIPTION[ action ][ 0 ] - params[ 'target_url' ] = tests_url - params[ 'description' ] = COMMIT_STATES_DESCRIPTION[ action ][ 1 ] - - data = json.dumps(params) - print('Setting status to %s ' % COMMIT_STATES_DESCRIPTION[ action ][ 0 ]) - print(url) - r = requests.post(url, data=data, headers=headers) - print(r.text) - - -# -# posts a message to the issue in github -# if dry-run is selected it doesn't post the message and just prints it -# -def post_message( issue, msg ): - if opts.dryRun: - print('Not posting message (dry-run):\n %s' % msg) - else: - print('Posting message:\n %s' % msg) - issue.create_comment( msg ) - -# ------------------------------------------------------------------------------- -# Start of execution -# -------------------------------------------------------------------------------- - -if __name__ == "__main__": - parser = OptionParser( usage="%prog [ options ] \n " - "message-type = TESTS_OK | TESTS_FAIL | TESTING " ) - parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False ) - opts, args = parser.parse_args( ) - - if len( args ) != 6: - parser.error( "Not enough arguments" ) - - jenkins_build_number = int( args[ 0 ] ) - issue_id = int( args[ 1 ] ) - arch = args[ 2 ] - commit_hash = args[ 3 ] - action = args[ 4 ] - package_name = args[ 5 ] - - TOKEN=open( expanduser( "~/.github-token" ) ).read( ).strip( ) - GH = Github( login_or_token=TOKEN ) - CMSDIST_REPO = GH.get_organization( GH_CMSSW_ORGANIZATION ).get_repo( GH_CMSDIST_REPO ) - issue = CMSDIST_REPO.get_issue( issue_id ) - - if action == POST_TESTS_OK: - tests_url=BASE_TESTS_URL.format( jk_build_number=jenkins_build_number ) - msg = CMSDIST_TESTS_OK_MSG.format( package=package_name, tests_location=tests_url ) - post_message( issue , msg ) - mark_commit( action, commit_hash, tests_url ) - - elif action == POST_TESTS_FAILED: - tests_url = BASE_TESTS_URL.format( jk_build_number=jenkins_build_number ) - msg = CMSDIST_TESTS_FAIL_MSG.format( package=package_name, tests_location=tests_url ) - post_message( issue , msg ) - mark_commit( action, commit_hash, tests_url ) - - elif action == POST_TESTING: - # This action only marks the commit as testing, does not post any message - tests_url = BASE_TESTING_URL.format( jk_build_number=jenkins_build_number ) - mark_commit( action, commit_hash, tests_url ) - else: - parser.error( "Message type not recognized" ) diff --git a/report-cmsdist-pull-request-results b/report-cmsdist-pull-request-results new file mode 120000 index 000000000000..7a7234bb7948 --- /dev/null +++ b/report-cmsdist-pull-request-results @@ -0,0 +1 @@ +report-cmsdist-pull-request-results.py \ No newline at end of file diff --git 
a/report-cmsdist-pull-request-results.py b/report-cmsdist-pull-request-results.py new file mode 100755 index 000000000000..e747b2df0930 --- /dev/null +++ b/report-cmsdist-pull-request-results.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python + +from __future__ import print_function +from optparse import OptionParser +from github import Github +from os.path import expanduser +import requests +import json +from socket import setdefaulttimeout + +setdefaulttimeout(120) +from os import environ + +JENKINS_PREFIX = "jenkins" +try: + JENKINS_PREFIX = environ["JENKINS_URL"].strip("/").split("/")[-1] +except: + JENKINS_PREFIX = "jenkins" +# +# Posts a message in the github issue that triggered the build +# The structure of the message depends on the option used +# + +# ------------------------------------------------------------------------------- +# Global Variables +# -------------------------------------------------------------------------------- +GH_CMSSW_ORGANIZATION = "cms-sw" +GH_CMSSW_REPO = "cmssw" +GH_CMSDIST_REPO = "cmsdist" +POST_TESTS_OK = "TESTS_OK" +POST_TESTS_FAILED = "TESTS_FAIL" +POST_TESTING = "TESTING" +CMSDIST_TESTS_OK_MSG = ( + "+1\nTested compilation until {package}.\nYou can see the log here: {tests_location}" +) +CMSDIST_TESTS_FAIL_MSG = ( + "-1\nBuild failed ( compiled until {package} ).\n You can see the log here: {tests_location}" +) +CMSDIST_COMMIT_STATUS_BASE_URL = "https://api.github.com/repos/cms-sw/cmsdist/statuses/%s" +COMMIT_STATES_DESCRIPTION = { + POST_TESTS_OK: ["success", "Tests OK"], + POST_TESTS_FAILED: ["failure", "Tests Failed"], + POST_TESTING: ["pending", "cms-bot is testing this pull request"], +} +BASE_TESTS_URL = ( + "https://cmssdt.cern.ch/SDT/%s-artifacts/cms-externals-pr-integration/{jk_build_number}/results/build.log" + % JENKINS_PREFIX +) +BASE_TESTING_URL = ( + "https://cmssdt.cern.ch/%s/job/test-externals-prs/{jk_build_number}/" % JENKINS_PREFIX +) + +# ------------------------------------------------------------------------------- +# Functions +# -------------------------------------------------------------------------------- + + +# +# mars the commit with the result of the tests (success or failure) +# +def mark_commit(action, commit_hash, tests_url): + if opts.dryRun: + print("Not adding status to commit %s (dry-run):\n %s" % (commit_hash, action)) + return + + url = CMSDIST_COMMIT_STATUS_BASE_URL % commit_hash + headers = {"Authorization": "token " + TOKEN} + params = {} + params["state"] = COMMIT_STATES_DESCRIPTION[action][0] + params["target_url"] = tests_url + params["description"] = COMMIT_STATES_DESCRIPTION[action][1] + + data = json.dumps(params) + print("Setting status to %s " % COMMIT_STATES_DESCRIPTION[action][0]) + print(url) + r = requests.post(url, data=data, headers=headers) + print(r.text) + + +# +# posts a message to the issue in github +# if dry-run is selected it doesn't post the message and just prints it +# +def post_message(issue, msg): + if opts.dryRun: + print("Not posting message (dry-run):\n %s" % msg) + else: + print("Posting message:\n %s" % msg) + issue.create_comment(msg) + + +# ------------------------------------------------------------------------------- +# Start of execution +# -------------------------------------------------------------------------------- + +if __name__ == "__main__": + parser = OptionParser( + usage="%prog [ options ] \n " + "message-type = TESTS_OK | TESTS_FAIL | TESTING " + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not post on Github", + 
default=False, + ) + opts, args = parser.parse_args() + + if len(args) != 6: + parser.error("Not enough arguments") + + jenkins_build_number = int(args[0]) + issue_id = int(args[1]) + arch = args[2] + commit_hash = args[3] + action = args[4] + package_name = args[5] + + TOKEN = open(expanduser("~/.github-token")).read().strip() + GH = Github(login_or_token=TOKEN) + CMSDIST_REPO = GH.get_organization(GH_CMSSW_ORGANIZATION).get_repo(GH_CMSDIST_REPO) + issue = CMSDIST_REPO.get_issue(issue_id) + + if action == POST_TESTS_OK: + tests_url = BASE_TESTS_URL.format(jk_build_number=jenkins_build_number) + msg = CMSDIST_TESTS_OK_MSG.format(package=package_name, tests_location=tests_url) + post_message(issue, msg) + mark_commit(action, commit_hash, tests_url) + + elif action == POST_TESTS_FAILED: + tests_url = BASE_TESTS_URL.format(jk_build_number=jenkins_build_number) + msg = CMSDIST_TESTS_FAIL_MSG.format(package=package_name, tests_location=tests_url) + post_message(issue, msg) + mark_commit(action, commit_hash, tests_url) + + elif action == POST_TESTING: + # This action only marks the commit as testing, does not post any message + tests_url = BASE_TESTING_URL.format(jk_build_number=jenkins_build_number) + mark_commit(action, commit_hash, tests_url) + else: + parser.error("Message type not recognized") diff --git a/report-pull-request-results.py b/report-pull-request-results.py index 265bf9abeadd..0d977d3c7b0b 100755 --- a/report-pull-request-results.py +++ b/report-pull-request-results.py @@ -16,332 +16,430 @@ import os, sys from socket import setdefaulttimeout from github_utils import api_rate_limits + setdefaulttimeout(120) SCRIPT_DIR = dirname(abspath(sys.argv[0])) -#----------------------------------------------------------------------------------- -#---- Parser Options -#----------------------------------------------------------------------------------- -parser = OptionParser(usage="usage: %prog ACTION [options] \n ACTION = PARSE_UNIT_TESTS_FAIL | PARSE_BUILD_FAIL " - "| PARSE_MATRIX_FAIL | COMPARISON_READY | GET_BASE_MESSAGE | PARSE_EXTERNAL_BUILD_FAIL " - "| PARSE_ADDON_FAIL | PARSE_CRAB_FAIL | PARSE_CLANG_BUILD_FAIL | MATERIAL_BUDGET " - "| PYTHON3_FAIL | PARSE_GPU_UNIT_TESTS_FAIL | MERGE_COMMITS") - -parser.add_option("-f", "--unit-tests-file", action="store", type="string", dest="unit_tests_file", help="results file to analyse", default='None') -parser.add_option("--f2", action="store", type="string", dest="results_file2", help="second results file to analyse" ) -parser.add_option("--missing_map", action="store", type="string", dest="missing_map", help="Missing workflow map file", default='None' ) -parser.add_option("--recent-merges", action="store", type="string", dest="recent_merges_file", help="file with the recent merges after doing the git cms-merge-topic") -parser.add_option("--no-post", action="store_true", dest="no_post_mesage", help="I will only show the message I would post, but I will not post it in github") -parser.add_option("--repo", action="store", dest="custom_repo", help="Tells me to use a custom repository from the user cms-sw", default="cms-sw/cmssw" ) -parser.add_option("--report-file", action="store", type="string", dest="report_file", help="Report the github comment in report file instead of github", default='') -parser.add_option("--report-url", action="store", type="string", dest="report_url", help="URL where pr results are stored.", default='') -parser.add_option("--commit", action="store", type="string", dest="commit", help="Pull request latest commit", 
default='') +# ----------------------------------------------------------------------------------- +# ---- Parser Options +# ----------------------------------------------------------------------------------- +parser = OptionParser( + usage="usage: %prog ACTION [options] \n ACTION = PARSE_UNIT_TESTS_FAIL | PARSE_BUILD_FAIL " + "| PARSE_MATRIX_FAIL | COMPARISON_READY | GET_BASE_MESSAGE | PARSE_EXTERNAL_BUILD_FAIL " + "| PARSE_ADDON_FAIL | PARSE_CRAB_FAIL | PARSE_CLANG_BUILD_FAIL | MATERIAL_BUDGET " + "| PYTHON3_FAIL | PARSE_GPU_UNIT_TESTS_FAIL | MERGE_COMMITS" +) + +parser.add_option( + "-f", + "--unit-tests-file", + action="store", + type="string", + dest="unit_tests_file", + help="results file to analyse", + default="None", +) +parser.add_option( + "--f2", + action="store", + type="string", + dest="results_file2", + help="second results file to analyse", +) +parser.add_option( + "--missing_map", + action="store", + type="string", + dest="missing_map", + help="Missing workflow map file", + default="None", +) +parser.add_option( + "--recent-merges", + action="store", + type="string", + dest="recent_merges_file", + help="file with the recent merges after doing the git cms-merge-topic", +) +parser.add_option( + "--no-post", + action="store_true", + dest="no_post_mesage", + help="I will only show the message I would post, but I will not post it in github", +) +parser.add_option( + "--repo", + action="store", + dest="custom_repo", + help="Tells me to use a custom repository from the user cms-sw", + default="cms-sw/cmssw", +) +parser.add_option( + "--report-file", + action="store", + type="string", + dest="report_file", + help="Report the github comment in report file instead of github", + default="", +) +parser.add_option( + "--report-url", + action="store", + type="string", + dest="report_url", + help="URL where pr results are stored.", + default="", +) +parser.add_option( + "--commit", + action="store", + type="string", + dest="commit", + help="Pull request latest commit", + default="", +) (options, args) = parser.parse_args() -def openlog(log, mode='r'): - return open(log, mode=mode, encoding='utf-8', errors='ignore') + +def openlog(log, mode="r"): + return open(log, mode=mode, encoding="utf-8", errors="ignore") + def writelog(ref, line): - ref.write(line.encode('ascii', 'ignore').decode('utf-8') if sys.version_info[0] < 3 else line) + ref.write(line.encode("ascii", "ignore").decode("utf-8") if sys.version_info[0] < 3 else line) + + # # Reads the log file for a step in a workflow and identifies the error if it starts with 'Begin Fatal Exception' # def get_wf_error_msg(out_file, filename=True): - if out_file.endswith(MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND): - return '' - error_lines = '' - if exists( out_file ): - reading = False - for line in openlog( out_file): - if reading: - error_lines += line - if '----- End Fatal Exception' in line: - reading = False - break - elif '----- Begin Fatal Exception' in line: - error_lines += '\n'+ line - reading = True - if not error_lines and filename: - error_lines = "/".join(out_file.split("/")[-2:])+'\n' - return error_lines + if out_file.endswith(MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND): + return "" + error_lines = "" + if exists(out_file): + reading = False + for line in openlog(out_file): + if reading: + error_lines += line + if "----- End Fatal Exception" in line: + reading = False + break + elif "----- Begin Fatal Exception" in line: + error_lines += "\n" + line + reading = True + if not error_lines and filename: + error_lines = 
"/".join(out_file.split("/")[-2:]) + "\n" + return error_lines + # # Reads a line that starts with 'ERROR executing', the line has ben splitted by ' ' # it gets the directory where the results for the workflow are, the step that failed # and the log file # -def parse_workflow_info( parts, relval_dir ): - workflow_info = {} - # this is the output file to which the output of command for the step was directed - # it starts asumed as not found - out_file = MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND - workflow_info[ 'step' ] = MATRIX_WORKFLOW_STEP_NA - out_directory = "UNKNOWN" - for i in range( 0 , len( parts ) ): - current_part = parts[ i ] - if ( current_part == 'cd' ): - out_directory = parts[ i+1 ] - out_directory = re.sub( ';' , '', out_directory) - number = re.sub( '_.*$' , '' , out_directory ) - workflow_info[ 'out_directory' ] = out_directory - workflow_info[ 'number' ] = number - if ( current_part == '>' ): - out_file = parts[ i+1 ] - step = re.sub( '_.*log' , '' , out_file) - workflow_info[ 'out_file'] = out_file - workflow_info[ 'step' ] = step - - workflow_info['message'] = get_wf_error_msg(join(relval_dir, out_directory, out_file)) - return workflow_info - +def parse_workflow_info(parts, relval_dir): + workflow_info = {} + # this is the output file to which the output of command for the step was directed + # it starts asumed as not found + out_file = MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND + workflow_info["step"] = MATRIX_WORKFLOW_STEP_NA + out_directory = "UNKNOWN" + for i in range(0, len(parts)): + current_part = parts[i] + if current_part == "cd": + out_directory = parts[i + 1] + out_directory = re.sub(";", "", out_directory) + number = re.sub("_.*$", "", out_directory) + workflow_info["out_directory"] = out_directory + workflow_info["number"] = number + if current_part == ">": + out_file = parts[i + 1] + step = re.sub("_.*log", "", out_file) + workflow_info["out_file"] = out_file + workflow_info["step"] = step + + workflow_info["message"] = get_wf_error_msg(join(relval_dir, out_directory, out_file)) + return workflow_info + + # # Reads the log file for the matrix tests. 
It identifyes which workflows failed # and then proceeds to read the corresponding log file to identify the message # def read_matrix_log_file(matrix_log): - workflows_with_error = [ ] - relval_dir = join(dirname (matrix_log), "runTheMatrix-results") - common_errors = [] - for line in openlog( matrix_log): - line = line.strip() - if 'ERROR executing' in line: - print('processing: %s' % line) - parts = re.sub("\s+"," ",line).split(" ") - workflow_info = parse_workflow_info( parts, relval_dir) - if 'number' in workflow_info: - workflows_with_error.append( workflow_info ) - elif ' Step0-DAS_ERROR ' in line: - print('processing: %s' % line) - parts = line.split("_",2) - workflow_info = {} - workflow_info[ 'step' ] = "step1" - workflow_info[ 'number' ] = parts [0] - workflow_info[ 'message' ] = "DAS Error" - workflows_with_error.append( workflow_info ) - elif 'ValueError: Undefined' in line: - common_errors.append(line+"\n") - - # check if it was timeout - message = "\n## RelVals\n\n" - if 'ERROR TIMEOUT' in line: - message += 'The relvals timed out after 4 hours.\n' - if common_errors: - message += ''.join(common_errors) - cnt = 0 - max_show = 3 - extra_msg = False - for wf in workflows_with_error: - wnum = wf['number'] - cnt += 1 - if 'out_directory' in wf: - wnum = "[%s](%s/runTheMatrix-results/%s)" % (wnum, options.report_url, wf['out_directory']) - if cnt<=max_show: - msg = wf['message'].strip() - if len(msg.split('\n'))>1: - message += '- ' + wnum + '\n```\n' + msg + '\n```\n' - else: - message += '- ' + wnum + '```' + msg + '```\n' - else: - if not extra_msg: - extra_msg = True - message += '
<details>\nExpand to see more relval errors ...\n\n' - message += '- ' + wnum + '\n' - if extra_msg: message += '</details>
\n\n' - send_message_pr(message) + workflows_with_error = [] + relval_dir = join(dirname(matrix_log), "runTheMatrix-results") + common_errors = [] + for line in openlog(matrix_log): + line = line.strip() + if "ERROR executing" in line: + print("processing: %s" % line) + parts = re.sub("\s+", " ", line).split(" ") + workflow_info = parse_workflow_info(parts, relval_dir) + if "number" in workflow_info: + workflows_with_error.append(workflow_info) + elif " Step0-DAS_ERROR " in line: + print("processing: %s" % line) + parts = line.split("_", 2) + workflow_info = {} + workflow_info["step"] = "step1" + workflow_info["number"] = parts[0] + workflow_info["message"] = "DAS Error" + workflows_with_error.append(workflow_info) + elif "ValueError: Undefined" in line: + common_errors.append(line + "\n") + + # check if it was timeout + message = "\n## RelVals\n\n" + if "ERROR TIMEOUT" in line: + message += "The relvals timed out after 4 hours.\n" + if common_errors: + message += "".join(common_errors) + cnt = 0 + max_show = 3 + extra_msg = False + for wf in workflows_with_error: + wnum = wf["number"] + cnt += 1 + if "out_directory" in wf: + wnum = "[%s](%s/runTheMatrix-results/%s)" % ( + wnum, + options.report_url, + wf["out_directory"], + ) + if cnt <= max_show: + msg = wf["message"].strip() + if len(msg.split("\n")) > 1: + message += "- " + wnum + "\n```\n" + msg + "\n```\n" + else: + message += "- " + wnum + "```" + msg + "```\n" + else: + if not extra_msg: + extra_msg = True + message += "
<details>\nExpand to see more relval errors ...\n\n" + message += "- " + wnum + "\n" + if extra_msg: + message += "</details>
\n\n" + send_message_pr(message) + # # reads the addon tests log file and gets the tests that failed # def cmd_to_addon_test(command, addon_dir): - try: - cmdMatch = re.match("^\[(.+):(\d+)\] +(.*)", command) - addon_subdir = cmdMatch.group(1) - logfile = 'step%s.log' % cmdMatch.group(2) - e, o = run_cmd('ls -d %s/%s/%s 2>/dev/null | tail -1' % (addon_dir, addon_subdir, logfile)) - except: - commandbase = command.replace(' ','_').replace('/','_') - logfile='%s.log' % commandbase[:150].replace("'",'').replace('"','').replace('../','') - e, o = run_cmd("ls -d %s/*/%s 2>/dev/null | tail -1" % (addon_dir, logfile)) - if (not e) and o: - return (o.split("/")[-2], get_wf_error_msg(o, False).strip()) - print("ERROR: %s -> %s" % (command, o)) - return ("", "") + try: + cmdMatch = re.match("^\[(.+):(\d+)\] +(.*)", command) + addon_subdir = cmdMatch.group(1) + logfile = "step%s.log" % cmdMatch.group(2) + e, o = run_cmd("ls -d %s/%s/%s 2>/dev/null | tail -1" % (addon_dir, addon_subdir, logfile)) + except: + commandbase = command.replace(" ", "_").replace("/", "_") + logfile = "%s.log" % commandbase[:150].replace("'", "").replace('"', "").replace("../", "") + e, o = run_cmd("ls -d %s/*/%s 2>/dev/null | tail -1" % (addon_dir, logfile)) + if (not e) and o: + return (o.split("/")[-2], get_wf_error_msg(o, False).strip()) + print("ERROR: %s -> %s" % (command, o)) + return ("", "") + def read_addon_log_file(unit_tests_file): - message='\n## AddOn Tests\n\n' - addon_dir = join(dirname(unit_tests_file), "addOnTests") - cnt = 0 - max_show = 3 - extra_msg = False - for line in openlog(unit_tests_file): - line = line.strip() - if( ': FAILED -' in line): - cnt += 1 - tname, err = cmd_to_addon_test(line.split(': FAILED -')[0].strip(), addon_dir) - if not tname: tname = "unknown" - else: tname = "[%s](%s/addOnTests/%s)" % (tname, options.report_url, tname) - if cnt <= max_show: - if err: line = err - message += "- "+ tname + '\n```\n' + line + '\n```\n' - else: - if not extra_msg: - extra_msg = True - message += '
<details>\nExpand to see more addon errors ...\n\n' - message += '- ' + tname + '\n' - if extra_msg: message += '</details>
\n\n' - send_message_pr(message) + message = "\n## AddOn Tests\n\n" + addon_dir = join(dirname(unit_tests_file), "addOnTests") + cnt = 0 + max_show = 3 + extra_msg = False + for line in openlog(unit_tests_file): + line = line.strip() + if ": FAILED -" in line: + cnt += 1 + tname, err = cmd_to_addon_test(line.split(": FAILED -")[0].strip(), addon_dir) + if not tname: + tname = "unknown" + else: + tname = "[%s](%s/addOnTests/%s)" % (tname, options.report_url, tname) + if cnt <= max_show: + if err: + line = err + message += "- " + tname + "\n```\n" + line + "\n```\n" + else: + if not extra_msg: + extra_msg = True + message += ( + "
<details>\nExpand to see more addon errors ...\n\n" + ) + message += "- " + tname + "\n" + if extra_msg: + message += "</details>
\n\n" + send_message_pr(message) + # # reads material budget logs # def read_material_budget_log_file(unit_tests_file): - message = '\n## Material Budget\n\nThere was error running material budget tests.' - send_message_pr(message) + message = "\n## Material Budget\n\nThere was error running material budget tests." + send_message_pr(message) + def get_recent_merges_message(): - message = "" - if options.recent_merges_file: - extra_msg = [] - json_obj = json.load(openlog(options.recent_merges_file)) - for r in json_obj: - for pr in json_obj[r]: extra_msg.append(" - @%s %s#%s" % (json_obj[r][pr]['author'], r, pr)) + message = "" + if options.recent_merges_file: + extra_msg = [] + json_obj = json.load(openlog(options.recent_merges_file)) + for r in json_obj: + for pr in json_obj[r]: + extra_msg.append(" - @%s %s#%s" % (json_obj[r][pr]["author"], r, pr)) - if extra_msg: - message += '\n\nThe following merge commits were also included on top of IB + this PR '\ - 'after doing git cms-merge-topic: \n' + if extra_msg: + message += ( + "\n\nThe following merge commits were also included on top of IB + this PR " + "after doing git cms-merge-topic: \n" + ) + + for l in extra_msg: + message += l + "\n" - for l in extra_msg: message += l + '\n' + message += "\nYou can see more details here:\n" + message += GITLOG_FILE_BASE_URL + "\n" + message += GIT_CMS_MERGE_TOPIC_BASE_URL + "\n" + return message - message += '\nYou can see more details here:\n' - message += GITLOG_FILE_BASE_URL +'\n' - message += GIT_CMS_MERGE_TOPIC_BASE_URL + '\n' - return message def get_pr_tests_info(): - message = "" - if options.commit: - message = "\n**COMMIT**: %s" % options.commit - message += "\n**CMSSW**: " - if 'CMSSW_VERSION' in os.environ: - message += os.environ['CMSSW_VERSION'] - else: - message += "UNKNOWN" - if 'SCRAM_ARCH' in os.environ: - message += '/' + os.environ['SCRAM_ARCH'] - else: - message += '/UNKNOWN' - if ('ENABLE_BOT_TESTS' in os.environ) and os.environ['ENABLE_BOT_TESTS']: - message += "\n**Additional Tests**: %s" % os.environ['ENABLE_BOT_TESTS'] - return message + message = "" + if options.commit: + message = "\n**COMMIT**: %s" % options.commit + message += "\n**CMSSW**: " + if "CMSSW_VERSION" in os.environ: + message += os.environ["CMSSW_VERSION"] + else: + message += "UNKNOWN" + if "SCRAM_ARCH" in os.environ: + message += "/" + os.environ["SCRAM_ARCH"] + else: + message += "/UNKNOWN" + if ("ENABLE_BOT_TESTS" in os.environ) and os.environ["ENABLE_BOT_TESTS"]: + message += "\n**Additional Tests**: %s" % os.environ["ENABLE_BOT_TESTS"] + return message # # reads the build log file looking for the first error # it includes 5 lines before and 5 lines after the error # -def read_build_log_file(build_log, isClang=False , toolconf=False): - line_number = 0 - error_line = 0 - lines_to_keep_before=5 - lines_to_keep_after=5 - lines_since_error=0 - lines_before = [''] - lines_after = [''] - error_found = False - for line in openlog(build_log): - line_number += 1 - if (not error_found): - lines_before.append(line) - if (line_number > lines_to_keep_before): - lines_before.pop(0) - #this is how it determines that a line has an error - if ('error: ' in line) or line.startswith("gmake: "): - error_found = True - error_line = line_number +def read_build_log_file(build_log, isClang=False, toolconf=False): + line_number = 0 + error_line = 0 + lines_to_keep_before = 5 + lines_to_keep_after = 5 + lines_since_error = 0 + lines_before = [""] + lines_after = [""] + error_found = False + for line in openlog(build_log): + 
line_number += 1 + if not error_found: + lines_before.append(line) + if line_number > lines_to_keep_before: + lines_before.pop(0) + # this is how it determines that a line has an error + if ("error: " in line) or line.startswith("gmake: "): + error_found = True + error_line = line_number + if error_found: + if lines_since_error == 0: + lines_since_error += 1 + continue + elif lines_since_error <= lines_to_keep_after: + lines_since_error += 1 + lines_after.append(line) + else: + break + + message = "" + err_type = "compilation warning" if error_found: - if (lines_since_error == 0): - lines_since_error += 1 - continue - elif (lines_since_error <= lines_to_keep_after): - lines_since_error += 1 - lines_after.append(line) - else: - break - - message = "" - err_type = "compilation warning" - if error_found: err_type = "compilation error" - if isClang: - cmd = openlog( build_log).readline() - message += '\n## Clang Build\n\nI found '+err_type+' while trying to compile with clang. ' - message += 'Command used:\n```\n' + cmd +'\n```\n' - elif toolconf: - message += '\n## External Build\n\nI found '+err_type+' when building: ' - else: - message += '\n## Build\n\nI found '+err_type+' when building: ' - - if error_found: - message += '\n\n
<pre>'
-    for line in lines_before:
-      message += line + '\f'
-    for line in lines_after:
-      message += line + '\f'
-    message += '</pre>
' - else: - message += " See details on the summary page." - - send_message_pr(message) + err_type = "compilation error" + if isClang: + cmd = openlog(build_log).readline() + message += ( + "\n## Clang Build\n\nI found " + err_type + " while trying to compile with clang. " + ) + message += "Command used:\n```\n" + cmd + "\n```\n" + elif toolconf: + message += "\n## External Build\n\nI found " + err_type + " when building: " + else: + message += "\n## Build\n\nI found " + err_type + " when building: " + + if error_found: + message += "\n\n
"
+        for line in lines_before:
+            message += line + "\f"
+        for line in lines_after:
+            message += line + "\f"
+        message += "</pre>
" + else: + message += " See details on the summary page." + + send_message_pr(message) + # # reads the unit tests file and gets the tests that failed # def read_unit_tests_file(unit_tests_file): - errors_found='' - err_cnt = 0 - for line in openlog(unit_tests_file): - if( 'had ERRORS' in line): - err_cnt += 1 - if err_cnt == 4: - errors_found += "and more ...\n" - if err_cnt > 3: - continue - errors_found += line - - - message = '\n## Unit Tests\n\nI found %s errors in the following unit tests:\n\n
<pre>%s</pre>
' % (err_cnt, errors_found) - send_message_pr(message) + errors_found = "" + err_cnt = 0 + for line in openlog(unit_tests_file): + if "had ERRORS" in line: + err_cnt += 1 + if err_cnt == 4: + errors_found += "and more ...\n" + if err_cnt > 3: + continue + errors_found += line + + message = ( + "\n## Unit Tests\n\nI found %s errors in the following unit tests:\n\n
<pre>%s</pre>
" + % (err_cnt, errors_found) + ) + send_message_pr(message) def read_gpu_tests_file(unit_tests_file): - errors_found='' - err_cnt = 0 - for line in openlog(unit_tests_file): - if( 'had ERRORS' in line): - err_cnt += 1 - if err_cnt == 4: - errors_found += "and more ...\n" - if err_cnt > 3: - continue - errors_found += line - message = '\n## GPU Unit Tests\n\nI found %s errors in the following unit tests:\n\n
<pre>%s</pre>
' % (err_cnt, errors_found) - send_message_pr(message) + errors_found = "" + err_cnt = 0 + for line in openlog(unit_tests_file): + if "had ERRORS" in line: + err_cnt += 1 + if err_cnt == 4: + errors_found += "and more ...\n" + if err_cnt > 3: + continue + errors_found += line + message = ( + "\n## GPU Unit Tests\n\nI found %s errors in the following unit tests:\n\n
<pre>%s</pre>
" + % (err_cnt, errors_found) + ) + send_message_pr(message) + # # reads the python3 file and gets the tests that failed # def read_python3_file(python3_file): - errors_found='' - err_cnt = 0 - for line in openlog(python3_file): - if( ' Error compiling ' in line): - err_cnt += 1 - if err_cnt == 4: - errors_found += "and more ...\n" - if err_cnt > 3: - continue - errors_found += line - message = '\n#Python3\n\nI found %s errors: \n\n
<pre>%s</pre>
' % (err_cnt, errors_found) - send_message_pr(message) + errors_found = "" + err_cnt = 0 + for line in openlog(python3_file): + if " Error compiling " in line: + err_cnt += 1 + if err_cnt == 4: + errors_found += "and more ...\n" + if err_cnt > 3: + continue + errors_found += line + message = "\n#Python3\n\nI found %s errors: \n\n
<pre>%s</pre>
" % (err_cnt, errors_found) + send_message_pr(message) # @@ -350,153 +448,184 @@ def read_python3_file(python3_file): # and if it is it doesn't post it again # def send_message_pr(message): - if options.no_post_mesage: - print('Not posting message (dry-run): \n ', message) + if options.no_post_mesage: + print("Not posting message (dry-run): \n ", message) + return + with openlog(options.report_file, "a") as rfile: + writelog(rfile, message + "\n") return - with openlog(options.report_file, "a") as rfile: - writelog(rfile, message+"\n") - return # # sends an approval message for a pr in cmssw # def add_to_report(message): - if not message: return - with openlog(options.report_file, "a") as rfile: - writelog(rfile, message+"\n") - return + if not message: + return + with openlog(options.report_file, "a") as rfile: + writelog(rfile, message + "\n") + return + def get_base_message(): - add_to_report(get_pr_tests_info()) - return - -def send_comparison_ready_message(comparison_errors_file, wfs_with_das_inconsistency_file, missing_map ): - message = '\n## Comparison Summary\n\n' - wfs_with_errors = '' - for line in openlog( comparison_errors_file ): - line = line.rstrip() - parts = line.split( ';' ) - wf = parts[ 0 ] - step = parts[ 1 ] - wfs_with_errors += ( wf + ' step ' + step + '\n' ) - - if wfs_with_errors != '': - error_info = COMPARISON_INCOMPLETE_MSG.format( workflows=wfs_with_errors ) - message += '\n\n' + error_info - - wfs_das_inconsistency = openlog( wfs_with_das_inconsistency_file).readline().rstrip().rstrip(',').split( ',' ) - - if '' in wfs_das_inconsistency: - wfs_das_inconsistency.remove( '' ) - - if wfs_das_inconsistency: - das_inconsistency_info = DAS_INCONSISTENCY_MSG.format( workflows=', '.join( wfs_das_inconsistency ) ) - message += '\n\n' + das_inconsistency_info - - if missing_map and exists (missing_map): - missing = [] - for line in openlog(missing_map): - line = line.strip() - if line: missing.append(" * "+line) - if missing: - from categories import COMPARISON_MISSING_MAP - map_notify = ", ".join([ "@"+u for u in COMPARISON_MISSING_MAP] ) - message += "\n\n"+map_notify+" comparisons for the following workflows were not done due to missing matrix map:\n"+"\n".join(missing) - - alt_comp_dir = join(dirname(comparison_errors_file), "upload","alternative-comparisons") - print("Alt comparison directory: ",alt_comp_dir) - if exists(alt_comp_dir): - err, out = run_cmd("grep ' Compilation failed' %s/runDQMComp-*.log" % alt_comp_dir) - print(out) - if not err: - err_wfs = {} - for line in out.split("\n"): - wf = line.split(".log:",1)[0].split("runDQMComp-")[-1] - err_wfs [wf]=1 - if err_wfs: message += "\n\nAlternative comparison was/were failed for workflow(s):\n"+"\n".join(list(err_wfs.keys())) - - JRCompSummaryLog = join(dirname(comparison_errors_file), "upload/validateJR/qaResultsSummary.log") - print("JR comparison Summary: ",JRCompSummaryLog) - if exists(JRCompSummaryLog): - err, out = run_cmd("cat %s" % JRCompSummaryLog) - if (not err) and out: - message += "\n\n**Summary**:\n" - for l in out.split("\n"): - if l.strip(): message += " - %s\n" % l.strip() - - send_message_pr(message ) + add_to_report(get_pr_tests_info()) + return + + +def send_comparison_ready_message( + comparison_errors_file, wfs_with_das_inconsistency_file, missing_map +): + message = "\n## Comparison Summary\n\n" + wfs_with_errors = "" + for line in openlog(comparison_errors_file): + line = line.rstrip() + parts = line.split(";") + wf = parts[0] + step = parts[1] + wfs_with_errors += wf + " step " + 
step + "\n" + + if wfs_with_errors != "": + error_info = COMPARISON_INCOMPLETE_MSG.format(workflows=wfs_with_errors) + message += "\n\n" + error_info + + wfs_das_inconsistency = ( + openlog(wfs_with_das_inconsistency_file).readline().rstrip().rstrip(",").split(",") + ) + + if "" in wfs_das_inconsistency: + wfs_das_inconsistency.remove("") + + if wfs_das_inconsistency: + das_inconsistency_info = DAS_INCONSISTENCY_MSG.format( + workflows=", ".join(wfs_das_inconsistency) + ) + message += "\n\n" + das_inconsistency_info + + if missing_map and exists(missing_map): + missing = [] + for line in openlog(missing_map): + line = line.strip() + if line: + missing.append(" * " + line) + if missing: + from categories import COMPARISON_MISSING_MAP + + map_notify = ", ".join(["@" + u for u in COMPARISON_MISSING_MAP]) + message += ( + "\n\n" + + map_notify + + " comparisons for the following workflows were not done due to missing matrix map:\n" + + "\n".join(missing) + ) + + alt_comp_dir = join(dirname(comparison_errors_file), "upload", "alternative-comparisons") + print("Alt comparison directory: ", alt_comp_dir) + if exists(alt_comp_dir): + err, out = run_cmd("grep ' Compilation failed' %s/runDQMComp-*.log" % alt_comp_dir) + print(out) + if not err: + err_wfs = {} + for line in out.split("\n"): + wf = line.split(".log:", 1)[0].split("runDQMComp-")[-1] + err_wfs[wf] = 1 + if err_wfs: + message += ( + "\n\nAlternative comparison was/were failed for workflow(s):\n" + + "\n".join(list(err_wfs.keys())) + ) + + JRCompSummaryLog = join( + dirname(comparison_errors_file), "upload/validateJR/qaResultsSummary.log" + ) + print("JR comparison Summary: ", JRCompSummaryLog) + if exists(JRCompSummaryLog): + err, out = run_cmd("cat %s" % JRCompSummaryLog) + if (not err) and out: + message += "\n\n**Summary**:\n" + for l in out.split("\n"): + if l.strip(): + message += " - %s\n" % l.strip() + + send_message_pr(message) + def complain_missing_param(param_name): - print('\n') - print('I need a %s to continue' % param_name) - print('\n') - parser.print_help() - exit() - -#---------------------------------------------------------------------------------------- -#---- Global variables -#--------------------------------------------------------------------------------------- - -COMPARISON_INCOMPLETE_MSG = 'There are some workflows for which there are errors in the baseline:\n {workflows} ' \ - 'The results for the comparisons for these workflows could be incomplete \n' \ - 'This means most likely that the IB is having errors in the relvals.'\ - 'The error does NOT come from this pull request' -DAS_INCONSISTENCY_MSG = 'The workflows {workflows} have different files in step1_dasquery.log than the ones ' \ - 'found in the baseline. You may want to check and retrigger the tests if necessary. 
' \ - 'You can check it in the "files" directory in the results of the comparisons' - -MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND = 'Not Found' -MATRIX_WORKFLOW_STEP_NA = 'N/A' - -#---------------------------------------------------------------------------------------- -#---- Check arguments and options -#--------------------------------------------------------------------------------------- - -if (len(args)==0): - print('you have to choose an action') - parser.print_help() - exit() + print("\n") + print("I need a %s to continue" % param_name) + print("\n") + parser.print_help() + exit() + + +# ---------------------------------------------------------------------------------------- +# ---- Global variables +# --------------------------------------------------------------------------------------- + +COMPARISON_INCOMPLETE_MSG = ( + "There are some workflows for which there are errors in the baseline:\n {workflows} " + "The results for the comparisons for these workflows could be incomplete \n" + "This means most likely that the IB is having errors in the relvals." + "The error does NOT come from this pull request" +) +DAS_INCONSISTENCY_MSG = ( + "The workflows {workflows} have different files in step1_dasquery.log than the ones " + "found in the baseline. You may want to check and retrigger the tests if necessary. " + 'You can check it in the "files" directory in the results of the comparisons' +) + +MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND = "Not Found" +MATRIX_WORKFLOW_STEP_NA = "N/A" + +# ---------------------------------------------------------------------------------------- +# ---- Check arguments and options +# --------------------------------------------------------------------------------------- + +if len(args) == 0: + print("you have to choose an action") + parser.print_help() + exit() ACTION = args[0] -if (ACTION == 'prBot.py'): - print('you have to choose an action') - parser.print_help() - exit() - -print('you chose the action %s' % ACTION) - -if (options.report_url=='') or (options.report_file==''): - complain_missing_param( 'report url/report file' ) - exit() - -GITLOG_FILE_BASE_URL='%s/git-recent-commits.json' % options.report_url -GIT_CMS_MERGE_TOPIC_BASE_URL='%s/git-merge-result' % options.report_url - -if ( ACTION == 'GET_BASE_MESSAGE' ): - get_base_message() -elif ( ACTION == 'PARSE_UNIT_TESTS_FAIL' ): - read_unit_tests_file(options.unit_tests_file) -elif ( ACTION == 'PARSE_EXTERNAL_BUILD_FAIL' ): - read_build_log_file(options.unit_tests_file, toolconf=True ) -elif ( ACTION == 'PARSE_BUILD_FAIL' ): - read_build_log_file(options.unit_tests_file) -elif ( ACTION == 'PARSE_MATRIX_FAIL' ): - read_matrix_log_file(options.unit_tests_file ) -elif ( ACTION == 'PARSE_ADDON_FAIL' ): - read_addon_log_file(options.unit_tests_file ) -elif ( ACTION == 'COMPARISON_READY' ): - send_comparison_ready_message(options.unit_tests_file, options.results_file2, options.missing_map ) -elif( ACTION == 'PARSE_CLANG_BUILD_FAIL'): - read_build_log_file(options.unit_tests_file, isClang=True ) -elif( ACTION == 'PYTHON3_FAIL'): - read_python3_file(options.unit_tests_file ) -elif( ACTION == 'MATERIAL_BUDGET'): - read_material_budget_log_file(options.unit_tests_file) -elif ( ACTION == 'MERGE_COMMITS'): - add_to_report(get_recent_merges_message()) -elif ( ACTION == 'PARSE_GPU_UNIT_TESTS_FAIL'): - read_gpu_tests_file(options.unit_tests_file) +if ACTION == "prBot.py": + print("you have to choose an action") + parser.print_help() + exit() + +print("you chose the action %s" % ACTION) + +if (options.report_url == "") 
or (options.report_file == ""): + complain_missing_param("report url/report file") + exit() + +GITLOG_FILE_BASE_URL = "%s/git-recent-commits.json" % options.report_url +GIT_CMS_MERGE_TOPIC_BASE_URL = "%s/git-merge-result" % options.report_url + +if ACTION == "GET_BASE_MESSAGE": + get_base_message() +elif ACTION == "PARSE_UNIT_TESTS_FAIL": + read_unit_tests_file(options.unit_tests_file) +elif ACTION == "PARSE_EXTERNAL_BUILD_FAIL": + read_build_log_file(options.unit_tests_file, toolconf=True) +elif ACTION == "PARSE_BUILD_FAIL": + read_build_log_file(options.unit_tests_file) +elif ACTION == "PARSE_MATRIX_FAIL": + read_matrix_log_file(options.unit_tests_file) +elif ACTION == "PARSE_ADDON_FAIL": + read_addon_log_file(options.unit_tests_file) +elif ACTION == "COMPARISON_READY": + send_comparison_ready_message( + options.unit_tests_file, options.results_file2, options.missing_map + ) +elif ACTION == "PARSE_CLANG_BUILD_FAIL": + read_build_log_file(options.unit_tests_file, isClang=True) +elif ACTION == "PYTHON3_FAIL": + read_python3_file(options.unit_tests_file) +elif ACTION == "MATERIAL_BUDGET": + read_material_budget_log_file(options.unit_tests_file) +elif ACTION == "MERGE_COMMITS": + add_to_report(get_recent_merges_message()) +elif ACTION == "PARSE_GPU_UNIT_TESTS_FAIL": + read_gpu_tests_file(options.unit_tests_file) else: - print("I don't recognize that action!") + print("I don't recognize that action!") diff --git a/report-summary-merged-prs b/report-summary-merged-prs deleted file mode 100755 index c887ac665ad0..000000000000 --- a/report-summary-merged-prs +++ /dev/null @@ -1,1708 +0,0 @@ -#! /usr/bin/env python -""" -This script generates json file (like CMSSW_10_0_X.json) which is then used to render cmssdt ib page. -""" -from __future__ import print_function -from optparse import OptionParser -import subprocess -import re -import json -from pickle import Unpickler -from os.path import basename, dirname, exists, join, expanduser, getmtime -from glob import glob -from github import Github -from pprint import pformat - -from cmsutils import get_config_map_properties -from github_utils import get_merge_prs -from cms_static import GH_CMSSW_REPO, GH_CMSSW_ORGANIZATION -from releases import CMSSW_DEVEL_BRANCH -from socket import setdefaulttimeout - -setdefaulttimeout(120) -CMSSW_REPO_NAME = join(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO) - -# ----------------------------------------------------------------------------------- -# ---- Parser Options -# ----------------------------------------------------------------------------------- -parser = OptionParser( - usage="usage: %prog CMSSW_REPO GITHUB_IO_REPO START_DATE" - "\n CMSSW_REPO: location of the cmssw repository. This must be a bare clone ( git clone --bare )" - "\n CMSDIST_REPO: location of the cmsdist repository. This must be a normal clone" - "\n GITHUB_IO_REPO: location of the github.io repository. This must be a normal clone" - "\n for example: cmssw.git or /afs/cern.ch/cms/git-cmssw-mirror/cmssw.git" - "\n START_DATE: the date of the earliest IB to show. 
It must be in the format" - "\n ---" - "\n For example:" - "\n 2014-10-08-1400" -) - -parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="Do not post on Github", default=False) - -(options, args) = parser.parse_args() - -""" ------------------------------------------------------------------------------------ ----- Output Schema ------------------------------------------------------------------------------------ - -comparisons": [ , , ] - -Each dict contains the result of the comparison between 2 tags in cmssw. For example -CMSSW_5_3_X_2015-02-03-0200 with CMSSW_5_3_X_2015-02-04-0200 which correspond -to the IB CMSSW_5_3_X_2015-02-04-0200 - -The schema of the dictionary is as folows: -{ - "addons": [], - "builds": [], - "fwlite": [], - "compared_tags": "", - "utests": [], - "gpu_utests": [], - "cmsdistTags": {}, - "relvals": [], - "static_checks": "", - "valgrind": "", - "material_budget" : "", - "isIB": Boolean, - "tests_archs": [], - "release_name": "", - "merged_prs": [], - "RVExceptions" : Boolean - -} -""" - - -# ----------------------------------------------------------------------------------- -# ---- Review of arguments -# ----------------------------------------------------------------------------------- - -if (len(args) < 4): - print('not enough arguments\n') - parser.print_help() - exit() - -# Remember that the cmssw repo is a bare clone while cmsdist is a complete clone -CMSSW_REPO_LOCAL = args[0] -GITHUB_IO_REPO = args[1] -CMSDIST_REPO = args[2] -START_DATE = args[3] -if len(args) >= 5: - CMS_PRS = args[4] -else: - CMS_PRS = "cms-prs" - - -# ----------------------------------------------------------------------------------- -# ---- Fuctions -# ----------------------------------------------------------------------------------- -def print_verbose(msg): - """ - Takes into account the verbose option. If the option is activated it doesn't print anything. 
- """ - if options.verbose: - print (msg) - - -def parse_config_map_line(line): - """ - reads a line of config.map and returns a dictionary with is parameters - """ - params = {} - parts = line.split(';') - - for part in parts: - if part == '': - continue - key = part.split('=')[0] - value = part.split('=')[1] - params[key] = value - - return params - - -def get_config_map_params(): - """ - gets the list of architectures by reading config.map, they are saved in ARCHITECTURES - gets the releases branches from config.map, they are saved in RELEASES_BRANCHES - it maps the branches for all the releases this is to take into account the case in which the base branch - is different from the release queue - """ - f = open(CONFIG_MAP_FILE, 'r') - for line in f.readlines(): - params = parse_config_map_line(line.rstrip()) - if not params: continue - print(params) - - arch = params['SCRAM_ARCH'] - if arch not in ARCHITECTURES: - ARCHITECTURES.append(arch) - - release_queue = params['RELEASE_QUEUE'] - base_branch = params.get('RELEASE_BRANCH') - if base_branch: - if base_branch == "master": base_branch = CMSSW_DEVEL_BRANCH - RELEASES_BRANCHES[release_queue] = base_branch - else: - RELEASES_BRANCHES[release_queue] = release_queue - - sp_rel_name = release_queue.split('_')[3] - - if sp_rel_name != 'X' and sp_rel_name not in SPECIAL_RELEASES: - SPECIAL_RELEASES.append(sp_rel_name) - - if (not params.get('DISABLED') or params.get('IB_WEB_PAGE')): - if not RELEASES_ARCHS.get(release_queue): - RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue] = {} - RELEASES_ARCHS[release_queue] = [] - RELEASES_ARCHS[release_queue].append(arch) - RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue][arch] = params['CMSDIST_TAG'] - if release_queue not in RELEASE_QUEUES: - RELEASE_QUEUES.append(release_queue) - - additional_tests = params.get('ADDITIONAL_TESTS') - - if additional_tests: - if RELEASE_ADITIONAL_TESTS.get(release_queue): continue - RELEASE_ADITIONAL_TESTS[release_queue] = {} - # if not RELEASE_ADITIONAL_TESTS.get( release_queue ): - # RELEASE_ADITIONAL_TESTS[ release_queue ] = {} - RELEASE_ADITIONAL_TESTS[release_queue][arch] = [test for test in additional_tests.split(',') if - test != 'dqm'] - - SP_REL_REGEX = "|".join(SPECIAL_RELEASES) - RELEASE_QUEUES.sort() - - print() - print('---------------------------') - print('Read config.map:') - print('ARCHS:') - print(ARCHITECTURES) - print('--') - print(RELEASES_ARCHS) - print('RELEASES_BRANCHES:') - print(RELEASES_BRANCHES) - print('special releases') - print(SPECIAL_RELEASES) - print('aditional tests') - print(RELEASE_ADITIONAL_TESTS) - print('I am going to show:') - print(RELEASE_QUEUES) - print('---------------------------') - print() - - -def get_tags_from_line(line, release_queue): - """ - reads a line of the output of git log and returns the tags that it contains - if there are no tags it returns an empty list - it applies filters according to the release queue to only get the - tags related to the current release queue - """ - if 'tags->' not in line: - return [] - tags_str = line.split('tags->')[1] - if re.match('.*SLHC$', release_queue): - filter = release_queue[:-6] + '[X|0-9]_SLHC.*' - else: - filter = release_queue[:-1] + '[X|0-9].*' - - ## if the tags part is equal to ," there are no tags - if tags_str != ',"': - tags = tags_str.split(',', 1)[1].strip().replace('(', '').replace(')', '').split(',') - # remove te word "tag: " - tags = [t.replace('tag: ', '') for t in tags] - # I also have to remove the branch name because it otherwise will always appear - # I 
also remove tags that have the string _DEBUG_TEST, they are used to create test IBs - tags = [t for t in tags if re.match(filter, t.strip()) and (t.strip().replace('"', '') != release_queue) and ( - 'DEBUG_TEST' not in t)] - return [t.replace('"', '').replace('tag:', '').strip() for t in tags] - else: - return [] - - -# ----------------------------------------------------------------------------------- -# ---- Fuctions -- Analize Git outputs -# ----------------------------------------------------------------------------------- -def determine_build_error(nErrorInfo): - a = BuildResultsKeys.COMP_ERROR in nErrorInfo.keys() - b = BuildResultsKeys.LINK_ERROR in nErrorInfo.keys() - c = BuildResultsKeys.MISC_ERROR in nErrorInfo.keys() - d = BuildResultsKeys.DWNL_ERROR in nErrorInfo.keys() - e = BuildResultsKeys.DICT_ERROR in nErrorInfo.keys() - f = BuildResultsKeys.PYTHON_ERROR in nErrorInfo.keys() - return a or b or c or d or e or f - - -def determine_build_warning(nErrorInfo): - a = BuildResultsKeys.PYTHON3_ERROR in nErrorInfo.keys() - b = BuildResultsKeys.COMP_WARNING in nErrorInfo.keys() - return a or b - - -def get_results_one_addOn_file(file): - look_for_err_cmd = 'grep "failed" %s' % file - result, err, ret_code = get_output_command(look_for_err_cmd) - if ' 0 failed' in result: - return True - else: - return False - - -def get_results_one_unitTests_file(file, grep_str="ERROR"): - """ - given a unitTests-summary.log it determines if the test passed or not - it returns a tuple, the first element is one of the possible values of PossibleUnitTestResults - The second element is a dictionary which indicates how many tests failed - """ - look_for_err_cmd = 'grep -h -c "%s" %s' % (grep_str, file) - result, err, ret_code = get_output_command(look_for_err_cmd) - - result = result.rstrip() - - details = {'num_fails': result} - - if result != '0': - return PossibleUnitTestResults.FAILED, details - else: - return PossibleUnitTestResults.PASSED, details - - -def get_results_one_relval_file(filename): - """ - given a runall-report-step123-.log file it returns the result of the relvals - it returns a tuple, the first element indicates if the tests passed or not - the second element is a dictionary which shows the details of how many relvals pased - and how many failed - """ - summary_file = filename.replace("/runall-report-step123-.log", "/summary.json") - if exists(summary_file) and getmtime(summary_file)>getmtime(filename): - try: - details = json.load(open(summary_file)) - return details['num_failed'] == 0, details - except: - pass - - details = {'num_passed': 0, - 'num_failed': 1, - 'known_failed': 0} - - print_verbose('Analyzing: ' + filename) - lines = file(filename).read().split("\n") - results = [x for x in lines if ' tests passed' in x] - if len(results) == 0: - return False, details - out = results.pop() - - num_passed_sep = out.split(',')[0].replace(' tests passed', '').strip() - num_failed_sep = out.split(',')[1].replace(' failed', '').strip() - try: - details["num_passed"] = sum([int(num) for num in num_passed_sep.split(' ')]) - details["num_failed"] = sum([int(num) for num in num_failed_sep.split(' ')]) - except ValueError as e: - print("Error while reading file %s" % filename) - print(e) - return False, details - with open(summary_file, "w") as ref: - json.dump(details, ref) - return details["num_failed"] == 0, details - - -def get_results_details_one_build_file(file, type): - """ - Given a logAnalysis.pkl file, it determines if the tests passed or not - it returns a tuple, the first 
element is one of the values of PossibleBuildResults - The second element is a dictionary containing the details of the results. - If the tests are all ok this dictionary is empty - """ - summFile = open(file, 'r') - pklr = Unpickler(summFile) - [rel, plat, anaTime] = pklr.load() - errorKeys = pklr.load() - nErrorInfo = pklr.load() - summFile.close() - # if type=='builds': - # py3_log = join(dirname(dirname(file)),'python3.log') - # if exists (py3_log): - # py3 = open(py3_log, 'r') - # nErrorInfo[BuildResultsKeys.PYTHON3_ERROR]=len([l for l in py3.readlines() if ' Error compiling ' in l]) - - if determine_build_error(nErrorInfo): - return PossibleBuildResults.ERROR, nErrorInfo - elif determine_build_warning(nErrorInfo): - return PossibleBuildResults.WARNING, nErrorInfo - else: - return PossibleBuildResults.PASSED, nErrorInfo - - -def analyze_tests_results(output, results, arch, type): - """ - parses the tests results for each file in output. It distinguishes if it is - build, unit tests, relvals, or addon tests logs. The the result of the parsing - is saved in the parameter results. - type can be 'relvals', 'utests', 'gpu_tests', 'addON', 'builds', 'fwlite' - - schema of results: - { - "": [ result_arch1, result_arch2, ... result_archN ] - } - schema of result_arch - { - "arch" : "" - "file" : "" - "passed" : ( if not applicable the value is true ) - "details" :
( can be empty if not applicable, but not undefined ) - } - """ - for line in output.splitlines(): - m = re.search('/(CMSSW_[^/]+)/', line) - if not m: - print_verbose('Ignoring file:\n%s' % line) - continue - - print("Processing ", type, ":", line) - rel_name = m.group(1) - result_arch = {} - result_arch['arch'] = arch - result_arch['file'] = line - - details = {} - passed = None - if type == 'relvals': - passed, details = get_results_one_relval_file(line) - result_arch['done'] = False - if exists(join(dirname(line), "done")) or exists(join(dirname(line), "all.pages")): - result_arch['done'] = True - elif type == 'utests': - passed, details = get_results_one_unitTests_file(line) - elif type == 'gpu_utests': - passed, details = get_results_one_unitTests_file(line) - elif type == 'addOn': - passed = get_results_one_addOn_file(line) - elif type == 'builds': - passed, details = get_results_details_one_build_file(line, type) - elif type == 'fwlite': - passed, details = get_results_details_one_build_file(line, type) - elif type == 'python3': - passed, details = get_results_one_unitTests_file(line, " Error compiling ") - elif type == 'invalid-includes': - errs = len(json.load(open(line))) - if errs: - passed = PossibleUnitTestResults.FAILED - details = {'num_fails': str(errs)} - else: - passed = PossibleUnitTestResults.PASSED - else: - print('not a valid test type %s' % type) - exit(1) - - result_arch['passed'] = passed - result_arch['details'] = details - - if rel_name not in results.keys(): - results[rel_name] = [] - - results[rel_name].append(result_arch) - - -def execute_magic_command_find_rv_exceptions_results(): - """ - Searchs in github.io for the results for relvals exceptions - """ - print ('Finding relval exceptions results...') - command_to_execute = MAGIC_COMMAND_FIND_EXCEPTIONS_RESULTS_RELVALS - out, err, ret_code = get_output_command(command_to_execute) - - rv_exception_results = {} - - for line in out.splitlines(): - line_parts = line.split('/') - ib_name = line_parts[-1].replace('EXCEPTIONS.json', '') + line_parts[-2] - rv_exception_results[ib_name] = True - - return rv_exception_results - - -def get_tags(git_log_output, release_queue): - """ - returns a list of tags based on git log output - It uses the release queue name to filter the tags, this avoids having - in the result tags from other queues that may come from automatic merges. - For example, if release_queue is 7_2_X, it will drop tags like CMSSW_7_2_THREADED_X_2014-09-15-0200 - """ - tags = [] - for line in git_log_output.splitlines(): - tags += get_tags_from_line(line, release_queue) - - if (len(tags) == 0): - print("ATTENTION:") - print("looks like %s has not changed between the tags specified!" 
% release_queue) - command_to_execute = MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG.replace('END_TAG', release_queue) - out, err, ret_code = get_output_command(command_to_execute) - print(out) - tags = get_tags_from_line(out, release_queue) - print(tags) - - return tags - - -def get_day_number_tag(tag): - """ - returns the number of the day of a tag - if it is not an IB tag, it returns -1 - """ - parts = tag.split("-") - if len(parts) == 1: - return -1 - else: - day = parts[2] - try: - return int(day) - except ValueError: - return -1 - - -def is_tag_list_suspicious(tags): - """ - uses some heuristics to tell if the list of tags seems to be too short - """ - if len(tags) < 7: - return True - day_first_tag = get_day_number_tag(tags[-1]) - day_second_tag = get_day_number_tag(tags[-2]) - return day_second_tag - day_first_tag > 1 - - -def is_recent_branch(err): - """ - determines if the error is because one of the tags does not exist - this can happen when the branch that is being analyzed has been - created recently - """ - return "unknown revision or path not in the working tree" in err - - -# ----------------------------------------------------------------------------------- -# ---- Fuctions -- Execute Magic commands -# ----------------------------------------------------------------------------------- - -def look_for_missing_tags(start_tag, release_queue): - """ - this calls the git log command with the first tag to look for missing - tags that were not found previously - """ - command_to_execute = MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG.replace('END_TAG', start_tag) - out, err, ret_code = get_output_command(command_to_execute) - tags = get_tags_from_line(out, release_queue) - return tags - - -def get_output_command(command_to_execute): - """ - Executes the command that is given as parameter, returns a tuple out,err,ret_code - with the output, error and return code obtained - """ - print_verbose('Executing:') - print_verbose(command_to_execute) - - p = subprocess.Popen(command_to_execute, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - ret_code = p.returncode - - if ret_code != 0: - print_verbose(ret_code) - print_verbose('Error:') - print_verbose(err) - - return out, err, ret_code - - -def execute_magic_command_tags(start_tag, end_tag, release_queue, release_branch, ignore_tags=None): - """ - Gets the tags between start_tag and end_tag, the release_queue is used as a filter - to ignore tags that are from other releases - """ - print_verbose('Release Queue:') - print_verbose(release_queue) - print_verbose('Release Branch:') - print_verbose(release_branch) - - # if it is a special release queue based on a branch with a different name, I use the release_branch as end tag - if release_queue == release_branch: - print_verbose('These IBs have a custom release branch') - real_end_tag = end_tag - else: - real_end_tag = release_branch - - print_verbose('Start tag:') - print_verbose(start_tag) - print_verbose('End tag:') - print_verbose(real_end_tag) - command_to_execute = MAGIC_COMMAND_TAGS.replace('START_TAG', start_tag).replace('END_TAG', real_end_tag) - command_to_execute = command_to_execute.replace('RELEASE_QUEUE',release_queue) - print("Running:", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - - # check if the end_tag exists, but the start_tag doesn't - # this could mean that the release branch has been created recently - if ret_code != 0: - if is_recent_branch(err): - print_verbose('looks like this branch has been 
created recently') - command_to_execute = MAGIC_COMMAND_FIND_ALL_TAGS.replace('END_TAG', real_end_tag).replace('RELEASE_QUEUE', - release_queue) - print("Running:", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - - tags = get_tags(out, release_queue) - tags.append(start_tag) - - # check if the tags list could be missing tags - # this means that the release branch has not changed much from the start_tag - if is_tag_list_suspicious(tags): - print_verbose('this list could be missing something!') - print_verbose(tags) - new_tags = look_for_missing_tags(start_tag, release_branch) - tags.pop() - tags += new_tags - - tags = [t for t in reversed(tags) if not ignore_tags or not re.match(ignore_tags, t)] - print("Found Tags:", tags) - - return tags - - -def execute_command_compare_tags(branch, start_tag, end_tag, git_dir, repo, cache={}): - comp = {} - comp['compared_tags'] = '%s-->%s' % (start_tag, end_tag) - comp['release_name'] = end_tag - notes = get_merge_prs(start_tag, end_tag, git_dir, CMS_PRS, cache) - prs = [] - for pr_num in notes: - pr = {'is_merge_commit': False, 'from_merge_commit': False} - if notes[pr_num]['branch'] != "master": - if notes[pr_num]['branch'] != branch: pr['from_merge_commit'] = True - pr['number'] = pr_num - pr['hash'] = notes[pr_num]['hash'] - pr['author_login'] = notes[pr_num]['author'] - pr['title'] = notes[pr_num]['title'] - pr['url'] = 'https://github.com/cms-sw/cmssw/pull/%s' % pr_num - prs.append(pr) - comp['merged_prs'] = prs - return comp - - -def compare_tags(branch, tags, git_dir, repo, cache={}): - comparisons = [] - if len(tags) > 1: comparisons.append(execute_command_compare_tags(branch, tags[0], tags[0], git_dir, repo, cache)) - for i in range(len(tags) - 1): - comp = execute_command_compare_tags(branch, tags[i], tags[i + 1], git_dir, repo, cache) - comparisons.append(comp) - return comparisons - - -def execute_magic_command_get_cmsdist_tags(): - """ - Executes the command to get the tags schema of all_tags_found: - { - "": { - "" : "" - } - } - """ - all_tags_found = {} - for arch in ARCHITECTURES: - command_to_execute = MAGIC_COMMAND_CMSDIST_TAGS.replace('ARCHITECTURE', arch) - out, err, ret_code = get_output_command(command_to_execute) - - for line in out.splitlines(): - m = re.search('CMSSW.*[0-9]/', line) - if not m: continue - - rel_name = line[m.start():m.end() - 1] - - if not all_tags_found.get(rel_name): - all_tags_found[rel_name] = {} - - all_tags_found[rel_name][arch] = line - if "CMSSW_10_" in rel_name: print("CMSDIST ", rel_name, arch) - return all_tags_found - - -def execute_magic_command_find_results(type): - """ - Executes the a command to get the results for the relvals, unit tests, - addon tests, and compitlation tests - It saves the results in the parameter 'results' - type can be 'relvals', 'utests', 'gpu_tests', 'addON', 'builds' - """ - ex_magix_comand_finf_setuls_dict = { - 'relvals': MAGIC_COMMAD_FIND_RESULTS_RELVALS, - 'utests': MAGIC_COMMAND_FIND_RESULTS_UNIT_TESTS, - 'gpu_utests': MAGIC_COMMAND_FIND_RESULTS_GPU_UNIT_TESTS, - 'addOn': MAGIC_COMMAND_FIND_RESULTS_ADDON, - 'builds': MAGIC_COMMAND_FIND_RESULTS_BUILD, - 'fwlite': MAGIC_COMMAND_FIND_RESULTS_FWLITE, - 'python3': MAGIC_COMMAND_FIND_RESULTS_PYTHON3, - 'invalid-includes': MAGIC_COMMAND_FIND_INVALID_INCLUDES - } - if type not in ex_magix_comand_finf_setuls_dict: - print('not a valid test type %s' % type) - exit(1) - results = {} - for arch in ARCHITECTURES: - base_command = ex_magix_comand_finf_setuls_dict[type] - command_to_execute = 
base_command.replace('ARCHITECTURE', arch) - print("Run>>", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - analyze_tests_results(out, results, arch, type) - return results - - -def print_results(results): - print("Results:") - print() - print() - for rq in results: - print() - print(rq['release_name']) - print('/////////////////////////') - for comp in rq['comparisons']: - print(comp['compared_tags']) - - print('\t' + 'HLT Tests: ' + comp['hlt_tests']) - print('\t' + 'Crab Tests: ' + comp['crab_tests']) - print('\t' + 'HEADER Tests:' + comp['check-headers']) - print('\t' + 'DQM Tests: ' + comp['dqm_tests']) - print('\t' + 'Static Checks: ' + comp['static_checks']) - print('\t' + 'Valgrind: ' + comp['valgrind']) - print('\t' + 'Material budget: ' + comp['material_budget']) - print('\t' + 'Igprof: ' + comp['igprof']) - print('\t' + 'Profiling: ' + comp['profiling']) - print('\t' + 'Comparison Baseline: ' + comp['comp_baseline']) - print('\t' + 'Comparison Baseline State: ' + comp['comp_baseline_state']) - - cmsdist_tags = comp['cmsdistTags'] - print('\t' + 'cmsdist Tags:' + str(cmsdist_tags)) - - builds_results = [res['arch'] + ':' + str(res['passed']) + ':' + str(res['details']) for res in - comp['builds']] - print('\t' + 'Builds:' + str(builds_results)) - - fwlite_results = [res['arch'] + ':' + str(res['passed']) + ':' + str(res['details']) for res in - comp['fwlite']] - print('\t' + 'FWLite:' + str(fwlite_results)) - - relvals_results = [res['arch'] + ':' + str(res['passed']) + ":" + str(res['details']) for res in - comp['relvals']] - print('\t' + 'RelVals:' + str(relvals_results)) - - utests_results = [res['arch'] + ':' + str(res['passed']) + ':' + str(res['details']) for res in - comp['utests']] - print('\t' + 'UnitTests:' + str(utests_results)) - - gpu_utests_results = [res['arch'] + ':' + str(res['passed']) + ':' + str(res['details']) for res in - comp['gpu_utests']] - print('\t' + 'GPUUnitTests:' + str(gpu_utests_results)) - - addons_results = [res['arch'] + ':' + str(res['passed']) for res in comp['addons']] - print('\t' + 'AddOns:' + str(addons_results)) - - merged_prs = [pr['number'] for pr in comp['merged_prs']] - print('\t' + 'PRs:' + str(merged_prs)) - print('\t' + "Cmsdist compared tags: " + pformat(comp['cmsdist_compared_tags'])) - print('\t' + "Cmsdist merged prs: " + pformat(comp['cmsdist_merged_prs'])) - - from_merge_commit = [pr['number'] for pr in comp['merged_prs'] if pr['from_merge_commit']] - print('\t' + 'From merge commit' + str(from_merge_commit)) - - print('\t' + 'RVExceptions: ' + str(comp.get('RVExceptions'))) - print('\t' + 'inProgress: ' + str(comp.get('inProgress'))) - - -def fill_missing_cmsdist_tags(results): - """ - Iterates over the IBs comparisons, if an IB doesn't have a tag for an architecture, the previous tag is - assigned. For example, for arch slc6_amd64_gcc481 - 1. CMSSW_7_1_X_2014-10-02-1500 was built using the tag IB/CMSSW_7_1_X_2014-10-02-1500/slc6_amd64_gcc481 - 2. 
There is no tag for CMSSW_7_1_X_2014-10-03-0200 in cmsdist - Then, it assumes that the tag used for CMSSW_7_1_X_2014-10-03-0200 was IB/CMSSW_7_1_X_2014-10-02-1500/slc6_amd64_gcc481 - """ - for rq in results: - previous_cmsdist_tags = {} - for comp in rq['comparisons']: - for arch in comp['tests_archs']: - current_ib_tag_arch = comp['cmsdistTags'].get(arch) - if current_ib_tag_arch: - previous_cmsdist_tags[arch] = current_ib_tag_arch - else: - if previous_cmsdist_tags.get(arch): - comp['cmsdistTags'][arch] = previous_cmsdist_tags[arch] - else: - comp['cmsdistTags'][arch] = 'Not Found' - - -def get_cmsdist_merge_commits(results): - """ - Will modiffy object in place - """ - for release_queue in results: - previous_cmsdist_tags = {} - release_queue_name = release_queue['release_name'] - for pos, comp in enumerate(release_queue['comparisons'], start=1): - comp['cmsdist_merged_prs'] = {} - comp['cmsdist_compared_tags'] = {} - - if pos == len(release_queue['comparisons']): - # this is special case when we want to compare unreleased IB with branch head - # sinces it is not an IB, there are no build archs yet. - archs_to_iterate_over = RELEASES_ARCHS[release_queue_name] - else: - archs_to_iterate_over = comp['tests_archs'] - - for arch in archs_to_iterate_over: - if arch not in RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue_name]: continue - cmsdist_branch = RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue_name][arch] - if pos == len(release_queue['comparisons']): - # if this last comparison, it means its not yet an IB - # we want to compare branch HEAD with last tag - # we will compare with remote branch to avoid checking out all the time, this is the reason for - # remotes/origin/{BRANCH_NAME} - current_ib_tag_arch = "remotes/origin/" + cmsdist_branch - # when dumping JSON, we do not want 'remotes/origin/ part - current_ib_tag_arch_to_show = cmsdist_branch - else: - # else, just use current cmsdistTag - current_ib_tag_arch = comp['cmsdistTags'].get(arch) - current_ib_tag_arch_to_show = comp['cmsdistTags'].get(arch) - if arch in previous_cmsdist_tags: - previous_cmsdist_tag = previous_cmsdist_tags[arch] - else: - previous_cmsdist_tag = current_ib_tag_arch - - previous_cmsdist_tags[arch] = current_ib_tag_arch - notes = get_merge_prs(previous_cmsdist_tag, current_ib_tag_arch, "{0}/.git".format(CMSDIST_REPO), CMS_PRS, repo_name='cmsdist') - prs = [] - for pr_num in notes: - pr = {'is_merge_commit': False, 'from_merge_commit': False} - if notes[pr_num]['branch'] != "master": - if notes[pr_num]['branch'] != cmsdist_branch: - pr['from_merge_commit'] = True - pr['number'] = pr_num - pr['hash'] = notes[pr_num]['hash'] - pr['author_login'] = notes[pr_num]['author'] - pr['title'] = notes[pr_num]['title'] - pr['url'] = 'https://github.com/cms-sw/cmsdist/pull/%s' % pr_num - prs.append(pr) - comp['cmsdist_merged_prs'][arch] = prs - comp['cmsdist_compared_tags'][arch] = "{0}..{1}".format(previous_cmsdist_tag, current_ib_tag_arch_to_show) - - -def add_tests_to_results(results, unit_tests, relvals_results, - addon_results, build_results, cmsdist_tags_results, - rv_Exceptions_Results, fwlite_results, gpu_unit_tests, python3_results, invalid_includes): - """ - merges the results of the tests with the structure of the IBs tags and the pull requests - it also marks the comparisons that correspond to an IB - """ - for rq in results: - for comp in rq['comparisons']: - rel_name = comp['compared_tags'].split('-->')[1] - rvsres = relvals_results.get(rel_name) - utres = unit_tests.get(rel_name) - gpu_utres = 
gpu_unit_tests.get(rel_name) - python3_res = python3_results.get(rel_name) - invalid_includes_res = invalid_includes.get(rel_name) - adonres = addon_results.get(rel_name) - buildsres = build_results.get(rel_name) - fwliteres = fwlite_results.get(rel_name) - cmsdist_tags = cmsdist_tags_results.get(rel_name) - print("CMDIST ", rel_name, ":", cmsdist_tags) - - # for tests with arrays - comp['relvals'] = rvsres if rvsres else [] - comp['utests'] = utres if utres else [] - comp['gpu_utests'] = gpu_utres if gpu_utres else [] - comp['python3_tests'] = python3_res if python3_res else [] - comp['invalid_includes'] = invalid_includes_res if invalid_includes_res else [] - comp['addons'] = adonres if adonres else [] - comp['builds'] = buildsres if buildsres else [] - comp['fwlite'] = fwliteres if fwliteres else [] - comp['cmsdistTags'] = cmsdist_tags if cmsdist_tags else {} - comp['isIB'] = '-' in rel_name - comp['RVExceptions'] = rv_Exceptions_Results.get(rel_name) - if "_X_" in rel_name: - comp['ib_date'] = rel_name.split("_X_", 1)[-1] - else: - comp['ib_date'] = '' - - comp['inProgress'] = False - if not comp.get('static_checks'): - comp['static_checks'] = 'not-found' - if not comp.get('hlt_tests'): - comp['hlt_tests'] = 'not-found' - if not comp.get('crab_tests'): - comp['crab_tests'] = 'not-found' - if not comp.get('check-headers'): - comp['check-headers'] = 'not-found' - if not comp.get('valgrind'): - comp['valgrind'] = 'not-found' - if not comp.get('material_budget'): - comp['material_budget'] = 'not-found' - if not comp.get('igprof'): - comp['igprof'] = 'not-found' - if not comp.get('profiling'): - comp['profiling'] = 'not-found' - if not comp.get('comp_baseline'): - comp['comp_baseline'] = 'not-found' - comp['comp_baseline_state'] = 'errors' - if not comp.get('dqm_tests'): - comp['dqm_tests'] = 'not-found' - # custom details for new IB page - if not comp.get('material_budget_v2'): - comp['material_budget_v2'] = 'not-found' - if not comp.get('material_budget_comparison'): - comp['material_budget_comparison'] = 'not-found' - if not comp.get('static_checks_v2'): - comp['static_checks_v2'] = 'not-found' - if not comp.get('static_checks_failures'): - comp['static_checks_failures'] = 'not-found' - - a = [t['arch'] for t in utres] if utres else [] - b = [t['arch'] for t in rvsres] if rvsres else [] - c = [t['arch'] for t in buildsres] if buildsres else [] - - not_complete_archs = [arch for arch in c if arch not in a] - for nca in not_complete_archs: - result = {} - result['arch'] = nca - result['file'] = str([res['file'] for res in buildsres if res['arch'] == nca]) - result['passed'] = PossibleUnitTestResults.UNKNOWN - result['details'] = {} - comp['utests'].append(result) - - comp['tests_archs'] = list(set(a + b + c)) - - -def find_comparison_baseline_results(comparisons, architecture): - """ - Finds for an IB the results of the Comparison BaseLine - """ - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print('Looking for comparison baseline results for ', rel_name) - comp['comp_baseline'] = find_one_comparison_baseline(rel_name, architecture) - comp['comp_baseline_state'] = "errors" - if comp['comp_baseline'] != 'not-found': - comp['comp_baseline_state'] = find_one_comparison_baseline_errors(rel_name, architecture) - - -def find_material_budget_results(comparisons, architecture): - """ - Finds for an IB the results of the material_budget - """ - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print('Looking for material_budget results for 
', rel_name) - arch, comparison, status = find_one_material_budget(rel_name, architecture) - if (arch is None): - comp['material_budget'] = status # returns 'inprogress' - else: - comp['material_budget'] = arch + ":" + comparison - comp['material_budget_v2'] = { - 'status': status, - 'arch': arch - } - if (comparison is None) or (comparison is '-1'): - pass - elif (comparison == "0"): - comp['material_budget_comparison'] = {'status': 'found', 'results': 'ok', 'arch': arch} - else: - comp['material_budget_comparison'] = {'status': 'found', 'results': 'warning', 'arch': arch} - - -def find_one_test_results(command_to_execute): - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - if ret_code == 0: - print('found') - return 'found' - print('inprogress') - return 'inprogress' - - -# def find_dup_dict_result(command_to_execute): -# # todo delete -# print("Running ", command_to_execute) -# out, err, ret_code = get_output_command(command_to_execute) -# print("Ran:", out, err, ret_code, command_to_execute) -# if ret_code == 0: -# if int(out) == 0: -# print('passed') -# return 'passed' -# else: -# print('error') -# return 'error' -# print("not-found") -# return("not-found") - - -def find_dup_dict_result(comparisons): - """ - Will check for duplicated dictionary (CMSSW specific test) for each architecture - """ - def get_status(command_to_execute): - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - if ret_code == 0: - if int(out) == 0: - print('passed') - return 'passed' - else: - print('error') - return 'error' - print("not-found") - return ("not-found") - - test_field = "dupDict" - for comp in comparisons: - if test_field not in comp: - comp[test_field] = [] - for architecture in comp["tests_archs"]: - rel_name = comp['compared_tags'].split('-->')[1] - print("Looking for {0} results for {1}.".format(test_field,rel_name)) - command_to_execute = MAGIC_COMMAND_FIND_DUP_DICT.replace('RELEASE_NAME', rel_name).replace( - 'ARCHITECTURE', architecture - ) - comp[test_field].append({ - "passed": get_status(command_to_execute), - "arch": architecture - }) - - -def find_one_profiling_result(magic_command): - """ - Looks for one profiling result - """ - command_to_execute = magic_command.replace('WORKFLOW', '11834.21') - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - file = out.strip() - if (ret_code == 0) and (out != ""): - print('found', file) - return {'status' : 'passed', 'data' : file} - print('inprogress') - return 'inprogress' - - - -def find_general_test_results(test_field, comparisons, architecture, magic_command, results_function=find_one_test_results): - """ - Finds for results for the test_field. Modifies `comparisons` dict in place. 
- :param comparisons: comparison dictionary - :param architecture: arch - :param magic_command: string with bash command to execute - :param test_field: field to write back the results to - :param results_function: function how to process results - """ - - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print("Looking for {0} results for {1}.".format(test_field,rel_name)) - command_to_execute = magic_command.replace('RELEASE_NAME', rel_name).replace('ARCHITECTURE', architecture) - comp[test_field] = results_function(command_to_execute) - - -def find_general_test_results_2(test_field, comparisons, magic_command): - def find_one_test_results(release_name): - command = magic_command.replace('RELEASE_NAME', release_name) - out, err, ret_code = get_output_command(command) - if ret_code == 0: - print('found') - return 'found' - print('not-found') - return 'not-found' - - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print("Looking for {0} results for {1}.".format(test_field, rel_name)) - comp[test_field] = find_one_test_results(rel_name) - - -def find_and_check_result(release_name, architecture, magic_cmd, res_cmd, opt_cmd=''): - path = magic_cmd.replace('RELEASE_NAME', release_name) - path = path.replace('ARCHITECTURE', architecture) - _, _, t_ret_code = get_output_command('test -e ' + path) - - - def set_result(cmd, status0='passed', statusnon0='error'): - cmd = cmd.format(path) - out, err, ret_code = get_output_command(cmd) - try: - e = 0 - for o in [ x for x in out.split('\n') if x]: - e += int(o) - if e == 0: - result = status0 - else: - result = statusnon0 - except: - print("ERROR running command: " + cmd) - print(out, err, ret_code) - result = 'error' # this will make sure to check what is wrong with the file - return result - - - if t_ret_code == 0: - result = set_result(res_cmd) - if result == 'passed' and opt_cmd != '': - result = set_result(opt_cmd, 'passed', 'inprogress') - else: - result = 'inprogress' - - print(result) - return result - - -def find_check_hlt(comparisons, architecture): - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print("Looking for {0} results for {1}.".format('hlt', rel_name)) - comp['hlt_tests'] = find_and_check_result(rel_name, architecture, CHECK_HLT_PATH, 'grep -h -c "exit status: *[1-9]" {0}') - - -def find_check_crab(comparisons, architecture): - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print("Looking for {0} results for {1}.".format('crab', rel_name)) - comp['crab_tests'] = find_and_check_result(rel_name, architecture, CHECK_CRAB_PATH, 'grep -h -c "FAILED" {0}/*/statusfile', 'grep -h -c "INPROGRESS" {0}/*/statusfile') - - -def find_check_headers(comparisons, architecture): - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - print('Looking for check-headers results for', rel_name, '.') - comp['check-headers'] = find_and_check_result(rel_name, architecture, CHECK_HEADERS_PATH, 'cat {0} | wc -l') - - -def find_ubsan_logs(comparisons, ubsan_data): - for c in comparisons: - rel_name = c['compared_tags'].split('-->')[1] - if rel_name in ubsan_data: - print('Looking for ubsan results for', rel_name, '.') - if ubsan_data[rel_name]>0: - c['ubsan-logs'] = 'error' - else: - c['ubsan-logs'] = 'passed' - - -def find_static_results(comparisons, architecture): - """ - Finds for an IB the results of the static tests - """ - for comp in comparisons: - rel_name = comp['compared_tags'].split('-->')[1] - 
print('Looking for static tests results for ', rel_name) - comp['static_checks'] = find_one_static_check(rel_name, architecture) - # For new IB page - if (comp['static_checks'] == 'not-found' or comp['static_checks'] == 'inprogress'): - comp['static_checks_v2'] = comp['static_checks'] - else: - resultList = comp['static_checks'].split(":") - comp['static_checks_v2'] = {'status': "passed", 'arch': resultList[0]} - iterable = [] - for i in range(1, len(resultList)): - result = resultList[i] - if result == '': - continue - iterable.append(result) - if (len(iterable) > 0): - comp['static_checks_failures'] = { - 'status': "found", - 'arch': resultList[0], - 'iterable': iterable - } - -def find_one_static_filter_check(release_name, architecture, magic_cmd): - """ - Looks for one static-tests-filter result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' - """ - command_to_execute = magic_cmd.replace('RELEASE_NAME', release_name) - command_to_execute = command_to_execute.replace('ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - return out - - -def find_one_static_check(release_name, architecture): - """ - Looks for one static-tests result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' - """ - command_to_execute = MAGIC_COMMAND_FIND_STATIC_CHECKS.replace('RELEASE_NAME', release_name) - command_to_execute = command_to_execute.replace('ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - if ret_code == 0: - arch = out.split()[0] - print('found', arch) - filter1 = find_one_static_filter_check(release_name, arch, MAGIC_COMMAND_FIND_STATIC_CHECKS_FILTER1) - return arch + ":" + filter1 - print('inprogress') - return 'inprogress' - - -def find_one_material_budget(release_name, architecture): - """ - Looks for one material_budget result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' - """ - command_to_execute = MAGIC_COMMAND_FIND_MATERIL_BUDGET_CHECKS.replace('RELEASE_NAME', release_name) - command_to_execute = command_to_execute.replace('ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - if ret_code == 0: - arch = out.split()[0] - print('found', arch) - command_to_execute = MAGIC_COMMAND_FIND_MATERIL_BUDGET_COMPARISON_CHECKS.replace('RELEASE_NAME', - release_name).replace( - 'ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - if ret_code == 0: - return (arch, out.split()[0], 'found') - return (arch, "-1", 'found') - print('inprogress') - return (None, None, 'inprogress') - - -def find_one_comparison_baseline_errors(release_name, architecture): - """ - Looks for one comparison baseline errors result for the IB, if no errors then value is 'ok' if not, - the value is 'errors' - """ - command_to_execute = MAGIC_COMMAND_COMPARISON_BASELINE_ERRORS.replace('RELEASE_NAME', release_name) - command_to_execute = command_to_execute.replace('ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - 
if out == "": - return "ok" - else: - return "errors" - - -def find_one_comparison_baseline(release_name, architecture): - """ - Looks for one comparison baseline result for the IB, if it finds it, the value is 'found' if not, the value is '' - """ - command_to_execute = MAGIC_COMMAND_FIND_COMPARISON_BASELINE.replace('RELEASE_NAME', release_name) - command_to_execute = command_to_execute.replace('ARCHITECTURE', architecture) - print("Running ", command_to_execute) - out, err, ret_code = get_output_command(command_to_execute) - print("Ran:", out, err, ret_code, command_to_execute) - if ret_code == 0: - print('found') - return COMPARISON_BASELINE_TESTS_URL.replace('RELEASE_NAME', release_name).replace('ARCHITECTURE', architecture) - print('inprogress') - return 'inprogress' - - -def generate_separated_json_results(results): - """ - reads the results and generates a separated json for each release_queue - it also generates a csv file with statistics per release_queue and a general one - """ - all_ibs_list = [] - all_prs_list = [] - - for rq in results: - file_name = rq['release_name'] + ".json" - summary_file_name = rq['release_name'] + "_summary.txt" - out_json = open(file_name, "w") - json.dump(rq, out_json, indent=4) - out_json.close() - - f_summary = open(summary_file_name, "w") - ibs = [comp['release_name'] for comp in rq['comparisons'] - if (comp['release_name'] != rq['base_branch']) and comp['isIB']] - - all_ibs_list.extend(ibs) - - # Ignore forward ported prs, and merge commits - only_prs_list = [] - for comp in rq['comparisons']: - only_prs_list.extend([pr['number'] for pr in comp['merged_prs'] - if not (pr['is_merge_commit'] or pr['from_merge_commit'])]) - - all_prs_list.extend(only_prs_list) - f_summary.write("IBs:%s\n" % ibs) - f_summary.write("NumIBs:%d\n" % len(ibs)) - f_summary.write("PRs:%s\n" % only_prs_list) - f_summary.write("NumPRs:%d\n" % len(only_prs_list)) - f_summary.close() - - all_ibs_list = list(set(all_ibs_list)) - all_ibs_list.sort() - - all_prs_list = list(set(all_prs_list)) - all_prs_list.sort() - - f_summary_all = open('ibsSummaryAll.txt', "w") - f_summary_all.write("IBs:%s\n" % all_ibs_list) - f_summary_all.write("NumIBs:%d\n" % len(all_ibs_list)) - - f_summary_all.write("PRs:%s\n" % all_prs_list) - f_summary_all.write("NumPRs:%d\n" % len(all_prs_list)) - - -def get_production_archs(config_map): - archs = {} - for release in config_map: - if (('PROD_ARCH' in release) and (('DISABLED' not in release) or ('IB_WEB_PAGE' in release))): - archs[release['RELEASE_QUEUE']] = release['SCRAM_ARCH'] - return archs - - -def generate_ib_json_short_summary(results): - """ - Generates a json file with the global status of the last IB for each architecture, - per each Release Queue - Schema of short_summary - [ releaseQueue1, releaseQueue2, ... 
, releaseQueueN ] - Schema of releaseQueueN - { - "": { - "": { - "status": "ok|warning|error|unknown" - "latest_IB" : "" - } - } - } - """ - short_summary = {} - for rq in results: - # this should not be called 'release name', this should be fixed - rq_name = rq['release_name'] - enabled_archs = RELEASES_ARCHS[rq_name] - for arch in enabled_archs: - ibs_for_current_arch = [rel for rel in rq['comparisons'] if arch in rel["tests_archs"]] - # it starts as ok and checks the conditions - ib_status = 'ok' - - if len(ibs_for_current_arch) == 0: - pass - # TODO unused - # latest_IB = 'N/A' - # ib_status = 'unknown' - else: - latest_IB_info = ibs_for_current_arch[-1] - latest_IB_name = latest_IB_info['release_name'] - - build_info = [b for b in latest_IB_info["builds"] if b['arch'] == arch] - if len(build_info) == 0: - build_passed = 'unknown' - else: - build_passed = build_info[0]["passed"] - - fwlite_info = [b for b in latest_IB_info["fwlite"] if b['arch'] == arch] - # TODO unused - # if len(fwlite_info) == 0: - # fwlite_passed = 'unknown' - # else: - # fwlite_passed = build_info[0]["passed"] - - unit_tests_info = [u for u in latest_IB_info["utests"] if u['arch'] == arch] - if len(unit_tests_info) == 0: - utests_passed = 'unknown' - else: - utests_passed = unit_tests_info[0]["passed"] - - gpu_unit_tests_info = [u for u in latest_IB_info["gpu_utests"] if u['arch'] == arch] - if len(gpu_unit_tests_info) == 0: - gpu_utests_passed = 'unknown' - else: - gpu_utests_passed = gpu_unit_tests_info[0]["passed"] - - relvals_info = [r for r in latest_IB_info["relvals"] if r['arch'] == arch] - if len(relvals_info) == 0: - relvals_passed = 'unknown' - else: - relvals_passed = relvals_info[0]["passed"] - - if not short_summary.get(rq_name): - short_summary[rq_name] = {} - short_summary[rq_name][arch] = {} - short_summary[rq_name][arch]["latest_IB"] = latest_IB_name - - merged_statuses = "%s-%s-%s-%s" % (build_passed, utests_passed, relvals_passed, gpu_utests_passed) - - if 'unknown' in merged_statuses: - ib_status = 'unknown' - elif 'failed' in merged_statuses or 'False' in merged_statuses: - ib_status = 'error' - elif 'warning' in merged_statuses: - ib_status = 'warning' - short_summary[rq_name][arch]["status"] = ib_status - - short_summary['all_archs'] = ARCHITECTURES - short_summary['prod_archs'] = get_production_archs(get_config_map_properties()) - out_json = open('LatestIBsSummary.json', "w") - json.dump(short_summary, out_json, indent=4) - out_json.close() - - -def identify_release_groups(results): - """ - Identifies and groups the releases accodring to their prefix - For example if the release queues are: - CMSSW_7_1_X, CMSSW_7_0_X, CMSSW_6_2_X, CMSSW_5_3_X, CMSSW_7_1_THREADED_X - CMSSW_7_1_BOOSTIO_X, CMSSW_7_1_ROOT6_X, CMSSW_7_1_GEANT10_X, CMSSW_6_2_X_SLHC - CMSSW_7_1_DEVEL_X, CMSSW_7_1_CLANG_X, CMSSW_7_2_X, CMSSW_7_2_DEVEL_X, CMSSW_7_2_CLANG_X - CMSSW_7_2_GEANT10_X - It will organize them like this: - CMSSW_5_3_X: CMSSW_5_3_X - CMSSW_7_2_X: CMSSW_7_2_X, CMSSW_7_2_DEVEL_X, CMSSW_7_2_CLANG_X, CMSSW_7_2_GEANT10_X - CMSSW_6_2_X: CMSSW_6_2_X CMSSW_6_2_X_SLHC - CMSSW_7_0_X: CMSSW_7_0_X - CMSSW_7_1_X: CMSSW_7_1_X, CMSSW_7_1_THREADED_X, CMSSW_7_1_BOOSTIO_X, CMSSW_7_1_ROOT6_X', - CMSSW_7_1_GEANT10_X, CMSSW_7_1_DEVEL_X, CMSSW_7_1_CLANG_X - It returns a dictionary in which the keys are the release prefixes, and the values are - the release queues - """ - from operator import itemgetter - - releases = [] - release_objs = {} - for rq in results: - rn = rq['release_name'] - release_objs[rn] = rq - 
releases.append([rn] + [int(x) for x in rn.split("_")[1:3]]) - - groups = [] - for item in sorted(releases, key=itemgetter(1, 2)): - prefix = "CMSSW_" + "_".join([str(s) for s in item[1:3]]) + "_X" - group = None - for g in groups: - if g[0] == prefix: - group = g - break - if not group: - group = [prefix, []] - groups.append(group) - if not item[0] in group[1]: group[1].append(item[0]) - - structure = {'all_release_queues': [], 'all_prefixes': [], 'default_release': ''} - for g in groups: - rq = g[0] - structure[rq] = sorted(g[1], reverse=True) - structure['all_release_queues'] = structure[rq] + structure['all_release_queues'] - structure['all_prefixes'].append(rq) - for rq in structure['all_prefixes'][::-1]: - rn = structure[rq][0] - if (rn in release_objs) and ('comparisons' in release_objs[rn]) and (release_objs[rn]['comparisons']): - for comp in release_objs[rn]['comparisons']: - if ('builds' in comp) and (comp['builds']): - structure['default_release'] = rn - return structure - return structure - - -def fix_results(results): - for rq in results: - prev_ib_date = '' - release_count = 0 - for comp in rq['comparisons']: - comp['release_queue'] = rq['release_name'] - comp['base_branch'] = rq['base_branch'] - if comp['ib_date']: - prev_ib_date = comp['ib_date'] - release_count = 0 - comp['ib_date'] = prev_ib_date + '-0000' - else: - release_count += 1 - xstr = str(format(release_count, '04d')) - if not prev_ib_date: - comp['ib_date'] = xstr + '-' + comp['release_name'] - else: - comp['ib_date'] = prev_ib_date + '-' + xstr - comp['next_ib'] = False - if comp['release_name'] == rq['base_branch']: comp['next_ib'] = True - rq['comparisons'].reverse() - - -# ----------------------------------------------------------------------------------- -# ---- Start of execution -# ----------------------------------------------------------------------------------- - -if __name__ == "__main__": - - MAGIC_COMMAND_CMSDIST_TAGS = "pushd %s; git tag -l '*/*/ARCHITECTURE' | grep -E 'IB|ERR'; popd" % CMSDIST_REPO - CMSSDT_DIR = "/data/sdt" - BUILD_LOG_DIR = CMSSDT_DIR + "/buildlogs" - JENKINS_ARTIFACTS_SUBDIR = "SDT/jenkins-artifacts" - JENKINS_ARTIFACTS_DIR = CMSSDT_DIR + "/" + JENKINS_ARTIFACTS_SUBDIR - # I used this type of concatenation because the string has %s inside - MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG = 'GIT_DIR=' + CMSSW_REPO_LOCAL + ' git log --pretty=\'"%s", "tags->,%d"\' END_TAG | grep "\\\"tags->," | head -n1' - MAGIC_COMMAD_FIND_RESULTS_RELVALS = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 6 -maxdepth 6 -path "*/pyRelValMatrixLogs/run/runall-report-step123-.log"' - MAGIC_COMMAND_FIND_EXCEPTIONS_RESULTS_RELVALS = "find cms-sw.github.io/data/relvals/ -name '*EXCEPTIONS.json'" - MAGIC_COMMAND_TAGS = 'GIT_DIR=' + CMSSW_REPO_LOCAL + ' git log --pretty=\'"%s", "tags->,%d"\' START_TAG..END_TAG | grep -E "\\\"tags->, " | grep -E "RELEASE_QUEUE"' - MAGIC_COMMAND_FIND_RESULTS_UNIT_TESTS = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name unitTests-summary.log' - MAGIC_COMMAND_FIND_RESULTS_GPU_UNIT_TESTS = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 5 -maxdepth 5 -name unitTests-summary.log | grep "/GPU/"' - MAGIC_COMMAND_FIND_RESULTS_ADDON = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name addOnTests.log' - MAGIC_COMMAND_FIND_RESULTS_BUILD = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 5 -maxdepth 5 -path "*/new/logAnalysis.pkl"' - MAGIC_COMMAND_FIND_RESULTS_FWLITE = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 
5 -maxdepth 5 -path "*/new_FWLITE/logAnalysis.pkl"' - MAGIC_COMMAND_FIND_RESULTS_PYTHON3 = 'find ' + BUILD_LOG_DIR + '/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name python3.html' - MAGIC_COMMAND_FIND_INVALID_INCLUDES = 'find ' + JENKINS_ARTIFACTS_DIR + '/invalid-includes -maxdepth 3 -mindepth 3 -path "*/ARCHITECTURE/summary.json" -type f' - MAGIC_COMMAND_FIND_STATIC_CHECKS = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/ib-static-analysis/RELEASE_NAME && ls ' + JENKINS_ARTIFACTS_DIR + '/ib-static-analysis/RELEASE_NAME/' - MAGIC_COMMAND_FIND_STATIC_CHECKS_FILTER1 = 'test -s ' + JENKINS_ARTIFACTS_DIR + '/ib-static-analysis/RELEASE_NAME/ARCHITECTURE/reports/modules2statics-filter1.txt && echo reports/modules2statics-filter1.txt' - MAGIC_COMMAND_FIND_MATERIL_BUDGET_CHECKS = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/material-budget/RELEASE_NAME && ls ' + JENKINS_ARTIFACTS_DIR + '/material-budget/RELEASE_NAME/' - MAGIC_COMMAND_FIND_MATERIL_BUDGET_COMPARISON_CHECKS = "TEST_FILE=" + JENKINS_ARTIFACTS_DIR + "/material-budget/RELEASE_NAME/ARCHITECTURE/comparison/Images/MBDiff.txt && test -f $TEST_FILE && grep '0$' $TEST_FILE | wc -l" - MAGIC_COMMAND_FIND_VALGRIND = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/valgrind/RELEASE_NAME' - MAGIC_COMMAND_FIND_IGPROF = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/igprof/RELEASE_NAME' - MAGIC_COMMAND_FIND_PROFILING = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/profiling/RELEASE_NAME' - MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER1 = 'ls '+JENKINS_ARTIFACTS_DIR+'/profiling/RELEASE_NAME/ARCHITECTURE/WORKFLOW/step3_*.resources.json 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME/||;s|.json$||"' - MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER2 = 'ls ' + JENKINS_ARTIFACTS_DIR + '/igprof/RELEASE_NAME/ARCHITECTURE/profiling/*/sorted_RES_CPU_step3.txt 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME/||"' - MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER3 = 'ls ' + JENKINS_ARTIFACTS_DIR + '/profiling/RELEASE_NAME/ARCHITECTURE/*/step3_gpu_nsys.txt 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME||"' - MAGIC_COMMAND_FIND_COMPARISON_BASELINE = 'test -f ' + JENKINS_ARTIFACTS_DIR + '/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results/wf_errors.txt' - MAGIC_COMMAND_COMPARISON_BASELINE_ERRORS = 'cat ' + JENKINS_ARTIFACTS_DIR + '/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results/wf_errors.txt' - COMPARISON_BASELINE_TESTS_URL = 'https://cmssdt.cern.ch/' + JENKINS_ARTIFACTS_SUBDIR + '/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results' - CHECK_HLT_PATH = JENKINS_ARTIFACTS_DIR + '/HLT-Validation/RELEASE_NAME/ARCHITECTURE/jenkins.log' - CHECK_CRAB_PATH = JENKINS_ARTIFACTS_DIR + '/ib-run-crab/RELEASE_NAME/*' - MAGIC_COMMAND_FIND_DQM_TESTS = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/ib-dqm-tests/RELEASE_NAME' - MAGIC_COMMAND_FIND_LIZARD = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/lizard/RELEASE_NAME/ARCHITECTURE' - MAGIC_COMMAND_FIND_CHECK_HEADERS = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/check_headers/RELEASE_NAME/ARCHITECTURE' - CHECK_HEADERS_PATH = JENKINS_ARTIFACTS_DIR + '/check_headers/RELEASE_NAME/ARCHITECTURE/headers_with_error.log' - CHECK_UBSANLOG_PATH = JENKINS_ARTIFACTS_DIR + '/ubsan_logs/CMSSW_*/ubsan_runtime_errors.log' - MAGIC_COMMAND_FIND_FLAWFINDER = 'test -d ' + JENKINS_ARTIFACTS_DIR + '/flawfinder/RELEASE_NAME/ARCHITECTURE' - MAGIC_COMMAND_FIND_DUP_DICT = ("grep -v '^Searching for ' " + BUILD_LOG_DIR + "/ARCHITECTURE/www/*/*/RELEASE_NAME/testLogs/dup*.log" + - " | grep -v ':**** SKIPPING ' | grep -v '^ *$' | wc -l ") - CONFIG_MAP_FILE = 
'config.map' - # this will be filled using config.map by get_config_map_params() - ARCHITECTURES = [] - # this will be filled using config.map by get_config_map_params() - RELEASES_BRANCHES = {} - # this will be filled using config.map by get_config_map_params() SLHC releases have a different format, so it is hardcoded - SPECIAL_RELEASES = ['SLHC'] - # this will be filled using config.map by get_config_map_params() - SP_REL_REGEX = "" # Needs to be declared empty before using - # These are the release queues that need to be shown, this this will be filled using config.map by get_config_map_params() - RELEASE_QUEUES = [] - # These are the ibs and archs for which the aditional tests need to be shown - # The schema is: - # { - # "": { - # "" : [ test1, test2, ... , testN ] - # } - # } - # This this will be filled using config.map by get_config_map_params() - RELEASE_ADITIONAL_TESTS = {} - # the acrhitectures for which the enabled releases are currently avaiable - # The schema is: - # { - # "": [ "arch1" , "arch2" , ... ,"archN" ] - # } - # will be filled using config.map by get_config_map_params() - RELEASES_ARCHS = {} - """ - { - "RELEASE_QUE" : { "ARCH1" : "CMSDIST_BRANCH", "ARCH2" : "CMSDIST_BRANCH2" } - } - """ - RELEASES_ARCHS_WITH_DIST_BRANCH = {} - # The IBs and arch for which relval results are availavle - # The schema is: - # { - # "": [ "arch1" , "arch2" , ... ,"archN" ] - # } - MAGIC_COMMAND_FIND_ALL_TAGS = 'GIT_DIR=' + CMSSW_REPO_LOCAL + ' git log --pretty=\'"%s", "tags->,%d"\' END_TAG | grep -E "\\\"tags->, " | grep -E "RELEASE_QUEUE"' - # This regular expression allows to identify if a merge commit is an automatic forward port - AUTO_FORWARD_PORT_REGEX = '^.*Merge CMSSW.+ into CMSSW.+$' - - - class BuildResultsKeys(object): - DICT_ERROR = 'dictError' - COMP_ERROR = 'compError' - LINK_ERROR = 'linkError' - COMP_WARNING = 'compWarning' - DWNL_ERROR = 'dwnlError' - MISC_ERROR = 'miscError' - IGNORE_WARNING = 'ignoreWarning' - PYTHON_ERROR = 'pythonError' - PYTHON3_ERROR = 'python3Warning' - - - class PossibleBuildResults(object): - PASSED = 'passed' - WARNING = 'warning' - ERROR = 'error' - - - class PossibleUnitTestResults(object): - PASSED = 'passed' - FAILED = 'failed' - UNKNOWN = 'unknown' - - - results = [] - - get_config_map_params() - SP_REL_REGEX = "|".join(SPECIAL_RELEASES) - REQUESTED_COMPARISONS = [('%s_%s..%s' % (rq, START_DATE, rq)) for rq in RELEASE_QUEUES] - - AFS_INSTALLATION = "/cvmfs/cms.cern.ch/*/cms" - installedPaths = [] - for ib_path in [ "/cvmfs/cms-ib.cern.ch", "/cvmfs/cms-ib.cern.ch/sw/*"]: - installedPaths += [x for x in glob(ib_path + "/week*/*/cms/cmssw/*")] - installedPaths += [x for x in glob(ib_path + "/week*/*/cms/cmssw-patch/*")] - installedPaths += [x for x in glob(AFS_INSTALLATION + "/cmssw/*")] - installedPaths += [x for x in glob(AFS_INSTALLATION + "/cmssw-patch/*")] - - installedReleases = [basename(x) for x in installedPaths] - - print_verbose('Installed Releases:') - print_verbose(installedReleases) - prs_file = GITHUB_IO_REPO + "/_data/prs_cmssw_cache.json" - token = open(expanduser("~/.github-token")).read().strip() - github = Github(login_or_token=token) - CMSSW_REPO = github.get_repo(CMSSW_REPO_NAME) - - for comp in REQUESTED_COMPARISONS: - start_tag = comp.split("..")[0] - end_tag = comp.split("..")[1] - release_queue = start_tag - - # if is a SLHC or any special release, the split will happen with the fifth underscore _ - if re.search(SP_REL_REGEX, release_queue): - print_verbose('This is a special release') - release_queue = 
re.match(r'^((?:[^_]*_){%d}[^_]*)_(.*)' % (4), release_queue).groups()[0] - else: - release_queue = re.match(r'^((?:[^_]*_){%d}[^_]*)_(.*)' % (3), release_queue).groups()[0] - - print('####################################################################') - print("I will analyze %s from %s to %s:" % (release_queue, start_tag, end_tag)) - - release_branch = RELEASES_BRANCHES[release_queue] - release_queue_results = {} - release_queue_results['release_name'] = release_queue - release_queue_results['base_branch'] = release_branch - - print('Identifying tags...') - tags = execute_magic_command_tags(start_tag, end_tag, release_queue, release_branch, - ignore_tags="^CMSSW_9_3_.+_2017-09-(06-2300|07-1100)$") - originalTags = tags - tags = [x for x in tags if x in installedReleases] # NOTE: comment out on local development - tags.append(release_branch) - print('I got these tags: ') - print(tags) - - print('Getting merged pull requests between tags...') - release_queue_results['comparisons'] = compare_tags(release_branch, tags, CMSSW_REPO_LOCAL, CMSSW_REPO) - print('Done') - - # It checks if the tests are being run for that architecture, if they don't, it doesn't look for them. - # Then it goes over each selected tests, executes 'magic' command to look for tests results, interprets it - # and writes back in to 'release_queue_results['comparisons']'. Finally, it appends it back to results object. - # (check config.map file) - additional_tests = RELEASE_ADITIONAL_TESTS.get(release_queue) - if additional_tests: - for arch in additional_tests.keys(): - tests_to_find = additional_tests[arch] - if 'HLT' in tests_to_find: - find_check_hlt(release_queue_results['comparisons'], arch) - if 'crab' in tests_to_find: - find_check_crab(release_queue_results['comparisons'], arch) - if 'static-checks' in tests_to_find: - find_static_results(release_queue_results['comparisons'], arch) - if 'material-budget' in tests_to_find: - find_material_budget_results(release_queue_results['comparisons'], arch) - if 'baseline' in tests_to_find: - find_comparison_baseline_results(release_queue_results['comparisons'], arch) - if 'valgrind' in tests_to_find: - find_general_test_results( - 'valgrind', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_VALGRIND - ) - if 'lizard' in tests_to_find: - find_general_test_results( - 'lizard', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_LIZARD - ) - if 'flawfinder' in tests_to_find: - find_general_test_results( - 'flawfinder', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_FLAWFINDER - ) - if ('igprof-mp' in tests_to_find) or ('igprof-pp' in tests_to_find): - find_general_test_results( - 'igprof', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_IGPROF - ) - if ('profiling' in tests_to_find): - find_general_test_results( - 'profiling', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_PROFILING - ) - find_general_test_results( - 'piechart', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER1, find_one_profiling_result - ) - find_general_test_results( - 'reco_event_loop', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER2, find_one_profiling_result - ) - find_general_test_results( - 'reco_gpu_mods', release_queue_results['comparisons'], arch, MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER3, find_one_profiling_result - ) - if 'check-headers' in tests_to_find: - find_check_headers(release_queue_results['comparisons'], arch) - # will run every time for 
Q/A, that is why not checked if it is in tests to find - - find_general_test_results_2( - 'dqm_tests', release_queue_results['comparisons'], MAGIC_COMMAND_FIND_DQM_TESTS - ) - results.append(release_queue_results) - - add_tests_to_results( - results, - execute_magic_command_find_results('utests'), - execute_magic_command_find_results('relvals'), - execute_magic_command_find_results('addOn'), - execute_magic_command_find_results('builds'), - execute_magic_command_get_cmsdist_tags(), - execute_magic_command_find_rv_exceptions_results(), # rv_Exceptions_Results - execute_magic_command_find_results('fwlite'), - execute_magic_command_find_results('gpu_utests'), - execute_magic_command_find_results('python3'), - execute_magic_command_find_results('invalid-includes') - ) - - ubsan_data = {} - out, err, rcode = get_output_command('wc -l %s' % CHECK_UBSANLOG_PATH) - for line in out.split('\n'): - if not '/CMSSW_' in line: continue - print('UBSAN',line) - count, rel = line.strip().split(' ',1) - rel = rel.split('/')[-2] - ubsan_data[rel]=int(count) - if '_UBSAN_' in rel: - ubsan_data[rel.replace('_UBSAN_','_')]=int(count) - - for release_queue_results in results: - find_dup_dict_result(release_queue_results['comparisons']) - find_ubsan_logs(release_queue_results['comparisons'], ubsan_data) - - fill_missing_cmsdist_tags(results) - get_cmsdist_merge_commits(results) - print_results(results) - - structure = identify_release_groups(results) - fix_results(results) - generate_separated_json_results(results) - generate_ib_json_short_summary(results) - - out_json = open("merged_prs_summary.json", "w") - json.dump(results, out_json, indent=4) - out_json.close() - - out_groups = open("structure.json", "w") - json.dump(structure, out_groups, indent=4) - out_groups.close() diff --git a/report-summary-merged-prs b/report-summary-merged-prs new file mode 120000 index 000000000000..d61b30c632f5 --- /dev/null +++ b/report-summary-merged-prs @@ -0,0 +1 @@ +report-summary-merged-prs.py \ No newline at end of file diff --git a/report-summary-merged-prs.py b/report-summary-merged-prs.py new file mode 100755 index 000000000000..545029c3f8e1 --- /dev/null +++ b/report-summary-merged-prs.py @@ -0,0 +1,1957 @@ +#! /usr/bin/env python +""" +This script generates json file (like CMSSW_10_0_X.json) which is then used to render cmssdt ib page. +""" +from __future__ import print_function +from optparse import OptionParser +import subprocess +import re +import json +from pickle import Unpickler +from os.path import basename, dirname, exists, join, expanduser, getmtime +from glob import glob +from github import Github +from pprint import pformat + +from cmsutils import get_config_map_properties +from github_utils import get_merge_prs +from cms_static import GH_CMSSW_REPO, GH_CMSSW_ORGANIZATION +from releases import CMSSW_DEVEL_BRANCH +from socket import setdefaulttimeout + +setdefaulttimeout(120) +CMSSW_REPO_NAME = join(GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO) + +# ----------------------------------------------------------------------------------- +# ---- Parser Options +# ----------------------------------------------------------------------------------- +parser = OptionParser( + usage="usage: %prog CMSSW_REPO GITHUB_IO_REPO START_DATE" + "\n CMSSW_REPO: location of the cmssw repository. This must be a bare clone ( git clone --bare )" + "\n CMSDIST_REPO: location of the cmsdist repository. This must be a normal clone" + "\n GITHUB_IO_REPO: location of the github.io repository. 
This must be a normal clone"
+    "\n for example: cmssw.git or /afs/cern.ch/cms/git-cmssw-mirror/cmssw.git"
+    "\n START_DATE: the date of the earliest IB to show. It must be in the format"
+    "\n <year>-<month>-<day>-<hour>"
+    "\n For example:"
+    "\n 2014-10-08-1400"
+)
+
+parser.add_option(
+    "-v",
+    "--verbose",
+    dest="verbose",
+    action="store_true",
+    help="Print verbose output",
+    default=False,
+)
+
+(options, args) = parser.parse_args()
+
+"""
+-----------------------------------------------------------------------------------
+---- Output Schema
+-----------------------------------------------------------------------------------
+
+comparisons": [ , , ]
+
+Each dict contains the result of the comparison between 2 tags in cmssw. For example
+CMSSW_5_3_X_2015-02-03-0200 with CMSSW_5_3_X_2015-02-04-0200 which correspond
+to the IB CMSSW_5_3_X_2015-02-04-0200
+
+The schema of the dictionary is as follows:
+{
+    "addons": [],
+    "builds": [],
+    "fwlite": [],
+    "compared_tags": "",
+    "utests": [],
+    "gpu_utests": [],
+    "cmsdistTags": {},
+    "relvals": [],
+    "static_checks": "",
+    "valgrind": "",
+    "material_budget" : "",
+    "isIB": Boolean,
+    "tests_archs": [],
+    "release_name": "",
+    "merged_prs": [],
+    "RVExceptions" : Boolean
+
+}
+"""
+
+
+# -----------------------------------------------------------------------------------
+# ---- Review of arguments
+# -----------------------------------------------------------------------------------
+
+if len(args) < 4:
+    print("not enough arguments\n")
+    parser.print_help()
+    exit()
+
+# Remember that the cmssw repo is a bare clone while cmsdist is a complete clone
+CMSSW_REPO_LOCAL = args[0]
+GITHUB_IO_REPO = args[1]
+CMSDIST_REPO = args[2]
+START_DATE = args[3]
+if len(args) >= 5:
+    CMS_PRS = args[4]
+else:
+    CMS_PRS = "cms-prs"
+
+
+# -----------------------------------------------------------------------------------
+# ---- Functions
+# -----------------------------------------------------------------------------------
+def print_verbose(msg):
+    """
+    Takes into account the verbose option: the message is only printed if the option is activated.
+ """ + if options.verbose: + print(msg) + + +def parse_config_map_line(line): + """ + reads a line of config.map and returns a dictionary with is parameters + """ + params = {} + parts = line.split(";") + + for part in parts: + if part == "": + continue + key = part.split("=")[0] + value = part.split("=")[1] + params[key] = value + + return params + + +def get_config_map_params(): + """ + gets the list of architectures by reading config.map, they are saved in ARCHITECTURES + gets the releases branches from config.map, they are saved in RELEASES_BRANCHES + it maps the branches for all the releases this is to take into account the case in which the base branch + is different from the release queue + """ + f = open(CONFIG_MAP_FILE, "r") + for line in f.readlines(): + params = parse_config_map_line(line.rstrip()) + if not params: + continue + print(params) + + arch = params["SCRAM_ARCH"] + if arch not in ARCHITECTURES: + ARCHITECTURES.append(arch) + + release_queue = params["RELEASE_QUEUE"] + base_branch = params.get("RELEASE_BRANCH") + if base_branch: + if base_branch == "master": + base_branch = CMSSW_DEVEL_BRANCH + RELEASES_BRANCHES[release_queue] = base_branch + else: + RELEASES_BRANCHES[release_queue] = release_queue + + sp_rel_name = release_queue.split("_")[3] + + if sp_rel_name != "X" and sp_rel_name not in SPECIAL_RELEASES: + SPECIAL_RELEASES.append(sp_rel_name) + + if not params.get("DISABLED") or params.get("IB_WEB_PAGE"): + if not RELEASES_ARCHS.get(release_queue): + RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue] = {} + RELEASES_ARCHS[release_queue] = [] + RELEASES_ARCHS[release_queue].append(arch) + RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue][arch] = params["CMSDIST_TAG"] + if release_queue not in RELEASE_QUEUES: + RELEASE_QUEUES.append(release_queue) + + additional_tests = params.get("ADDITIONAL_TESTS") + + if additional_tests: + if RELEASE_ADITIONAL_TESTS.get(release_queue): + continue + RELEASE_ADITIONAL_TESTS[release_queue] = {} + # if not RELEASE_ADITIONAL_TESTS.get( release_queue ): + # RELEASE_ADITIONAL_TESTS[ release_queue ] = {} + RELEASE_ADITIONAL_TESTS[release_queue][arch] = [ + test for test in additional_tests.split(",") if test != "dqm" + ] + + SP_REL_REGEX = "|".join(SPECIAL_RELEASES) + RELEASE_QUEUES.sort() + + print() + print("---------------------------") + print("Read config.map:") + print("ARCHS:") + print(ARCHITECTURES) + print("--") + print(RELEASES_ARCHS) + print("RELEASES_BRANCHES:") + print(RELEASES_BRANCHES) + print("special releases") + print(SPECIAL_RELEASES) + print("aditional tests") + print(RELEASE_ADITIONAL_TESTS) + print("I am going to show:") + print(RELEASE_QUEUES) + print("---------------------------") + print() + + +def get_tags_from_line(line, release_queue): + """ + reads a line of the output of git log and returns the tags that it contains + if there are no tags it returns an empty list + it applies filters according to the release queue to only get the + tags related to the current release queue + """ + if "tags->" not in line: + return [] + tags_str = line.split("tags->")[1] + if re.match(".*SLHC$", release_queue): + filter = release_queue[:-6] + "[X|0-9]_SLHC.*" + else: + filter = release_queue[:-1] + "[X|0-9].*" + + ## if the tags part is equal to ," there are no tags + if tags_str != ',"': + tags = tags_str.split(",", 1)[1].strip().replace("(", "").replace(")", "").split(",") + # remove te word "tag: " + tags = [t.replace("tag: ", "") for t in tags] + # I also have to remove the branch name because it otherwise will always appear + 
# I also remove tags that have the string _DEBUG_TEST, they are used to create test IBs
+        tags = [
+            t
+            for t in tags
+            if re.match(filter, t.strip())
+            and (t.strip().replace('"', "") != release_queue)
+            and ("DEBUG_TEST" not in t)
+        ]
+        return [t.replace('"', "").replace("tag:", "").strip() for t in tags]
+    else:
+        return []
+
+
+# -----------------------------------------------------------------------------------
+# ---- Functions -- Analyze Git outputs
+# -----------------------------------------------------------------------------------
+def determine_build_error(nErrorInfo):
+    a = BuildResultsKeys.COMP_ERROR in nErrorInfo.keys()
+    b = BuildResultsKeys.LINK_ERROR in nErrorInfo.keys()
+    c = BuildResultsKeys.MISC_ERROR in nErrorInfo.keys()
+    d = BuildResultsKeys.DWNL_ERROR in nErrorInfo.keys()
+    e = BuildResultsKeys.DICT_ERROR in nErrorInfo.keys()
+    f = BuildResultsKeys.PYTHON_ERROR in nErrorInfo.keys()
+    return a or b or c or d or e or f
+
+
+def determine_build_warning(nErrorInfo):
+    a = BuildResultsKeys.PYTHON3_ERROR in nErrorInfo.keys()
+    b = BuildResultsKeys.COMP_WARNING in nErrorInfo.keys()
+    return a or b
+
+
+def get_results_one_addOn_file(file):
+    look_for_err_cmd = 'grep "failed" %s' % file
+    result, err, ret_code = get_output_command(look_for_err_cmd)
+    if " 0 failed" in result:
+        return True
+    else:
+        return False
+
+
+def get_results_one_unitTests_file(file, grep_str="ERROR"):
+    """
+    given a unitTests-summary.log it determines if the test passed or not
+    it returns a tuple, the first element is one of the possible values of PossibleUnitTestResults
+    The second element is a dictionary which indicates how many tests failed
+    """
+    look_for_err_cmd = 'grep -h -c "%s" %s' % (grep_str, file)
+    result, err, ret_code = get_output_command(look_for_err_cmd)
+
+    result = result.rstrip()
+
+    details = {"num_fails": result}
+
+    if result != "0":
+        return PossibleUnitTestResults.FAILED, details
+    else:
+        return PossibleUnitTestResults.PASSED, details
+
+
+def get_results_one_relval_file(filename):
+    """
+    given a runall-report-step123-.log file it returns the result of the relvals
+    it returns a tuple, the first element indicates if the tests passed or not
+    the second element is a dictionary which shows the details of how many relvals passed
+    and how many failed
+    """
+    summary_file = filename.replace("/runall-report-step123-.log", "/summary.json")
+    if exists(summary_file) and getmtime(summary_file) > getmtime(filename):
+        try:
+            details = json.load(open(summary_file))
+            return details["num_failed"] == 0, details
+        except:
+            pass
+
+    details = {"num_passed": 0, "num_failed": 1, "known_failed": 0}
+
+    print_verbose("Analyzing: " + filename)
+    lines = open(filename).read().split("\n")
+    results = [x for x in lines if " tests passed" in x]
+    if len(results) == 0:
+        return False, details
+    out = results.pop()
+
+    num_passed_sep = out.split(",")[0].replace(" tests passed", "").strip()
+    num_failed_sep = out.split(",")[1].replace(" failed", "").strip()
+    try:
+        details["num_passed"] = sum([int(num) for num in num_passed_sep.split(" ")])
+        details["num_failed"] = sum([int(num) for num in num_failed_sep.split(" ")])
+    except ValueError as e:
+        print("Error while reading file %s" % filename)
+        print(e)
+        return False, details
+    with open(summary_file, "w") as ref:
+        json.dump(details, ref)
+    return details["num_failed"] == 0, details
+
+
+def get_results_details_one_build_file(file, type):
+    """
+    Given a logAnalysis.pkl file, it determines if the tests passed or not
+    it returns a tuple, the
first element is one of the values of PossibleBuildResults + The second element is a dictionary containing the details of the results. + If the tests are all ok this dictionary is empty + """ + summFile = open(file, "r") + pklr = Unpickler(summFile) + [rel, plat, anaTime] = pklr.load() + errorKeys = pklr.load() + nErrorInfo = pklr.load() + summFile.close() + # if type=='builds': + # py3_log = join(dirname(dirname(file)),'python3.log') + # if exists (py3_log): + # py3 = open(py3_log, 'r') + # nErrorInfo[BuildResultsKeys.PYTHON3_ERROR]=len([l for l in py3.readlines() if ' Error compiling ' in l]) + + if determine_build_error(nErrorInfo): + return PossibleBuildResults.ERROR, nErrorInfo + elif determine_build_warning(nErrorInfo): + return PossibleBuildResults.WARNING, nErrorInfo + else: + return PossibleBuildResults.PASSED, nErrorInfo + + +def analyze_tests_results(output, results, arch, type): + """ + parses the tests results for each file in output. It distinguishes if it is + build, unit tests, relvals, or addon tests logs. The the result of the parsing + is saved in the parameter results. + type can be 'relvals', 'utests', 'gpu_tests', 'addON', 'builds', 'fwlite' + + schema of results: + { + "": [ result_arch1, result_arch2, ... result_archN ] + } + schema of result_arch + { + "arch" : "" + "file" : "" + "passed" : ( if not applicable the value is true ) + "details" :
( can be empty if not applicable, but not undefined ) + } + """ + for line in output.splitlines(): + m = re.search("/(CMSSW_[^/]+)/", line) + if not m: + print_verbose("Ignoring file:\n%s" % line) + continue + + print("Processing ", type, ":", line) + rel_name = m.group(1) + result_arch = {} + result_arch["arch"] = arch + result_arch["file"] = line + + details = {} + passed = None + if type == "relvals": + passed, details = get_results_one_relval_file(line) + result_arch["done"] = False + if exists(join(dirname(line), "done")) or exists(join(dirname(line), "all.pages")): + result_arch["done"] = True + elif type == "utests": + passed, details = get_results_one_unitTests_file(line) + elif type == "gpu_utests": + passed, details = get_results_one_unitTests_file(line) + elif type == "addOn": + passed = get_results_one_addOn_file(line) + elif type == "builds": + passed, details = get_results_details_one_build_file(line, type) + elif type == "fwlite": + passed, details = get_results_details_one_build_file(line, type) + elif type == "python3": + passed, details = get_results_one_unitTests_file(line, " Error compiling ") + elif type == "invalid-includes": + errs = len(json.load(open(line))) + if errs: + passed = PossibleUnitTestResults.FAILED + details = {"num_fails": str(errs)} + else: + passed = PossibleUnitTestResults.PASSED + else: + print("not a valid test type %s" % type) + exit(1) + + result_arch["passed"] = passed + result_arch["details"] = details + + if rel_name not in results.keys(): + results[rel_name] = [] + + results[rel_name].append(result_arch) + + +def execute_magic_command_find_rv_exceptions_results(): + """ + Searchs in github.io for the results for relvals exceptions + """ + print("Finding relval exceptions results...") + command_to_execute = MAGIC_COMMAND_FIND_EXCEPTIONS_RESULTS_RELVALS + out, err, ret_code = get_output_command(command_to_execute) + + rv_exception_results = {} + + for line in out.splitlines(): + line_parts = line.split("/") + ib_name = line_parts[-1].replace("EXCEPTIONS.json", "") + line_parts[-2] + rv_exception_results[ib_name] = True + + return rv_exception_results + + +def get_tags(git_log_output, release_queue): + """ + returns a list of tags based on git log output + It uses the release queue name to filter the tags, this avoids having + in the result tags from other queues that may come from automatic merges. + For example, if release_queue is 7_2_X, it will drop tags like CMSSW_7_2_THREADED_X_2014-09-15-0200 + """ + tags = [] + for line in git_log_output.splitlines(): + tags += get_tags_from_line(line, release_queue) + + if len(tags) == 0: + print("ATTENTION:") + print("looks like %s has not changed between the tags specified!" 
% release_queue) + command_to_execute = MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG.replace( + "END_TAG", release_queue + ) + out, err, ret_code = get_output_command(command_to_execute) + print(out) + tags = get_tags_from_line(out, release_queue) + print(tags) + + return tags + + +def get_day_number_tag(tag): + """ + returns the number of the day of a tag + if it is not an IB tag, it returns -1 + """ + parts = tag.split("-") + if len(parts) == 1: + return -1 + else: + day = parts[2] + try: + return int(day) + except ValueError: + return -1 + + +def is_tag_list_suspicious(tags): + """ + uses some heuristics to tell if the list of tags seems to be too short + """ + if len(tags) < 7: + return True + day_first_tag = get_day_number_tag(tags[-1]) + day_second_tag = get_day_number_tag(tags[-2]) + return day_second_tag - day_first_tag > 1 + + +def is_recent_branch(err): + """ + determines if the error is because one of the tags does not exist + this can happen when the branch that is being analyzed has been + created recently + """ + return "unknown revision or path not in the working tree" in err + + +# ----------------------------------------------------------------------------------- +# ---- Fuctions -- Execute Magic commands +# ----------------------------------------------------------------------------------- + + +def look_for_missing_tags(start_tag, release_queue): + """ + this calls the git log command with the first tag to look for missing + tags that were not found previously + """ + command_to_execute = MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG.replace("END_TAG", start_tag) + out, err, ret_code = get_output_command(command_to_execute) + tags = get_tags_from_line(out, release_queue) + return tags + + +def get_output_command(command_to_execute): + """ + Executes the command that is given as parameter, returns a tuple out,err,ret_code + with the output, error and return code obtained + """ + print_verbose("Executing:") + print_verbose(command_to_execute) + + p = subprocess.Popen( + command_to_execute, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + out, err = p.communicate() + ret_code = p.returncode + + if ret_code != 0: + print_verbose(ret_code) + print_verbose("Error:") + print_verbose(err) + + return out, err, ret_code + + +def execute_magic_command_tags( + start_tag, end_tag, release_queue, release_branch, ignore_tags=None +): + """ + Gets the tags between start_tag and end_tag, the release_queue is used as a filter + to ignore tags that are from other releases + """ + print_verbose("Release Queue:") + print_verbose(release_queue) + print_verbose("Release Branch:") + print_verbose(release_branch) + + # if it is a special release queue based on a branch with a different name, I use the release_branch as end tag + if release_queue == release_branch: + print_verbose("These IBs have a custom release branch") + real_end_tag = end_tag + else: + real_end_tag = release_branch + + print_verbose("Start tag:") + print_verbose(start_tag) + print_verbose("End tag:") + print_verbose(real_end_tag) + command_to_execute = MAGIC_COMMAND_TAGS.replace("START_TAG", start_tag).replace( + "END_TAG", real_end_tag + ) + command_to_execute = command_to_execute.replace("RELEASE_QUEUE", release_queue) + print("Running:", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + + # check if the end_tag exists, but the start_tag doesn't + # this could mean that the release branch has been created recently + if ret_code != 0: + if is_recent_branch(err): + print_verbose("looks 
like this branch has been created recently") + command_to_execute = MAGIC_COMMAND_FIND_ALL_TAGS.replace( + "END_TAG", real_end_tag + ).replace("RELEASE_QUEUE", release_queue) + print("Running:", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + + tags = get_tags(out, release_queue) + tags.append(start_tag) + + # check if the tags list could be missing tags + # this means that the release branch has not changed much from the start_tag + if is_tag_list_suspicious(tags): + print_verbose("this list could be missing something!") + print_verbose(tags) + new_tags = look_for_missing_tags(start_tag, release_branch) + tags.pop() + tags += new_tags + + tags = [t for t in reversed(tags) if not ignore_tags or not re.match(ignore_tags, t)] + print("Found Tags:", tags) + + return tags + + +def execute_command_compare_tags(branch, start_tag, end_tag, git_dir, repo, cache={}): + comp = {} + comp["compared_tags"] = "%s-->%s" % (start_tag, end_tag) + comp["release_name"] = end_tag + notes = get_merge_prs(start_tag, end_tag, git_dir, CMS_PRS, cache) + prs = [] + for pr_num in notes: + pr = {"is_merge_commit": False, "from_merge_commit": False} + if notes[pr_num]["branch"] != "master": + if notes[pr_num]["branch"] != branch: + pr["from_merge_commit"] = True + pr["number"] = pr_num + pr["hash"] = notes[pr_num]["hash"] + pr["author_login"] = notes[pr_num]["author"] + pr["title"] = notes[pr_num]["title"] + pr["url"] = "https://github.com/cms-sw/cmssw/pull/%s" % pr_num + prs.append(pr) + comp["merged_prs"] = prs + return comp + + +def compare_tags(branch, tags, git_dir, repo, cache={}): + comparisons = [] + if len(tags) > 1: + comparisons.append( + execute_command_compare_tags(branch, tags[0], tags[0], git_dir, repo, cache) + ) + for i in range(len(tags) - 1): + comp = execute_command_compare_tags(branch, tags[i], tags[i + 1], git_dir, repo, cache) + comparisons.append(comp) + return comparisons + + +def execute_magic_command_get_cmsdist_tags(): + """ + Executes the command to get the tags schema of all_tags_found: + { + "": { + "" : "" + } + } + """ + all_tags_found = {} + for arch in ARCHITECTURES: + command_to_execute = MAGIC_COMMAND_CMSDIST_TAGS.replace("ARCHITECTURE", arch) + out, err, ret_code = get_output_command(command_to_execute) + + for line in out.splitlines(): + m = re.search("CMSSW.*[0-9]/", line) + if not m: + continue + + rel_name = line[m.start() : m.end() - 1] + + if not all_tags_found.get(rel_name): + all_tags_found[rel_name] = {} + + all_tags_found[rel_name][arch] = line + if "CMSSW_10_" in rel_name: + print("CMSDIST ", rel_name, arch) + return all_tags_found + + +def execute_magic_command_find_results(type): + """ + Executes the a command to get the results for the relvals, unit tests, + addon tests, and compitlation tests + It saves the results in the parameter 'results' + type can be 'relvals', 'utests', 'gpu_tests', 'addON', 'builds' + """ + ex_magix_comand_finf_setuls_dict = { + "relvals": MAGIC_COMMAD_FIND_RESULTS_RELVALS, + "utests": MAGIC_COMMAND_FIND_RESULTS_UNIT_TESTS, + "gpu_utests": MAGIC_COMMAND_FIND_RESULTS_GPU_UNIT_TESTS, + "addOn": MAGIC_COMMAND_FIND_RESULTS_ADDON, + "builds": MAGIC_COMMAND_FIND_RESULTS_BUILD, + "fwlite": MAGIC_COMMAND_FIND_RESULTS_FWLITE, + "python3": MAGIC_COMMAND_FIND_RESULTS_PYTHON3, + "invalid-includes": MAGIC_COMMAND_FIND_INVALID_INCLUDES, + } + if type not in ex_magix_comand_finf_setuls_dict: + print("not a valid test type %s" % type) + exit(1) + results = {} + for arch in ARCHITECTURES: + base_command = 
ex_magix_comand_finf_setuls_dict[type] + command_to_execute = base_command.replace("ARCHITECTURE", arch) + print("Run>>", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + analyze_tests_results(out, results, arch, type) + return results + + +def print_results(results): + print("Results:") + print() + print() + for rq in results: + print() + print(rq["release_name"]) + print("/////////////////////////") + for comp in rq["comparisons"]: + print(comp["compared_tags"]) + + print("\t" + "HLT Tests: " + comp["hlt_tests"]) + print("\t" + "Crab Tests: " + comp["crab_tests"]) + print("\t" + "HEADER Tests:" + comp["check-headers"]) + print("\t" + "DQM Tests: " + comp["dqm_tests"]) + print("\t" + "Static Checks: " + comp["static_checks"]) + print("\t" + "Valgrind: " + comp["valgrind"]) + print("\t" + "Material budget: " + comp["material_budget"]) + print("\t" + "Igprof: " + comp["igprof"]) + print("\t" + "Profiling: " + comp["profiling"]) + print("\t" + "Comparison Baseline: " + comp["comp_baseline"]) + print("\t" + "Comparison Baseline State: " + comp["comp_baseline_state"]) + + cmsdist_tags = comp["cmsdistTags"] + print("\t" + "cmsdist Tags:" + str(cmsdist_tags)) + + builds_results = [ + res["arch"] + ":" + str(res["passed"]) + ":" + str(res["details"]) + for res in comp["builds"] + ] + print("\t" + "Builds:" + str(builds_results)) + + fwlite_results = [ + res["arch"] + ":" + str(res["passed"]) + ":" + str(res["details"]) + for res in comp["fwlite"] + ] + print("\t" + "FWLite:" + str(fwlite_results)) + + relvals_results = [ + res["arch"] + ":" + str(res["passed"]) + ":" + str(res["details"]) + for res in comp["relvals"] + ] + print("\t" + "RelVals:" + str(relvals_results)) + + utests_results = [ + res["arch"] + ":" + str(res["passed"]) + ":" + str(res["details"]) + for res in comp["utests"] + ] + print("\t" + "UnitTests:" + str(utests_results)) + + gpu_utests_results = [ + res["arch"] + ":" + str(res["passed"]) + ":" + str(res["details"]) + for res in comp["gpu_utests"] + ] + print("\t" + "GPUUnitTests:" + str(gpu_utests_results)) + + addons_results = [res["arch"] + ":" + str(res["passed"]) for res in comp["addons"]] + print("\t" + "AddOns:" + str(addons_results)) + + merged_prs = [pr["number"] for pr in comp["merged_prs"]] + print("\t" + "PRs:" + str(merged_prs)) + print("\t" + "Cmsdist compared tags: " + pformat(comp["cmsdist_compared_tags"])) + print("\t" + "Cmsdist merged prs: " + pformat(comp["cmsdist_merged_prs"])) + + from_merge_commit = [ + pr["number"] for pr in comp["merged_prs"] if pr["from_merge_commit"] + ] + print("\t" + "From merge commit" + str(from_merge_commit)) + + print("\t" + "RVExceptions: " + str(comp.get("RVExceptions"))) + print("\t" + "inProgress: " + str(comp.get("inProgress"))) + + +def fill_missing_cmsdist_tags(results): + """ + Iterates over the IBs comparisons, if an IB doesn't have a tag for an architecture, the previous tag is + assigned. For example, for arch slc6_amd64_gcc481 + 1. CMSSW_7_1_X_2014-10-02-1500 was built using the tag IB/CMSSW_7_1_X_2014-10-02-1500/slc6_amd64_gcc481 + 2. 
There is no tag for CMSSW_7_1_X_2014-10-03-0200 in cmsdist + Then, it assumes that the tag used for CMSSW_7_1_X_2014-10-03-0200 was IB/CMSSW_7_1_X_2014-10-02-1500/slc6_amd64_gcc481 + """ + for rq in results: + previous_cmsdist_tags = {} + for comp in rq["comparisons"]: + for arch in comp["tests_archs"]: + current_ib_tag_arch = comp["cmsdistTags"].get(arch) + if current_ib_tag_arch: + previous_cmsdist_tags[arch] = current_ib_tag_arch + else: + if previous_cmsdist_tags.get(arch): + comp["cmsdistTags"][arch] = previous_cmsdist_tags[arch] + else: + comp["cmsdistTags"][arch] = "Not Found" + + +def get_cmsdist_merge_commits(results): + """ + Will modiffy object in place + """ + for release_queue in results: + previous_cmsdist_tags = {} + release_queue_name = release_queue["release_name"] + for pos, comp in enumerate(release_queue["comparisons"], start=1): + comp["cmsdist_merged_prs"] = {} + comp["cmsdist_compared_tags"] = {} + + if pos == len(release_queue["comparisons"]): + # this is special case when we want to compare unreleased IB with branch head + # sinces it is not an IB, there are no build archs yet. + archs_to_iterate_over = RELEASES_ARCHS[release_queue_name] + else: + archs_to_iterate_over = comp["tests_archs"] + + for arch in archs_to_iterate_over: + if arch not in RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue_name]: + continue + cmsdist_branch = RELEASES_ARCHS_WITH_DIST_BRANCH[release_queue_name][arch] + if pos == len(release_queue["comparisons"]): + # if this last comparison, it means its not yet an IB + # we want to compare branch HEAD with last tag + # we will compare with remote branch to avoid checking out all the time, this is the reason for + # remotes/origin/{BRANCH_NAME} + current_ib_tag_arch = "remotes/origin/" + cmsdist_branch + # when dumping JSON, we do not want 'remotes/origin/ part + current_ib_tag_arch_to_show = cmsdist_branch + else: + # else, just use current cmsdistTag + current_ib_tag_arch = comp["cmsdistTags"].get(arch) + current_ib_tag_arch_to_show = comp["cmsdistTags"].get(arch) + if arch in previous_cmsdist_tags: + previous_cmsdist_tag = previous_cmsdist_tags[arch] + else: + previous_cmsdist_tag = current_ib_tag_arch + + previous_cmsdist_tags[arch] = current_ib_tag_arch + notes = get_merge_prs( + previous_cmsdist_tag, + current_ib_tag_arch, + "{0}/.git".format(CMSDIST_REPO), + CMS_PRS, + repo_name="cmsdist", + ) + prs = [] + for pr_num in notes: + pr = {"is_merge_commit": False, "from_merge_commit": False} + if notes[pr_num]["branch"] != "master": + if notes[pr_num]["branch"] != cmsdist_branch: + pr["from_merge_commit"] = True + pr["number"] = pr_num + pr["hash"] = notes[pr_num]["hash"] + pr["author_login"] = notes[pr_num]["author"] + pr["title"] = notes[pr_num]["title"] + pr["url"] = "https://github.com/cms-sw/cmsdist/pull/%s" % pr_num + prs.append(pr) + comp["cmsdist_merged_prs"][arch] = prs + comp["cmsdist_compared_tags"][arch] = "{0}..{1}".format( + previous_cmsdist_tag, current_ib_tag_arch_to_show + ) + + +def add_tests_to_results( + results, + unit_tests, + relvals_results, + addon_results, + build_results, + cmsdist_tags_results, + rv_Exceptions_Results, + fwlite_results, + gpu_unit_tests, + python3_results, + invalid_includes, +): + """ + merges the results of the tests with the structure of the IBs tags and the pull requests + it also marks the comparisons that correspond to an IB + """ + for rq in results: + for comp in rq["comparisons"]: + rel_name = comp["compared_tags"].split("-->")[1] + rvsres = relvals_results.get(rel_name) + utres = 
unit_tests.get(rel_name) + gpu_utres = gpu_unit_tests.get(rel_name) + python3_res = python3_results.get(rel_name) + invalid_includes_res = invalid_includes.get(rel_name) + adonres = addon_results.get(rel_name) + buildsres = build_results.get(rel_name) + fwliteres = fwlite_results.get(rel_name) + cmsdist_tags = cmsdist_tags_results.get(rel_name) + print("CMDIST ", rel_name, ":", cmsdist_tags) + + # for tests with arrays + comp["relvals"] = rvsres if rvsres else [] + comp["utests"] = utres if utres else [] + comp["gpu_utests"] = gpu_utres if gpu_utres else [] + comp["python3_tests"] = python3_res if python3_res else [] + comp["invalid_includes"] = invalid_includes_res if invalid_includes_res else [] + comp["addons"] = adonres if adonres else [] + comp["builds"] = buildsres if buildsres else [] + comp["fwlite"] = fwliteres if fwliteres else [] + comp["cmsdistTags"] = cmsdist_tags if cmsdist_tags else {} + comp["isIB"] = "-" in rel_name + comp["RVExceptions"] = rv_Exceptions_Results.get(rel_name) + if "_X_" in rel_name: + comp["ib_date"] = rel_name.split("_X_", 1)[-1] + else: + comp["ib_date"] = "" + + comp["inProgress"] = False + if not comp.get("static_checks"): + comp["static_checks"] = "not-found" + if not comp.get("hlt_tests"): + comp["hlt_tests"] = "not-found" + if not comp.get("crab_tests"): + comp["crab_tests"] = "not-found" + if not comp.get("check-headers"): + comp["check-headers"] = "not-found" + if not comp.get("valgrind"): + comp["valgrind"] = "not-found" + if not comp.get("material_budget"): + comp["material_budget"] = "not-found" + if not comp.get("igprof"): + comp["igprof"] = "not-found" + if not comp.get("profiling"): + comp["profiling"] = "not-found" + if not comp.get("comp_baseline"): + comp["comp_baseline"] = "not-found" + comp["comp_baseline_state"] = "errors" + if not comp.get("dqm_tests"): + comp["dqm_tests"] = "not-found" + # custom details for new IB page + if not comp.get("material_budget_v2"): + comp["material_budget_v2"] = "not-found" + if not comp.get("material_budget_comparison"): + comp["material_budget_comparison"] = "not-found" + if not comp.get("static_checks_v2"): + comp["static_checks_v2"] = "not-found" + if not comp.get("static_checks_failures"): + comp["static_checks_failures"] = "not-found" + + a = [t["arch"] for t in utres] if utres else [] + b = [t["arch"] for t in rvsres] if rvsres else [] + c = [t["arch"] for t in buildsres] if buildsres else [] + + not_complete_archs = [arch for arch in c if arch not in a] + for nca in not_complete_archs: + result = {} + result["arch"] = nca + result["file"] = str([res["file"] for res in buildsres if res["arch"] == nca]) + result["passed"] = PossibleUnitTestResults.UNKNOWN + result["details"] = {} + comp["utests"].append(result) + + comp["tests_archs"] = list(set(a + b + c)) + + +def find_comparison_baseline_results(comparisons, architecture): + """ + Finds for an IB the results of the Comparison BaseLine + """ + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for comparison baseline results for ", rel_name) + comp["comp_baseline"] = find_one_comparison_baseline(rel_name, architecture) + comp["comp_baseline_state"] = "errors" + if comp["comp_baseline"] != "not-found": + comp["comp_baseline_state"] = find_one_comparison_baseline_errors( + rel_name, architecture + ) + + +def find_material_budget_results(comparisons, architecture): + """ + Finds for an IB the results of the material_budget + """ + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + 
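+        # Illustrative note (hypothetical tag names): compared_tags is built as
+        # "<start_tag>--><end_tag>", e.g. "CMSSW_13_2_X_2023-10-01-2300-->CMSSW_13_2_X_2023-10-02-1100",
+        # so rel_name above is the IB on the right-hand side of the arrow.
+        # find_one_material_budget() below returns an (arch, comparison, status) tuple,
+        # e.g. (None, None, "inprogress") while the material-budget job is still pending.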
print("Looking for material_budget results for ", rel_name) + arch, comparison, status = find_one_material_budget(rel_name, architecture) + if arch is None: + comp["material_budget"] = status # returns 'inprogress' + else: + comp["material_budget"] = arch + ":" + comparison + comp["material_budget_v2"] = {"status": status, "arch": arch} + if (comparison is None) or (comparison is "-1"): + pass + elif comparison == "0": + comp["material_budget_comparison"] = {"status": "found", "results": "ok", "arch": arch} + else: + comp["material_budget_comparison"] = { + "status": "found", + "results": "warning", + "arch": arch, + } + + +def find_one_test_results(command_to_execute): + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + if ret_code == 0: + print("found") + return "found" + print("inprogress") + return "inprogress" + + +# def find_dup_dict_result(command_to_execute): +# # todo delete +# print("Running ", command_to_execute) +# out, err, ret_code = get_output_command(command_to_execute) +# print("Ran:", out, err, ret_code, command_to_execute) +# if ret_code == 0: +# if int(out) == 0: +# print('passed') +# return 'passed' +# else: +# print('error') +# return 'error' +# print("not-found") +# return("not-found") + + +def find_dup_dict_result(comparisons): + """ + Will check for duplicated dictionary (CMSSW specific test) for each architecture + """ + + def get_status(command_to_execute): + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + if ret_code == 0: + if int(out) == 0: + print("passed") + return "passed" + else: + print("error") + return "error" + print("not-found") + return "not-found" + + test_field = "dupDict" + for comp in comparisons: + if test_field not in comp: + comp[test_field] = [] + for architecture in comp["tests_archs"]: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for {0} results for {1}.".format(test_field, rel_name)) + command_to_execute = MAGIC_COMMAND_FIND_DUP_DICT.replace( + "RELEASE_NAME", rel_name + ).replace("ARCHITECTURE", architecture) + comp[test_field].append( + {"passed": get_status(command_to_execute), "arch": architecture} + ) + + +def find_one_profiling_result(magic_command): + """ + Looks for one profiling result + """ + command_to_execute = magic_command.replace("WORKFLOW", "11834.21") + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + file = out.strip() + if (ret_code == 0) and (out != ""): + print("found", file) + return {"status": "passed", "data": file} + print("inprogress") + return "inprogress" + + +def find_general_test_results( + test_field, comparisons, architecture, magic_command, results_function=find_one_test_results +): + """ + Finds for results for the test_field. Modifies `comparisons` dict in place. 
+ :param comparisons: comparison dictionary + :param architecture: arch + :param magic_command: string with bash command to execute + :param test_field: field to write back the results to + :param results_function: function how to process results + """ + + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for {0} results for {1}.".format(test_field, rel_name)) + command_to_execute = magic_command.replace("RELEASE_NAME", rel_name).replace( + "ARCHITECTURE", architecture + ) + comp[test_field] = results_function(command_to_execute) + + +def find_general_test_results_2(test_field, comparisons, magic_command): + def find_one_test_results(release_name): + command = magic_command.replace("RELEASE_NAME", release_name) + out, err, ret_code = get_output_command(command) + if ret_code == 0: + print("found") + return "found" + print("not-found") + return "not-found" + + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for {0} results for {1}.".format(test_field, rel_name)) + comp[test_field] = find_one_test_results(rel_name) + + +def find_and_check_result(release_name, architecture, magic_cmd, res_cmd, opt_cmd=""): + path = magic_cmd.replace("RELEASE_NAME", release_name) + path = path.replace("ARCHITECTURE", architecture) + _, _, t_ret_code = get_output_command("test -e " + path) + + def set_result(cmd, status0="passed", statusnon0="error"): + cmd = cmd.format(path) + out, err, ret_code = get_output_command(cmd) + try: + e = 0 + for o in [x for x in out.split("\n") if x]: + e += int(o) + if e == 0: + result = status0 + else: + result = statusnon0 + except: + print("ERROR running command: " + cmd) + print(out, err, ret_code) + result = "error" # this will make sure to check what is wrong with the file + return result + + if t_ret_code == 0: + result = set_result(res_cmd) + if result == "passed" and opt_cmd != "": + result = set_result(opt_cmd, "passed", "inprogress") + else: + result = "inprogress" + + print(result) + return result + + +def find_check_hlt(comparisons, architecture): + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for {0} results for {1}.".format("hlt", rel_name)) + comp["hlt_tests"] = find_and_check_result( + rel_name, architecture, CHECK_HLT_PATH, 'grep -h -c "exit status: *[1-9]" {0}' + ) + + +def find_check_crab(comparisons, architecture): + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for {0} results for {1}.".format("crab", rel_name)) + comp["crab_tests"] = find_and_check_result( + rel_name, + architecture, + CHECK_CRAB_PATH, + 'grep -h -c "FAILED" {0}/*/statusfile', + 'grep -h -c "INPROGRESS" {0}/*/statusfile', + ) + + +def find_check_headers(comparisons, architecture): + for comp in comparisons: + rel_name = comp["compared_tags"].split("-->")[1] + print("Looking for check-headers results for", rel_name, ".") + comp["check-headers"] = find_and_check_result( + rel_name, architecture, CHECK_HEADERS_PATH, "cat {0} | wc -l" + ) + + +def find_ubsan_logs(comparisons, ubsan_data): + for c in comparisons: + rel_name = c["compared_tags"].split("-->")[1] + if rel_name in ubsan_data: + print("Looking for ubsan results for", rel_name, ".") + if ubsan_data[rel_name] > 0: + c["ubsan-logs"] = "error" + else: + c["ubsan-logs"] = "passed" + + +def find_static_results(comparisons, architecture): + """ + Finds for an IB the results of the static tests + """ + for comp in comparisons: + rel_name = 
comp["compared_tags"].split("-->")[1] + print("Looking for static tests results for ", rel_name) + comp["static_checks"] = find_one_static_check(rel_name, architecture) + # For new IB page + if comp["static_checks"] == "not-found" or comp["static_checks"] == "inprogress": + comp["static_checks_v2"] = comp["static_checks"] + else: + resultList = comp["static_checks"].split(":") + comp["static_checks_v2"] = {"status": "passed", "arch": resultList[0]} + iterable = [] + for i in range(1, len(resultList)): + result = resultList[i] + if result == "": + continue + iterable.append(result) + if len(iterable) > 0: + comp["static_checks_failures"] = { + "status": "found", + "arch": resultList[0], + "iterable": iterable, + } + + +def find_one_static_filter_check(release_name, architecture, magic_cmd): + """ + Looks for one static-tests-filter result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' + """ + command_to_execute = magic_cmd.replace("RELEASE_NAME", release_name) + command_to_execute = command_to_execute.replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + return out + + +def find_one_static_check(release_name, architecture): + """ + Looks for one static-tests result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' + """ + command_to_execute = MAGIC_COMMAND_FIND_STATIC_CHECKS.replace("RELEASE_NAME", release_name) + command_to_execute = command_to_execute.replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + if ret_code == 0: + arch = out.split()[0] + print("found", arch) + filter1 = find_one_static_filter_check( + release_name, arch, MAGIC_COMMAND_FIND_STATIC_CHECKS_FILTER1 + ) + return arch + ":" + filter1 + print("inprogress") + return "inprogress" + + +def find_one_material_budget(release_name, architecture): + """ + Looks for one material_budget result for the IB, if it finds it, the value is 'found' if not, the value is 'inprogress' + """ + command_to_execute = MAGIC_COMMAND_FIND_MATERIL_BUDGET_CHECKS.replace( + "RELEASE_NAME", release_name + ) + command_to_execute = command_to_execute.replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + if ret_code == 0: + arch = out.split()[0] + print("found", arch) + command_to_execute = MAGIC_COMMAND_FIND_MATERIL_BUDGET_COMPARISON_CHECKS.replace( + "RELEASE_NAME", release_name + ).replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + if ret_code == 0: + return (arch, out.split()[0], "found") + return (arch, "-1", "found") + print("inprogress") + return (None, None, "inprogress") + + +def find_one_comparison_baseline_errors(release_name, architecture): + """ + Looks for one comparison baseline errors result for the IB, if no errors then value is 'ok' if not, + the value is 'errors' + """ + command_to_execute = MAGIC_COMMAND_COMPARISON_BASELINE_ERRORS.replace( + "RELEASE_NAME", release_name + ) + command_to_execute = command_to_execute.replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) 
+ print("Ran:", out, err, ret_code, command_to_execute) + if out == "": + return "ok" + else: + return "errors" + + +def find_one_comparison_baseline(release_name, architecture): + """ + Looks for one comparison baseline result for the IB, if it finds it, the value is 'found' if not, the value is '' + """ + command_to_execute = MAGIC_COMMAND_FIND_COMPARISON_BASELINE.replace( + "RELEASE_NAME", release_name + ) + command_to_execute = command_to_execute.replace("ARCHITECTURE", architecture) + print("Running ", command_to_execute) + out, err, ret_code = get_output_command(command_to_execute) + print("Ran:", out, err, ret_code, command_to_execute) + if ret_code == 0: + print("found") + return COMPARISON_BASELINE_TESTS_URL.replace("RELEASE_NAME", release_name).replace( + "ARCHITECTURE", architecture + ) + print("inprogress") + return "inprogress" + + +def generate_separated_json_results(results): + """ + reads the results and generates a separated json for each release_queue + it also generates a csv file with statistics per release_queue and a general one + """ + all_ibs_list = [] + all_prs_list = [] + + for rq in results: + file_name = rq["release_name"] + ".json" + summary_file_name = rq["release_name"] + "_summary.txt" + out_json = open(file_name, "w") + json.dump(rq, out_json, indent=4) + out_json.close() + + f_summary = open(summary_file_name, "w") + ibs = [ + comp["release_name"] + for comp in rq["comparisons"] + if (comp["release_name"] != rq["base_branch"]) and comp["isIB"] + ] + + all_ibs_list.extend(ibs) + + # Ignore forward ported prs, and merge commits + only_prs_list = [] + for comp in rq["comparisons"]: + only_prs_list.extend( + [ + pr["number"] + for pr in comp["merged_prs"] + if not (pr["is_merge_commit"] or pr["from_merge_commit"]) + ] + ) + + all_prs_list.extend(only_prs_list) + f_summary.write("IBs:%s\n" % ibs) + f_summary.write("NumIBs:%d\n" % len(ibs)) + f_summary.write("PRs:%s\n" % only_prs_list) + f_summary.write("NumPRs:%d\n" % len(only_prs_list)) + f_summary.close() + + all_ibs_list = list(set(all_ibs_list)) + all_ibs_list.sort() + + all_prs_list = list(set(all_prs_list)) + all_prs_list.sort() + + f_summary_all = open("ibsSummaryAll.txt", "w") + f_summary_all.write("IBs:%s\n" % all_ibs_list) + f_summary_all.write("NumIBs:%d\n" % len(all_ibs_list)) + + f_summary_all.write("PRs:%s\n" % all_prs_list) + f_summary_all.write("NumPRs:%d\n" % len(all_prs_list)) + + +def get_production_archs(config_map): + archs = {} + for release in config_map: + if ("PROD_ARCH" in release) and ( + ("DISABLED" not in release) or ("IB_WEB_PAGE" in release) + ): + archs[release["RELEASE_QUEUE"]] = release["SCRAM_ARCH"] + return archs + + +def generate_ib_json_short_summary(results): + """ + Generates a json file with the global status of the last IB for each architecture, + per each Release Queue + Schema of short_summary + [ releaseQueue1, releaseQueue2, ... 
, releaseQueueN ] + Schema of releaseQueueN + { + "": { + "": { + "status": "ok|warning|error|unknown" + "latest_IB" : "" + } + } + } + """ + short_summary = {} + for rq in results: + # this should not be called 'release name', this should be fixed + rq_name = rq["release_name"] + enabled_archs = RELEASES_ARCHS[rq_name] + for arch in enabled_archs: + ibs_for_current_arch = [rel for rel in rq["comparisons"] if arch in rel["tests_archs"]] + # it starts as ok and checks the conditions + ib_status = "ok" + + if len(ibs_for_current_arch) == 0: + pass + # TODO unused + # latest_IB = 'N/A' + # ib_status = 'unknown' + else: + latest_IB_info = ibs_for_current_arch[-1] + latest_IB_name = latest_IB_info["release_name"] + + build_info = [b for b in latest_IB_info["builds"] if b["arch"] == arch] + if len(build_info) == 0: + build_passed = "unknown" + else: + build_passed = build_info[0]["passed"] + + fwlite_info = [b for b in latest_IB_info["fwlite"] if b["arch"] == arch] + # TODO unused + # if len(fwlite_info) == 0: + # fwlite_passed = 'unknown' + # else: + # fwlite_passed = build_info[0]["passed"] + + unit_tests_info = [u for u in latest_IB_info["utests"] if u["arch"] == arch] + if len(unit_tests_info) == 0: + utests_passed = "unknown" + else: + utests_passed = unit_tests_info[0]["passed"] + + gpu_unit_tests_info = [ + u for u in latest_IB_info["gpu_utests"] if u["arch"] == arch + ] + if len(gpu_unit_tests_info) == 0: + gpu_utests_passed = "unknown" + else: + gpu_utests_passed = gpu_unit_tests_info[0]["passed"] + + relvals_info = [r for r in latest_IB_info["relvals"] if r["arch"] == arch] + if len(relvals_info) == 0: + relvals_passed = "unknown" + else: + relvals_passed = relvals_info[0]["passed"] + + if not short_summary.get(rq_name): + short_summary[rq_name] = {} + short_summary[rq_name][arch] = {} + short_summary[rq_name][arch]["latest_IB"] = latest_IB_name + + merged_statuses = "%s-%s-%s-%s" % ( + build_passed, + utests_passed, + relvals_passed, + gpu_utests_passed, + ) + + if "unknown" in merged_statuses: + ib_status = "unknown" + elif "failed" in merged_statuses or "False" in merged_statuses: + ib_status = "error" + elif "warning" in merged_statuses: + ib_status = "warning" + short_summary[rq_name][arch]["status"] = ib_status + + short_summary["all_archs"] = ARCHITECTURES + short_summary["prod_archs"] = get_production_archs(get_config_map_properties()) + out_json = open("LatestIBsSummary.json", "w") + json.dump(short_summary, out_json, indent=4) + out_json.close() + + +def identify_release_groups(results): + """ + Identifies and groups the releases accodring to their prefix + For example if the release queues are: + CMSSW_7_1_X, CMSSW_7_0_X, CMSSW_6_2_X, CMSSW_5_3_X, CMSSW_7_1_THREADED_X + CMSSW_7_1_BOOSTIO_X, CMSSW_7_1_ROOT6_X, CMSSW_7_1_GEANT10_X, CMSSW_6_2_X_SLHC + CMSSW_7_1_DEVEL_X, CMSSW_7_1_CLANG_X, CMSSW_7_2_X, CMSSW_7_2_DEVEL_X, CMSSW_7_2_CLANG_X + CMSSW_7_2_GEANT10_X + It will organize them like this: + CMSSW_5_3_X: CMSSW_5_3_X + CMSSW_7_2_X: CMSSW_7_2_X, CMSSW_7_2_DEVEL_X, CMSSW_7_2_CLANG_X, CMSSW_7_2_GEANT10_X + CMSSW_6_2_X: CMSSW_6_2_X CMSSW_6_2_X_SLHC + CMSSW_7_0_X: CMSSW_7_0_X + CMSSW_7_1_X: CMSSW_7_1_X, CMSSW_7_1_THREADED_X, CMSSW_7_1_BOOSTIO_X, CMSSW_7_1_ROOT6_X', + CMSSW_7_1_GEANT10_X, CMSSW_7_1_DEVEL_X, CMSSW_7_1_CLANG_X + It returns a dictionary in which the keys are the release prefixes, and the values are + the release queues + """ + from operator import itemgetter + + releases = [] + release_objs = {} + for rq in results: + rn = rq["release_name"] + release_objs[rn] = rq + 
releases.append([rn] + [int(x) for x in rn.split("_")[1:3]]) + + groups = [] + for item in sorted(releases, key=itemgetter(1, 2)): + prefix = "CMSSW_" + "_".join([str(s) for s in item[1:3]]) + "_X" + group = None + for g in groups: + if g[0] == prefix: + group = g + break + if not group: + group = [prefix, []] + groups.append(group) + if not item[0] in group[1]: + group[1].append(item[0]) + + structure = {"all_release_queues": [], "all_prefixes": [], "default_release": ""} + for g in groups: + rq = g[0] + structure[rq] = sorted(g[1], reverse=True) + structure["all_release_queues"] = structure[rq] + structure["all_release_queues"] + structure["all_prefixes"].append(rq) + for rq in structure["all_prefixes"][::-1]: + rn = structure[rq][0] + if ( + (rn in release_objs) + and ("comparisons" in release_objs[rn]) + and (release_objs[rn]["comparisons"]) + ): + for comp in release_objs[rn]["comparisons"]: + if ("builds" in comp) and (comp["builds"]): + structure["default_release"] = rn + return structure + return structure + + +def fix_results(results): + for rq in results: + prev_ib_date = "" + release_count = 0 + for comp in rq["comparisons"]: + comp["release_queue"] = rq["release_name"] + comp["base_branch"] = rq["base_branch"] + if comp["ib_date"]: + prev_ib_date = comp["ib_date"] + release_count = 0 + comp["ib_date"] = prev_ib_date + "-0000" + else: + release_count += 1 + xstr = str(format(release_count, "04d")) + if not prev_ib_date: + comp["ib_date"] = xstr + "-" + comp["release_name"] + else: + comp["ib_date"] = prev_ib_date + "-" + xstr + comp["next_ib"] = False + if comp["release_name"] == rq["base_branch"]: + comp["next_ib"] = True + rq["comparisons"].reverse() + + +# ----------------------------------------------------------------------------------- +# ---- Start of execution +# ----------------------------------------------------------------------------------- + +if __name__ == "__main__": + MAGIC_COMMAND_CMSDIST_TAGS = ( + "pushd %s; git tag -l '*/*/ARCHITECTURE' | grep -E 'IB|ERR'; popd" % CMSDIST_REPO + ) + CMSSDT_DIR = "/data/sdt" + BUILD_LOG_DIR = CMSSDT_DIR + "/buildlogs" + JENKINS_ARTIFACTS_SUBDIR = "SDT/jenkins-artifacts" + JENKINS_ARTIFACTS_DIR = CMSSDT_DIR + "/" + JENKINS_ARTIFACTS_SUBDIR + # I used this type of concatenation because the string has %s inside + MAGIC_COMMAND_FIND_FIRST_MERGE_WITH_TAG = ( + "GIT_DIR=" + + CMSSW_REPO_LOCAL + + ' git log --pretty=\'"%s", "tags->,%d"\' END_TAG | grep "\\"tags->," | head -n1' + ) + MAGIC_COMMAD_FIND_RESULTS_RELVALS = ( + "find " + + BUILD_LOG_DIR + + '/ARCHITECTURE/www -mindepth 6 -maxdepth 6 -path "*/pyRelValMatrixLogs/run/runall-report-step123-.log"' + ) + MAGIC_COMMAND_FIND_EXCEPTIONS_RESULTS_RELVALS = ( + "find cms-sw.github.io/data/relvals/ -name '*EXCEPTIONS.json'" + ) + MAGIC_COMMAND_TAGS = ( + "GIT_DIR=" + + CMSSW_REPO_LOCAL + + ' git log --pretty=\'"%s", "tags->,%d"\' START_TAG..END_TAG | grep -E "\\"tags->, " | grep -E "RELEASE_QUEUE"' + ) + MAGIC_COMMAND_FIND_RESULTS_UNIT_TESTS = ( + "find " + + BUILD_LOG_DIR + + "/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name unitTests-summary.log" + ) + MAGIC_COMMAND_FIND_RESULTS_GPU_UNIT_TESTS = ( + "find " + + BUILD_LOG_DIR + + '/ARCHITECTURE/www -mindepth 5 -maxdepth 5 -name unitTests-summary.log | grep "/GPU/"' + ) + MAGIC_COMMAND_FIND_RESULTS_ADDON = ( + "find " + BUILD_LOG_DIR + "/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name addOnTests.log" + ) + MAGIC_COMMAND_FIND_RESULTS_BUILD = ( + "find " + + BUILD_LOG_DIR + + '/ARCHITECTURE/www -mindepth 5 -maxdepth 5 -path 
"*/new/logAnalysis.pkl"' + ) + MAGIC_COMMAND_FIND_RESULTS_FWLITE = ( + "find " + + BUILD_LOG_DIR + + '/ARCHITECTURE/www -mindepth 5 -maxdepth 5 -path "*/new_FWLITE/logAnalysis.pkl"' + ) + MAGIC_COMMAND_FIND_RESULTS_PYTHON3 = ( + "find " + BUILD_LOG_DIR + "/ARCHITECTURE/www -mindepth 4 -maxdepth 4 -name python3.html" + ) + MAGIC_COMMAND_FIND_INVALID_INCLUDES = ( + "find " + + JENKINS_ARTIFACTS_DIR + + '/invalid-includes -maxdepth 3 -mindepth 3 -path "*/ARCHITECTURE/summary.json" -type f' + ) + MAGIC_COMMAND_FIND_STATIC_CHECKS = ( + "test -d " + + JENKINS_ARTIFACTS_DIR + + "/ib-static-analysis/RELEASE_NAME && ls " + + JENKINS_ARTIFACTS_DIR + + "/ib-static-analysis/RELEASE_NAME/" + ) + MAGIC_COMMAND_FIND_STATIC_CHECKS_FILTER1 = ( + "test -s " + + JENKINS_ARTIFACTS_DIR + + "/ib-static-analysis/RELEASE_NAME/ARCHITECTURE/reports/modules2statics-filter1.txt && echo reports/modules2statics-filter1.txt" + ) + MAGIC_COMMAND_FIND_MATERIL_BUDGET_CHECKS = ( + "test -d " + + JENKINS_ARTIFACTS_DIR + + "/material-budget/RELEASE_NAME && ls " + + JENKINS_ARTIFACTS_DIR + + "/material-budget/RELEASE_NAME/" + ) + MAGIC_COMMAND_FIND_MATERIL_BUDGET_COMPARISON_CHECKS = ( + "TEST_FILE=" + + JENKINS_ARTIFACTS_DIR + + "/material-budget/RELEASE_NAME/ARCHITECTURE/comparison/Images/MBDiff.txt && test -f $TEST_FILE && grep '0$' $TEST_FILE | wc -l" + ) + MAGIC_COMMAND_FIND_VALGRIND = "test -d " + JENKINS_ARTIFACTS_DIR + "/valgrind/RELEASE_NAME" + MAGIC_COMMAND_FIND_IGPROF = "test -d " + JENKINS_ARTIFACTS_DIR + "/igprof/RELEASE_NAME" + MAGIC_COMMAND_FIND_PROFILING = "test -d " + JENKINS_ARTIFACTS_DIR + "/profiling/RELEASE_NAME" + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER1 = ( + "ls " + + JENKINS_ARTIFACTS_DIR + + '/profiling/RELEASE_NAME/ARCHITECTURE/WORKFLOW/step3_*.resources.json 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME/||;s|.json$||"' + ) + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER2 = ( + "ls " + + JENKINS_ARTIFACTS_DIR + + '/igprof/RELEASE_NAME/ARCHITECTURE/profiling/*/sorted_RES_CPU_step3.txt 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME/||"' + ) + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER3 = ( + "ls " + + JENKINS_ARTIFACTS_DIR + + '/profiling/RELEASE_NAME/ARCHITECTURE/*/step3_gpu_nsys.txt 2>/dev/null | head -1 | sed "s|.*/RELEASE_NAME||"' + ) + MAGIC_COMMAND_FIND_COMPARISON_BASELINE = ( + "test -f " + + JENKINS_ARTIFACTS_DIR + + "/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results/wf_errors.txt" + ) + MAGIC_COMMAND_COMPARISON_BASELINE_ERRORS = ( + "cat " + + JENKINS_ARTIFACTS_DIR + + "/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results/wf_errors.txt" + ) + COMPARISON_BASELINE_TESTS_URL = ( + "https://cmssdt.cern.ch/" + + JENKINS_ARTIFACTS_SUBDIR + + "/ib-baseline-tests/RELEASE_NAME/ARCHITECTURE/-GenuineIntel/matrix-results" + ) + CHECK_HLT_PATH = ( + JENKINS_ARTIFACTS_DIR + "/HLT-Validation/RELEASE_NAME/ARCHITECTURE/jenkins.log" + ) + CHECK_CRAB_PATH = JENKINS_ARTIFACTS_DIR + "/ib-run-crab/RELEASE_NAME/*" + MAGIC_COMMAND_FIND_DQM_TESTS = ( + "test -d " + JENKINS_ARTIFACTS_DIR + "/ib-dqm-tests/RELEASE_NAME" + ) + MAGIC_COMMAND_FIND_LIZARD = ( + "test -d " + JENKINS_ARTIFACTS_DIR + "/lizard/RELEASE_NAME/ARCHITECTURE" + ) + MAGIC_COMMAND_FIND_CHECK_HEADERS = ( + "test -d " + JENKINS_ARTIFACTS_DIR + "/check_headers/RELEASE_NAME/ARCHITECTURE" + ) + CHECK_HEADERS_PATH = ( + JENKINS_ARTIFACTS_DIR + "/check_headers/RELEASE_NAME/ARCHITECTURE/headers_with_error.log" + ) + CHECK_UBSANLOG_PATH = JENKINS_ARTIFACTS_DIR + "/ubsan_logs/CMSSW_*/ubsan_runtime_errors.log" + 
MAGIC_COMMAND_FIND_FLAWFINDER = ( + "test -d " + JENKINS_ARTIFACTS_DIR + "/flawfinder/RELEASE_NAME/ARCHITECTURE" + ) + MAGIC_COMMAND_FIND_DUP_DICT = ( + "grep -v '^Searching for ' " + + BUILD_LOG_DIR + + "/ARCHITECTURE/www/*/*/RELEASE_NAME/testLogs/dup*.log" + + " | grep -v ':**** SKIPPING ' | grep -v '^ *$' | wc -l " + ) + CONFIG_MAP_FILE = "config.map" + # this will be filled using config.map by get_config_map_params() + ARCHITECTURES = [] + # this will be filled using config.map by get_config_map_params() + RELEASES_BRANCHES = {} + # this will be filled using config.map by get_config_map_params() SLHC releases have a different format, so it is hardcoded + SPECIAL_RELEASES = ["SLHC"] + # this will be filled using config.map by get_config_map_params() + SP_REL_REGEX = "" # Needs to be declared empty before using + # These are the release queues that need to be shown, this this will be filled using config.map by get_config_map_params() + RELEASE_QUEUES = [] + # These are the ibs and archs for which the aditional tests need to be shown + # The schema is: + # { + # "": { + # "" : [ test1, test2, ... , testN ] + # } + # } + # This this will be filled using config.map by get_config_map_params() + RELEASE_ADITIONAL_TESTS = {} + # the acrhitectures for which the enabled releases are currently avaiable + # The schema is: + # { + # "": [ "arch1" , "arch2" , ... ,"archN" ] + # } + # will be filled using config.map by get_config_map_params() + RELEASES_ARCHS = {} + """ + { + "RELEASE_QUE" : { "ARCH1" : "CMSDIST_BRANCH", "ARCH2" : "CMSDIST_BRANCH2" } + } + """ + RELEASES_ARCHS_WITH_DIST_BRANCH = {} + # The IBs and arch for which relval results are availavle + # The schema is: + # { + # "": [ "arch1" , "arch2" , ... ,"archN" ] + # } + MAGIC_COMMAND_FIND_ALL_TAGS = ( + "GIT_DIR=" + + CMSSW_REPO_LOCAL + + ' git log --pretty=\'"%s", "tags->,%d"\' END_TAG | grep -E "\\"tags->, " | grep -E "RELEASE_QUEUE"' + ) + # This regular expression allows to identify if a merge commit is an automatic forward port + AUTO_FORWARD_PORT_REGEX = "^.*Merge CMSSW.+ into CMSSW.+$" + + class BuildResultsKeys(object): + DICT_ERROR = "dictError" + COMP_ERROR = "compError" + LINK_ERROR = "linkError" + COMP_WARNING = "compWarning" + DWNL_ERROR = "dwnlError" + MISC_ERROR = "miscError" + IGNORE_WARNING = "ignoreWarning" + PYTHON_ERROR = "pythonError" + PYTHON3_ERROR = "python3Warning" + + class PossibleBuildResults(object): + PASSED = "passed" + WARNING = "warning" + ERROR = "error" + + class PossibleUnitTestResults(object): + PASSED = "passed" + FAILED = "failed" + UNKNOWN = "unknown" + + results = [] + + get_config_map_params() + SP_REL_REGEX = "|".join(SPECIAL_RELEASES) + REQUESTED_COMPARISONS = [("%s_%s..%s" % (rq, START_DATE, rq)) for rq in RELEASE_QUEUES] + + AFS_INSTALLATION = "/cvmfs/cms.cern.ch/*/cms" + installedPaths = [] + for ib_path in ["/cvmfs/cms-ib.cern.ch", "/cvmfs/cms-ib.cern.ch/sw/*"]: + installedPaths += [x for x in glob(ib_path + "/week*/*/cms/cmssw/*")] + installedPaths += [x for x in glob(ib_path + "/week*/*/cms/cmssw-patch/*")] + installedPaths += [x for x in glob(AFS_INSTALLATION + "/cmssw/*")] + installedPaths += [x for x in glob(AFS_INSTALLATION + "/cmssw-patch/*")] + + installedReleases = [basename(x) for x in installedPaths] + + print_verbose("Installed Releases:") + print_verbose(installedReleases) + prs_file = GITHUB_IO_REPO + "/_data/prs_cmssw_cache.json" + token = open(expanduser("~/.github-token")).read().strip() + github = Github(login_or_token=token) + CMSSW_REPO = 
github.get_repo(CMSSW_REPO_NAME)
+
+    for comp in REQUESTED_COMPARISONS:
+        start_tag = comp.split("..")[0]
+        end_tag = comp.split("..")[1]
+        release_queue = start_tag
+
+        # if it is an SLHC or any other special release, the split happens at the fifth underscore _
+        if re.search(SP_REL_REGEX, release_queue):
+            print_verbose("This is a special release")
+            release_queue = re.match(r"^((?:[^_]*_){%d}[^_]*)_(.*)" % (4), release_queue).groups()[
+                0
+            ]
+        else:
+            release_queue = re.match(r"^((?:[^_]*_){%d}[^_]*)_(.*)" % (3), release_queue).groups()[
+                0
+            ]
+
+        print("####################################################################")
+        print("I will analyze %s from %s to %s:" % (release_queue, start_tag, end_tag))
+
+        release_branch = RELEASES_BRANCHES[release_queue]
+        release_queue_results = {}
+        release_queue_results["release_name"] = release_queue
+        release_queue_results["base_branch"] = release_branch
+
+        print("Identifying tags...")
+        tags = execute_magic_command_tags(
+            start_tag,
+            end_tag,
+            release_queue,
+            release_branch,
+            ignore_tags="^CMSSW_9_3_.+_2017-09-(06-2300|07-1100)$",
+        )
+        originalTags = tags
+        tags = [
+            x for x in tags if x in installedReleases
+        ]  # NOTE: comment out on local development
+        tags.append(release_branch)
+        print("I got these tags: ")
+        print(tags)
+
+        print("Getting merged pull requests between tags...")
+        release_queue_results["comparisons"] = compare_tags(
+            release_branch, tags, CMSSW_REPO_LOCAL, CMSSW_REPO
+        )
+        print("Done")
+
+        # Check whether the tests are actually run for this architecture; if they are not, do not look for them.
+        # Then go over each selected test, run the 'magic' command to look for its results, interpret them,
+        # and write them back into 'release_queue_results['comparisons']'. Finally, append everything to the results object.
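A side note on the release-queue extraction near the top of this loop: the regular expression keeps the first four underscore-separated fields of the tag (five for special releases such as SLHC), which is exactly the release queue name. A standalone sketch, not part of the patch, using hypothetical tag names:

import re

def queue_from_tag(tag, special=False):
    # the group keeps n+1 underscore-separated fields: 4 normally, 5 for special releases
    n = 4 if special else 3
    return re.match(r"^((?:[^_]*_){%d}[^_]*)_(.*)" % n, tag).groups()[0]

print(queue_from_tag("CMSSW_14_0_X_2023-10-30-1100"))            # CMSSW_14_0_X
print(queue_from_tag("CMSSW_6_2_X_SLHC_2023-10-30-1100", True))  # CMSSW_6_2_X_SLHC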
+ # (check config.map file) + additional_tests = RELEASE_ADITIONAL_TESTS.get(release_queue) + if additional_tests: + for arch in additional_tests.keys(): + tests_to_find = additional_tests[arch] + if "HLT" in tests_to_find: + find_check_hlt(release_queue_results["comparisons"], arch) + if "crab" in tests_to_find: + find_check_crab(release_queue_results["comparisons"], arch) + if "static-checks" in tests_to_find: + find_static_results(release_queue_results["comparisons"], arch) + if "material-budget" in tests_to_find: + find_material_budget_results(release_queue_results["comparisons"], arch) + if "baseline" in tests_to_find: + find_comparison_baseline_results(release_queue_results["comparisons"], arch) + if "valgrind" in tests_to_find: + find_general_test_results( + "valgrind", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_VALGRIND, + ) + if "lizard" in tests_to_find: + find_general_test_results( + "lizard", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_LIZARD, + ) + if "flawfinder" in tests_to_find: + find_general_test_results( + "flawfinder", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_FLAWFINDER, + ) + if ("igprof-mp" in tests_to_find) or ("igprof-pp" in tests_to_find): + find_general_test_results( + "igprof", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_IGPROF, + ) + if "profiling" in tests_to_find: + find_general_test_results( + "profiling", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_PROFILING, + ) + find_general_test_results( + "piechart", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER1, + find_one_profiling_result, + ) + find_general_test_results( + "reco_event_loop", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER2, + find_one_profiling_result, + ) + find_general_test_results( + "reco_gpu_mods", + release_queue_results["comparisons"], + arch, + MAGIC_COMMAND_FIND_PROFILING_CHECKS_FILTER3, + find_one_profiling_result, + ) + if "check-headers" in tests_to_find: + find_check_headers(release_queue_results["comparisons"], arch) + # will run every time for Q/A, that is why not checked if it is in tests to find + + find_general_test_results_2( + "dqm_tests", release_queue_results["comparisons"], MAGIC_COMMAND_FIND_DQM_TESTS + ) + results.append(release_queue_results) + + add_tests_to_results( + results, + execute_magic_command_find_results("utests"), + execute_magic_command_find_results("relvals"), + execute_magic_command_find_results("addOn"), + execute_magic_command_find_results("builds"), + execute_magic_command_get_cmsdist_tags(), + execute_magic_command_find_rv_exceptions_results(), # rv_Exceptions_Results + execute_magic_command_find_results("fwlite"), + execute_magic_command_find_results("gpu_utests"), + execute_magic_command_find_results("python3"), + execute_magic_command_find_results("invalid-includes"), + ) + + ubsan_data = {} + out, err, rcode = get_output_command("wc -l %s" % CHECK_UBSANLOG_PATH) + for line in out.split("\n"): + if not "/CMSSW_" in line: + continue + print("UBSAN", line) + count, rel = line.strip().split(" ", 1) + rel = rel.split("/")[-2] + ubsan_data[rel] = int(count) + if "_UBSAN_" in rel: + ubsan_data[rel.replace("_UBSAN_", "_")] = int(count) + + for release_queue_results in results: + find_dup_dict_result(release_queue_results["comparisons"]) + find_ubsan_logs(release_queue_results["comparisons"], ubsan_data) + + fill_missing_cmsdist_tags(results) 
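For reference, the UBSAN bookkeeping above turns 'wc -l' output lines into a map from IB name to error count, keyed on the directory holding ubsan_runtime_errors.log. A standalone sketch, not part of the patch, with a made-up log path:

ubsan_data = {}
sample = "  42 /data/sdt/SDT/jenkins-artifacts/ubsan_logs/CMSSW_13_3_UBSAN_X_2023-10-29-2300/ubsan_runtime_errors.log"

count, path = sample.strip().split(" ", 1)
release = path.split("/")[-2]  # the directory name is the IB name
ubsan_data[release] = int(count)
if "_UBSAN_" in release:
    # also record the count under the matching non-UBSAN IB name
    ubsan_data[release.replace("_UBSAN_", "_")] = int(count)
print(ubsan_data)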
+ get_cmsdist_merge_commits(results) + print_results(results) + + structure = identify_release_groups(results) + fix_results(results) + generate_separated_json_results(results) + generate_ib_json_short_summary(results) + + out_json = open("merged_prs_summary.json", "w") + json.dump(results, out_json, indent=4) + out_json.close() + + out_groups = open("structure.json", "w") + json.dump(structure, out_groups, indent=4) + out_groups.close() diff --git a/report_size.py b/report_size.py index 43ad3b879b85..6242bf8d6f68 100755 --- a/report_size.py +++ b/report_size.py @@ -1,33 +1,37 @@ #!/usr/bin/env python from __future__ import print_function import sys -#run this command for once to create the data file or directly pipe its output to this script -#for releases -#find /afs/cern.ch/cms/slc[5-7]* -maxdepth 3 -type d -print -exec fs lq {} \; | grep -v 'Volume Name' | sed 'N;s/\n/ /' | uniq -c -f2 -#for ibs -#find /afs/cern.ch/cms/sw/ReleaseCandidates/ -maxdepth 3 -type d -print -exec fs lq {} \; |grep -v '^Volume' | sed 'N;s/\n/ /' | uniq -c -f3 + +# run this command for once to create the data file or directly pipe its output to this script +# for releases +# find /afs/cern.ch/cms/slc[5-7]* -maxdepth 3 -type d -print -exec fs lq {} \; | grep -v 'Volume Name' | sed 'N;s/\n/ /' | uniq -c -f2 +# for ibs +# find /afs/cern.ch/cms/sw/ReleaseCandidates/ -maxdepth 3 -type d -print -exec fs lq {} \; |grep -v '^Volume' | sed 'N;s/\n/ /' | uniq -c -f3 data = {} -allocated = 0 +allocated = 0 used = 0 volumes = 0 max_volume_len = 0 max_path_len = 0 for line in sys.stdin: - info = line.strip().split() - if info[2] in data: continue - volumes += 1 - allocated = allocated + int(info[3]) - used = used + int(info[4]) - data[info[2]]=info - if len(info[2])>max_volume_len: max_volume_len=len(info[2]) - if len(info[1])>max_path_len: max_path_len=len(info[1]) + info = line.strip().split() + if info[2] in data: + continue + volumes += 1 + allocated = allocated + int(info[3]) + used = used + int(info[4]) + data[info[2]] = info + if len(info[2]) > max_volume_len: + max_volume_len = len(info[2]) + if len(info[1]) > max_path_len: + max_path_len = len(info[1]) max_volume_len = max_volume_len + 4 max_path_len = max_path_len + 4 -print ("Total Volumes :",volumes) -print ("Allocated Space:",int(allocated/1000000),"GB") -print ("Used Space :",int(used/1000000),"GB") +print("Total Volumes :", volumes) +print("Allocated Space:", int(allocated / 1000000), "GB") +print("Used Space :", int(used / 1000000), "GB") for vol in sorted(data): - msg = "{0:<"+str(max_volume_len)+"}{1:<"+str(max_path_len)+"}" - print(msg.format(vol, data[vol][1]),data[vol][4]+"/"+data[vol][3]) + msg = "{0:<" + str(max_volume_len) + "}{1:<" + str(max_path_len) + "}" + print(msg.format(vol, data[vol][1]), data[vol][4] + "/" + data[vol][3]) diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py index 06b774a2e879..b58d27668b96 100644 --- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py +++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py @@ -4,35 +4,35 @@ from repo_config import CMSBUILD_USER, GH_REPO_NAME CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS 
= [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'degrutto' : ['ecal-laser'], - 'ndaci' : ['ecal-laser'], - 'zghiche' : ['ecal-laser'], - 'gennai' : ['ecal-laser'], - 'zhenbinwu' : ['ecal-laser'], - 'wang-hui' : ['ecal-laser'], - 'abrinke1' : ['ecal-laser'], - 'mkovac' : ['ecal-laser'], + CMSBUILD_USER: ["tests", "code-checks"], + "degrutto": ["ecal-laser"], + "ndaci": ["ecal-laser"], + "zghiche": ["ecal-laser"], + "gennai": ["ecal-laser"], + "zhenbinwu": ["ecal-laser"], + "wang-hui": ["ecal-laser"], + "abrinke1": ["ecal-laser"], + "mkovac": ["ecal-laser"], } -CMSSW_CATEGORIES={ - 'ecal-laser': [GH_REPO_NAME], +CMSSW_CATEGORIES = { + "ecal-laser": [GH_REPO_NAME], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py +++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py index da3c74bc46ca..cfe5cd554235 100644 --- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py +++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py @@ -1,24 +1,28 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token" -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbuild" -GH_REPO_ORGANIZATION="EcalLaserValidation" -GH_REPO_NAME="HLT_EcalLaserValidation" -GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18OTa0HlxmA6uQ9oimETZqECqGDvkqQsEW/7jod1rl8AF1GnmAu0kGt' -#GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18uyTkiQtIOYUfVj2PQLV34u5hQAbfNhl8=' -ADD_LABELS=False -ADD_WEB_HOOK=False 
-JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/HLT_EcalLaserValidation" -JENKINS_NOTIFICATION_EMAIL="" -OPEN_ISSUE_FOR_PUSH_TESTS=True +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbuild" +GH_REPO_ORGANIZATION = "EcalLaserValidation" +GH_REPO_NAME = "HLT_EcalLaserValidation" +GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX18OTa0HlxmA6uQ9oimETZqECqGDvkqQsEW/7jod1rl8AF1GnmAu0kGt" +# GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18uyTkiQtIOYUfVj2PQLV34u5hQAbfNhl8=' +ADD_LABELS = False +ADD_WEB_HOOK = False +JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/HLT_EcalLaserValidation" +JENKINS_NOTIFICATION_EMAIL = "" +OPEN_ISSUE_FOR_PUSH_TESTS = True IGNORE_ISSUES = [] -#Valid Web hooks -VALID_WEB_HOOKS=['push'] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild" -def file2Package(filename): return GH_REPO_NAME +# Valid Web hooks +VALID_WEB_HOOKS = ["push"] +# Set the Jenkins slave label is your tests needs special machines to run. +JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild" + + +def file2Package(filename): + return GH_REPO_NAME diff --git a/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py index 06b774a2e879..b58d27668b96 100644 --- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py +++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py @@ -4,35 +4,35 @@ from repo_config import CMSBUILD_USER, GH_REPO_NAME CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'degrutto' : ['ecal-laser'], - 'ndaci' : ['ecal-laser'], - 'zghiche' : ['ecal-laser'], - 'gennai' : ['ecal-laser'], - 'zhenbinwu' : ['ecal-laser'], - 'wang-hui' : ['ecal-laser'], - 'abrinke1' : ['ecal-laser'], - 'mkovac' : ['ecal-laser'], + CMSBUILD_USER: ["tests", "code-checks"], + "degrutto": ["ecal-laser"], + "ndaci": ["ecal-laser"], + "zghiche": ["ecal-laser"], + "gennai": ["ecal-laser"], + "zhenbinwu": ["ecal-laser"], + "wang-hui": ["ecal-laser"], + "abrinke1": ["ecal-laser"], + "mkovac": ["ecal-laser"], } -CMSSW_CATEGORIES={ - 'ecal-laser': [GH_REPO_NAME], +CMSSW_CATEGORIES = { + "ecal-laser": [GH_REPO_NAME], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git 
a/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py +++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py index 0accae686106..76056dd78533 100644 --- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py +++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py @@ -1,23 +1,27 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token" -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbuild" -GH_REPO_ORGANIZATION="EcalLaserValidation" -GH_REPO_NAME="L1T_EcalLaserValidation" -GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18FTI2p/ZkGhERFC/gPJhXtW+bjAF9xtuWoJIDhv3B+ifsXz3gWm5Xq' -ADD_LABELS=False -ADD_WEB_HOOK=False -JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/L1T_EcalLaserValidation" -JENKINS_NOTIFICATION_EMAIL="" -OPEN_ISSUE_FOR_PUSH_TESTS=True +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbuild" +GH_REPO_ORGANIZATION = "EcalLaserValidation" +GH_REPO_NAME = "L1T_EcalLaserValidation" +GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX18FTI2p/ZkGhERFC/gPJhXtW+bjAF9xtuWoJIDhv3B+ifsXz3gWm5Xq" +ADD_LABELS = False +ADD_WEB_HOOK = False +JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/L1T_EcalLaserValidation" +JENKINS_NOTIFICATION_EMAIL = "" +OPEN_ISSUE_FOR_PUSH_TESTS = True IGNORE_ISSUES = [] -#Valid Web hooks -VALID_WEB_HOOKS=['push'] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild" -def file2Package(filename): return GH_REPO_NAME +# Valid Web hooks +VALID_WEB_HOOKS = ["push"] +# Set the Jenkins slave label is your tests needs special machines to run. 
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild" + + +def file2Package(filename): + return GH_REPO_NAME diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py index ea2cb61f6b09..0f19e7b84c73 100644 --- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py +++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py @@ -4,28 +4,28 @@ from repo_config import CMSBUILD_USER, GH_REPO_NAME CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar',CMSBUILD_USER] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar", CMSBUILD_USER] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'emanueledimarco' : ['ecal-pulse-shape'], + CMSBUILD_USER: ["tests", "code-checks"], + "emanueledimarco": ["ecal-pulse-shape"], } -CMSSW_CATEGORIES={ - 'ecal-pulse-shape': [GH_REPO_NAME], +CMSSW_CATEGORIES = { + "ecal-pulse-shape": [GH_REPO_NAME], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py index 5a8b19e90cde..50279e124ba8 100644 --- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py +++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_2_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py index fe2229299320..d45ddf0dad95 100644 --- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py +++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py @@ -1,23 +1,27 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token" 
-GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbuild" -GH_REPO_ORGANIZATION="EcalLaserValidation" -GH_REPO_NAME="RECO_EcalPulseShapeValidation" -GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+r+XWzRjZHPgURrshDykGdtONgxUa7XBof1Nh1/BiWgt3IyWXu4t60' -ADD_LABELS=False -ADD_WEB_HOOK=False -JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/RECO_EcalPulseShapeValidation" -JENKINS_NOTIFICATION_EMAIL="" -OPEN_ISSUE_FOR_PUSH_TESTS=True +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbuild" +GH_REPO_ORGANIZATION = "EcalLaserValidation" +GH_REPO_NAME = "RECO_EcalPulseShapeValidation" +GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+r+XWzRjZHPgURrshDykGdtONgxUa7XBof1Nh1/BiWgt3IyWXu4t60" +ADD_LABELS = False +ADD_WEB_HOOK = False +JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/RECO_EcalPulseShapeValidation" +JENKINS_NOTIFICATION_EMAIL = "" +OPEN_ISSUE_FOR_PUSH_TESTS = True IGNORE_ISSUES = [] -#Valid Web hooks -VALID_WEB_HOOKS=['push'] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild" -def file2Package(filename): return GH_REPO_NAME +# Valid Web hooks +VALID_WEB_HOOKS = ["push"] +# Set the Jenkins slave label is your tests needs special machines to run. 
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild" + + +def file2Package(filename): + return GH_REPO_NAME diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py index 02c961df2d53..ce7f155453dc 100644 --- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py +++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py @@ -4,36 +4,36 @@ from repo_config import CMSBUILD_USER, GH_REPO_NAME CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'degrutto' : ['ecal-laser'], - 'ndaci' : ['ecal-laser'], - 'zghiche' : ['ecal-laser'], - 'gennai' : ['ecal-laser'], - 'zhenbinwu' : ['ecal-laser'], - 'wang-hui' : ['ecal-laser'], - 'abrinke1' : ['ecal-laser'], - 'mkovac' : ['ecal-laser'], - 'InnaKucher' : ['ecal-laser'], + CMSBUILD_USER: ["tests", "code-checks"], + "degrutto": ["ecal-laser"], + "ndaci": ["ecal-laser"], + "zghiche": ["ecal-laser"], + "gennai": ["ecal-laser"], + "zhenbinwu": ["ecal-laser"], + "wang-hui": ["ecal-laser"], + "abrinke1": ["ecal-laser"], + "mkovac": ["ecal-laser"], + "InnaKucher": ["ecal-laser"], } -CMSSW_CATEGORIES={ - 'ecal-laser': [GH_REPO_NAME], +CMSSW_CATEGORIES = { + "ecal-laser": [GH_REPO_NAME], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py +++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py index de15a7f7db51..dc9168d6345a 
100644 --- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py +++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py @@ -1,23 +1,27 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token" -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbuild" -GH_REPO_ORGANIZATION="EcalLaserValidation" -GH_REPO_NAME="TPG_EcalLaserValidation" -GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GGHoH9PB4G9rRvEuoKejWnx1zWHOV39VGswFB1rX9s7F3HFdaTtcs' -ADD_LABELS=False -ADD_WEB_HOOK=False -JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/TPG_EcalLaserValidation" -JENKINS_NOTIFICATION_EMAIL="" -OPEN_ISSUE_FOR_PUSH_TESTS=True +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbuild" +GH_REPO_ORGANIZATION = "EcalLaserValidation" +GH_REPO_NAME = "TPG_EcalLaserValidation" +GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GGHoH9PB4G9rRvEuoKejWnx1zWHOV39VGswFB1rX9s7F3HFdaTtcs" +ADD_LABELS = False +ADD_WEB_HOOK = False +JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/TPG_EcalLaserValidation" +JENKINS_NOTIFICATION_EMAIL = "" +OPEN_ISSUE_FOR_PUSH_TESTS = True IGNORE_ISSUES = [] -#Valid Web hooks -VALID_WEB_HOOKS=['push'] -#Set the Jenkins slave label if your tests needs special machines to run. -JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild" -def file2Package(filename): return GH_REPO_NAME +# Valid Web hooks +VALID_WEB_HOOKS = ["push"] +# Set the Jenkins slave label if your tests needs special machines to run. 
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild" + + +def file2Package(filename): + return GH_REPO_NAME diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py b/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py index db8b95d34890..a9d0a71c3b98 100644 --- a/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py +++ b/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py @@ -4,28 +4,28 @@ from repo_config import CMSBUILD_USER, GH_REPO_NAME CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'GilsonCS' : ['hcal-conditions'], + CMSBUILD_USER: ["tests", "code-checks"], + "GilsonCS": ["hcal-conditions"], } -CMSSW_CATEGORIES={ - 'hcal-conditions': [GH_REPO_NAME], +CMSSW_CATEGORIES = { + "hcal-conditions": [GH_REPO_NAME], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py b/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py +++ b/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py b/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py index 5dcfb2ba808f..fccf54a5e47c 100644 --- a/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py +++ b/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py @@ -1,22 +1,26 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token-cmsbot" -GH_TOKEN_READONLY="~/.github-token-readonly" 
-CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbot" -GH_REPO_ORGANIZATION="HcalConditionsAutomatization" -GH_REPO_NAME="ConditionsValidation" -GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -ADD_LABELS=False -ADD_WEB_HOOK=True -JENKINS_UPLOAD_DIRECTORY="HcalConditionsAutomatization/ConditionsValidation" -JENKINS_NOTIFICATION_EMAIL="" -OPEN_ISSUE_FOR_PUSH_TESTS=True +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token-cmsbot" +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbot" +GH_REPO_ORGANIZATION = "HcalConditionsAutomatization" +GH_REPO_NAME = "ConditionsValidation" +GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +ADD_LABELS = False +ADD_WEB_HOOK = True +JENKINS_UPLOAD_DIRECTORY = "HcalConditionsAutomatization/ConditionsValidation" +JENKINS_NOTIFICATION_EMAIL = "" +OPEN_ISSUE_FOR_PUSH_TESTS = True IGNORE_ISSUES = [] -#Valid Web hooks -VALID_WEB_HOOKS=['push'] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="lxplus7||lxplus6" -def file2Package(filename): return GH_REPO_NAME +# Valid Web hooks +VALID_WEB_HOOKS = ["push"] +# Set the Jenkins slave label is your tests needs special machines to run. +JENKINS_SLAVE_LABEL = "lxplus7||lxplus6" + + +def file2Package(filename): + return GH_REPO_NAME diff --git a/repos/cms_patatrack/cmssw/categories.py b/repos/cms_patatrack/cmssw/categories.py index fca5cfe2e3d3..8533ece43ae3 100644 --- a/repos/cms_patatrack/cmssw/categories.py +++ b/repos/cms_patatrack/cmssw/categories.py @@ -5,24 +5,24 @@ from categories_map import CMSSW_CATEGORIES CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE -TRIGGER_PR_TESTS = list(set(['smuzaffar','felicepantaleo'] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +TRIGGER_PR_TESTS = list(set(["smuzaffar", "felicepantaleo"] + REQUEST_BUILD_RELEASE)) +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - 'fwyzard' : list(CMSSW_CATEGORIES.keys()), + CMSBUILD_USER: ["tests", "code-checks"], + "fwyzard": list(CMSSW_CATEGORIES.keys()), } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/cms_patatrack/cmssw/releases.py b/repos/cms_patatrack/cmssw/releases.py index a69f01107464..90bd5636bdff 100644 --- a/repos/cms_patatrack/cmssw/releases.py +++ b/repos/cms_patatrack/cmssw/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master 
branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_1_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/cms_patatrack/cmssw/repo_config.py b/repos/cms_patatrack/cmssw/repo_config.py index 21e6f7d51659..bf5e7c4e978c 100644 --- a/repos/cms_patatrack/cmssw/repo_config.py +++ b/repos/cms_patatrack/cmssw/repo_config.py @@ -1,36 +1,36 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -#GH read/write token: Use default ~/.github-token-cmsbot -GH_TOKEN="~/.github-token-cmsbot" -#GH readonly token: Use default ~/.github-token-readonly -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -#GH bot user: Use default cmsbot -CMSBUILD_USER="cmsbot" -GH_REPO_ORGANIZATION="cms-patatrack" -GH_REPO_FULLNAME="cms-patatrack/cmssw" -CREATE_EXTERNAL_ISSUE=False -#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -#GH Web hook pass phrase. This is encrypeted used bot keys. -GITHUB_WEBHOOK_TOKEN='''U2FsdGVkX19C9pvh4GUbgDDUy0G9tSJZu7pFoQ0QodGMQtb/h4AFOKPsBxKlORAz -KXg7+k1B6egPueUzlaJ9BA==''' -#Set to True if you want bot to add build/test labels to your repo -ADD_LABELS=True -#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights -ADD_WEB_HOOK=False -#List of issues/pr which bot should ignore +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +# GH read/write token: Use default ~/.github-token-cmsbot +GH_TOKEN = "~/.github-token-cmsbot" +# GH readonly token: Use default ~/.github-token-readonly +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +# GH bot user: Use default cmsbot +CMSBUILD_USER = "cmsbot" +GH_REPO_ORGANIZATION = "cms-patatrack" +GH_REPO_FULLNAME = "cms-patatrack/cmssw" +CREATE_EXTERNAL_ISSUE = False +# Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +# GH Web hook pass phrase. This is encrypeted used bot keys. +GITHUB_WEBHOOK_TOKEN = """U2FsdGVkX19C9pvh4GUbgDDUy0G9tSJZu7pFoQ0QodGMQtb/h4AFOKPsBxKlORAz +KXg7+k1B6egPueUzlaJ9BA==""" +# Set to True if you want bot to add build/test labels to your repo +ADD_LABELS = True +# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights +ADD_WEB_HOOK = False +# List of issues/pr which bot should ignore IGNORE_ISSUES = [] -#Set the Jenkins slave label is your tests needs special machines to run. 
-JENKINS_SLAVE_LABEL="slc7_amd64 && GPU" -#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests -CMS_STANDARD_TESTS=True -#Map your branches with cmssw branches for tests -#User Branch => CMSSW/CMSDIST Bracnh -CMS_BRANCH_MAP={ - 'CMSSW_10_1_X_Patatrack' : 'CMSSW_10_1_X', - 'CMSSW_10_2_X_Patatrack' : 'CMSSW_10_2_X' +# Set the Jenkins slave label is your tests needs special machines to run. +JENKINS_SLAVE_LABEL = "slc7_amd64 && GPU" +# For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests +CMS_STANDARD_TESTS = True +# Map your branches with cmssw branches for tests +# User Branch => CMSSW/CMSDIST Bracnh +CMS_BRANCH_MAP = { + "CMSSW_10_1_X_Patatrack": "CMSSW_10_1_X", + "CMSSW_10_2_X_Patatrack": "CMSSW_10_2_X", } -#Valid Web hooks e.g. '.+' to match all event -VALID_WEB_HOOKS=['.+'] - +# Valid Web hooks e.g. '.+' to match all event +VALID_WEB_HOOKS = [".+"] diff --git a/repos/dmwm/CRABServer/repo_config.py b/repos/dmwm/CRABServer/repo_config.py index 6c9f652f865c..ccd51bda5457 100644 --- a/repos/dmwm/CRABServer/repo_config.py +++ b/repos/dmwm/CRABServer/repo_config.py @@ -1,31 +1,31 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -#GH read/write token: Use default ~/.github-token-cmsbot -GH_TOKEN="~/.github-token-cmsdmwmbot" -#GH readonly token: Use default ~/.github-token-readonly -GH_TOKEN_READONLY="~/.github-token-cmsdmwmbot" -CONFIG_DIR=dirname(abspath(__file__)) -#GH bot user: Use default cmsbot -CMSBUILD_USER="cmsdmwmbot" -GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR)) -GH_REPO_FULLNAME="dmwm/CRABServer" -CREATE_EXTERNAL_ISSUE=False -#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins -JENKINS_SERVER="http://cmsjenkins11.cern.ch:8080/dmwm-jenkins" -#Set to True if you want bot to add build/test labels to your repo -ADD_LABELS=False -#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights -ADD_WEB_HOOK=False -#List of issues/pr which bot should ignore +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +# GH read/write token: Use default ~/.github-token-cmsbot +GH_TOKEN = "~/.github-token-cmsdmwmbot" +# GH readonly token: Use default ~/.github-token-readonly +GH_TOKEN_READONLY = "~/.github-token-cmsdmwmbot" +CONFIG_DIR = dirname(abspath(__file__)) +# GH bot user: Use default cmsbot +CMSBUILD_USER = "cmsdmwmbot" +GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR)) +GH_REPO_FULLNAME = "dmwm/CRABServer" +CREATE_EXTERNAL_ISSUE = False +# Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins +JENKINS_SERVER = "http://cmsjenkins11.cern.ch:8080/dmwm-jenkins" +# Set to True if you want bot to add build/test labels to your repo +ADD_LABELS = False +# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights +ADD_WEB_HOOK = False +# List of issues/pr which bot should ignore IGNORE_ISSUES = [10] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="" -#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests -CMS_STANDARD_TESTS=True -#Map your branches with cmssw branches for tests -#User Branch => CMSSW/CMSDIST Bracnh -CMS_BRANCH_MAP={ -} -#Valid Web hooks e.g. 
'.+' to match all event -VALID_WEB_HOOKS=['release', 'workflow_dispatch'] -WEBHOOK_PAYLOAD=True +# Set the Jenkins slave label is your tests needs special machines to run. +JENKINS_SLAVE_LABEL = "" +# For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests +CMS_STANDARD_TESTS = True +# Map your branches with cmssw branches for tests +# User Branch => CMSSW/CMSDIST Bracnh +CMS_BRANCH_MAP = {} +# Valid Web hooks e.g. '.+' to match all event +VALID_WEB_HOOKS = ["release", "workflow_dispatch"] +WEBHOOK_PAYLOAD = True diff --git a/repos/smuzaffar/SCRAM/repo_config.py b/repos/smuzaffar/SCRAM/repo_config.py index 40b9c8f7a1ea..264cc90f5162 100644 --- a/repos/smuzaffar/SCRAM/repo_config.py +++ b/repos/smuzaffar/SCRAM/repo_config.py @@ -1,9 +1,9 @@ -from os.path import basename,dirname,abspath +from os.path import basename, dirname, abspath -CONFIG_DIR=dirname(abspath(__file__)) -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+8ckT0H3wKIUb59hZQrF5PZ2VlBxYyFek=' -RUN_DEFAULT_CMS_BOT=False +CONFIG_DIR = dirname(abspath(__file__)) +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+8ckT0H3wKIUb59hZQrF5PZ2VlBxYyFek=" +RUN_DEFAULT_CMS_BOT = False -VALID_WEB_HOOKS=['.*'] -WEBHOOK_PAYLOAD=True -JENKINS_SERVER="http://cmsjenkins11.cern.ch:8080/dmwm-jenkins" +VALID_WEB_HOOKS = [".*"] +WEBHOOK_PAYLOAD = True +JENKINS_SERVER = "http://cmsjenkins11.cern.ch:8080/dmwm-jenkins" diff --git a/repos/smuzaffar/cmssw/categories.py b/repos/smuzaffar/cmssw/categories.py index 3cc2400f831b..b03ba39a16eb 100644 --- a/repos/smuzaffar/cmssw/categories.py +++ b/repos/smuzaffar/cmssw/categories.py @@ -5,24 +5,24 @@ from categories_map import CMSSW_CATEGORIES CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE TRIGGER_PR_TESTS = list(set([] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - gh_user : list(CMSSW_CATEGORIES.keys()), + CMSBUILD_USER: ["tests", "code-checks"], + gh_user: list(CMSSW_CATEGORIES.keys()), } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/smuzaffar/cmssw/releases.py b/repos/smuzaffar/cmssw/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/smuzaffar/cmssw/releases.py +++ b/repos/smuzaffar/cmssw/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} 
+RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/smuzaffar/cmssw/repo_config.py b/repos/smuzaffar/cmssw/repo_config.py index dcfdc19cac37..00f477b7e2c8 100644 --- a/repos/smuzaffar/cmssw/repo_config.py +++ b/repos/smuzaffar/cmssw/repo_config.py @@ -1,32 +1,32 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -#GH read/write token: Use default ~/.github-token-cmsbot -GH_TOKEN="~/.github-token-cmsbot" -#GH readonly token: Use default ~/.github-token-readonly -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -#GH bot user: Use default cmsbot -CMSBUILD_USER="cmsbot" -GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR)) -GH_REPO_FULLNAME="smuzaffar/cmssw" -CREATE_EXTERNAL_ISSUE=False -#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -#GH Web hook pass phrase. This is encrypeted used bot keys. -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR' -#Set to True if you want bot to add build/test labels to your repo -ADD_LABELS=False -#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights -ADD_WEB_HOOK=False -#List of issues/pr which bot should ignore +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +# GH read/write token: Use default ~/.github-token-cmsbot +GH_TOKEN = "~/.github-token-cmsbot" +# GH readonly token: Use default ~/.github-token-readonly +GH_TOKEN_READONLY = "~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +# GH bot user: Use default cmsbot +CMSBUILD_USER = "cmsbot" +GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR)) +GH_REPO_FULLNAME = "smuzaffar/cmssw" +CREATE_EXTERNAL_ISSUE = False +# Jenkins CI server: Use default http://cmsjenkins02.cern.ch:8080/cms-jenkins +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +# GH Web hook pass phrase. This is encrypted using bot keys. +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR" +# Set to True if you want bot to add build/test labels to your repo +ADD_LABELS = False +# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights +ADD_WEB_HOOK = False +# List of issues/pr which bot should ignore IGNORE_ISSUES = [10] -#Set the Jenkins slave label is your tests needs special machines to run. -JENKINS_SLAVE_LABEL="" -#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests -CMS_STANDARD_TESTS=True -#Map your branches with cmssw branches for tests -#User Branch => CMSSW/CMSDIST Bracnh -CMS_BRANCH_MAP={ -} -#Valid Web hooks e.g. '.+' to match all event -VALID_WEB_HOOKS=['.+'] +# Set the Jenkins slave label if your tests need special machines to run. +JENKINS_SLAVE_LABEL = "" +# For cmsdist/cmssw repos, set it to False if you do not want to run standard cms pr tests +CMS_STANDARD_TESTS = True +# Map your branches with cmssw branches for tests +# User Branch => CMSSW/CMSDIST Branch +CMS_BRANCH_MAP = {} +# Valid Web hooks e.g.
'.+' to match all event +VALID_WEB_HOOKS = [".+"] diff --git a/repos/smuzaffar/int_build/categories.py b/repos/smuzaffar/int_build/categories.py index d48a94fbde15..14c32ed77c74 100644 --- a/repos/smuzaffar/int_build/categories.py +++ b/repos/smuzaffar/int_build/categories.py @@ -4,28 +4,28 @@ from repo_config import CMSBUILD_USER CMSSW_L1 = [] -APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1)) +APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1)) REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE TRIGGER_PR_TESTS = list(set([] + REQUEST_BUILD_RELEASE)) -PR_HOLD_MANAGERS = [ ] +PR_HOLD_MANAGERS = [] -COMMON_CATEGORIES = [ "tests", "code-checks" ] -EXTERNAL_CATEGORIES = [ "externals" ] +COMMON_CATEGORIES = ["tests", "code-checks"] +EXTERNAL_CATEGORIES = ["externals"] EXTERNAL_REPOS = [] -CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ] -CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ] +CMSSW_REPOS = [gh_user + "/" + gh_cmssw] +CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist] CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1)) COMPARISON_MISSING_MAP = [] -#github_user:[list of categories] +# github_user:[list of categories] CMSSW_L2 = { - CMSBUILD_USER : ["tests", "code-checks" ], - gh_user : [gh_user], + CMSBUILD_USER: ["tests", "code-checks"], + gh_user: [gh_user], } -CMSSW_CATEGORIES={ - gh_user: [gh_user], +CMSSW_CATEGORIES = { + gh_user: [gh_user], } USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys())) diff --git a/repos/smuzaffar/int_build/releases.py b/repos/smuzaffar/int_build/releases.py index 776ad15456f3..6beacaeea30a 100644 --- a/repos/smuzaffar/int_build/releases.py +++ b/repos/smuzaffar/int_build/releases.py @@ -1,13 +1,14 @@ -#Default development branch +# Default development branch # Changes from master branch will be merge in to it # Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch) # For new release cycle just change this and make sure to add its milestone and production branches CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X" -RELEASE_BRANCH_MILESTONE={} -RELEASE_BRANCH_CLOSED=[] -RELEASE_BRANCH_PRODUCTION=[] -SPECIAL_RELEASE_MANAGERS=[] -RELEASE_MANAGERS={} -USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ]) - +RELEASE_BRANCH_MILESTONE = {} +RELEASE_BRANCH_CLOSED = [] +RELEASE_BRANCH_PRODUCTION = [] +SPECIAL_RELEASE_MANAGERS = [] +RELEASE_MANAGERS = {} +USERS_TO_TRIGGER_HOOKS = set( + SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel] +) diff --git a/repos/smuzaffar/int_build/repo_config.py b/repos/smuzaffar/int_build/repo_config.py index c208c5c2b659..b0c045bc69ef 100644 --- a/repos/smuzaffar/int_build/repo_config.py +++ b/repos/smuzaffar/int_build/repo_config.py @@ -1,15 +1,19 @@ -from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER -from os.path import basename,dirname,abspath -GH_TOKEN="~/.github-token-cmsbot" -GH_TOKEN_READONLY="~/.github-token-readonly" -CONFIG_DIR=dirname(abspath(__file__)) -CMSBUILD_USER="cmsbot" -GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR)) -GH_REPO_FULLNAME="smuzaffar/int-build" -CREATE_EXTERNAL_ISSUE=False -JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins" -GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR' -ADD_LABELS=False -ADD_WEB_HOOK=False +from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER +from os.path import basename, dirname, abspath + +GH_TOKEN = "~/.github-token-cmsbot" +GH_TOKEN_READONLY = 
"~/.github-token-readonly" +CONFIG_DIR = dirname(abspath(__file__)) +CMSBUILD_USER = "cmsbot" +GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR)) +GH_REPO_FULLNAME = "smuzaffar/int-build" +CREATE_EXTERNAL_ISSUE = False +JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins" +GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR" +ADD_LABELS = False +ADD_WEB_HOOK = False IGNORE_ISSUES = [] -def file2Package(filename): return GH_REPO_ORGANIZATION + + +def file2Package(filename): + return GH_REPO_ORGANIZATION diff --git a/run-ib-addon.py b/run-ib-addon.py index 82d571cd852f..649613f54a46 100755 --- a/run-ib-addon.py +++ b/run-ib-addon.py @@ -13,14 +13,20 @@ from logUpdater import LogUpdater if ("CMSSW_BASE" not in environ) or ("SCRAM_ARCH" not in environ): - print("ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script") - exit(1) + print( + "ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script" + ) + exit(1) -timeout=7200 -try: timeout=int(argv[1]) -except: timeout=7200 +timeout = 7200 +try: + timeout = int(argv[1]) +except: + timeout = 7200 logger = LogUpdater(environ["CMSSW_BASE"]) -ret = doCmd('cd %s; rm -rf addOnTests; timeout %s addOnTests.py -j %s 2>&1 >addOnTests.log' % (environ["CMSSW_BASE"], timeout,cmsRunProcessCount)) -doCmd('cd '+environ["CMSSW_BASE"]+'/addOnTests/logs; zip -r addOnTests.zip *.log') +ret = doCmd( + "cd %s; rm -rf addOnTests; timeout %s addOnTests.py -j %s 2>&1 >addOnTests.log" + % (environ["CMSSW_BASE"], timeout, cmsRunProcessCount) +) +doCmd("cd " + environ["CMSSW_BASE"] + "/addOnTests/logs; zip -r addOnTests.zip *.log") logger.updateAddOnTestsLogs() - diff --git a/run-ib-relval.py b/run-ib-relval.py index 5ee819f717a1..1ca9ac1e8dae 100755 --- a/run-ib-relval.py +++ b/run-ib-relval.py @@ -19,82 +19,140 @@ from os.path import abspath, dirname import re, socket from time import time + SCRIPT_DIR = dirname(abspath(argv[0])) -def process_relvals(threads=None,cmssw_version=None,arch=None,cmssw_base=None,logger=None): - pass + +def process_relvals(threads=None, cmssw_version=None, arch=None, cmssw_base=None, logger=None): + pass + if __name__ == "__main__": - parser = OptionParser(usage="%prog -i|--id -l|--list ") - parser.add_option("-i", "--id", dest="jobid", help="Job Id e.g. 1of3", default="1of1") - parser.add_option("-l", "--list", dest="workflow", help="List of workflows to run e.g. 1.0,2.0,3.0 or -s", type=str, default=None) - parser.add_option("-n", "--dry-run",dest="dryRun", action="store_true", help="Do not upload results", default=False) - parser.add_option("-f", "--force",dest="force", help="Force running of workflows without checking the server for previous run", action="store_true", default=False) - parser.add_option("-N", "--non-threaded",dest="nonThreaded", action="store_true", help="Do not run in threaded mode", default=False) - parser.add_option("-J", "--job-config", dest="jobConfig", help="Extra arguments to pass to jobscheduler", type=str, default='') - opts, args = parser.parse_args() + parser = OptionParser(usage="%prog -i|--id -l|--list ") + parser.add_option("-i", "--id", dest="jobid", help="Job Id e.g. 1of3", default="1of1") + parser.add_option( + "-l", + "--list", + dest="workflow", + help="List of workflows to run e.g. 
1.0,2.0,3.0 or -s", + type=str, + default=None, + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + action="store_true", + help="Do not upload results", + default=False, + ) + parser.add_option( + "-f", + "--force", + dest="force", + help="Force running of workflows without checking the server for previous run", + action="store_true", + default=False, + ) + parser.add_option( + "-N", + "--non-threaded", + dest="nonThreaded", + action="store_true", + help="Do not run in threaded mode", + default=False, + ) + parser.add_option( + "-J", + "--job-config", + dest="jobConfig", + help="Extra arguments to pass to jobscheduler", + type=str, + default="", + ) + opts, args = parser.parse_args() - if len(args) > 0: parser.error("Too many/few arguments") - if not opts.workflow: parser.error("Missing -l|--list argument.") - if ("CMSSW_VERSION" not in environ) or ("CMSSW_BASE" not in environ) or ("SCRAM_ARCH" not in environ): - print("ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script") - exit(1) + if len(args) > 0: + parser.error("Too many/few arguments") + if not opts.workflow: + parser.error("Missing -l|--list argument.") + if ( + ("CMSSW_VERSION" not in environ) + or ("CMSSW_BASE" not in environ) + or ("SCRAM_ARCH" not in environ) + ): + print( + "ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script" + ) + exit(1) + + if opts.dryRun: + environ["CMSSW_DRY_RUN"] = "true" + if opts.nonThreaded: + environ["CMSSW_NON_THREADED"] = "true" + elif "CMSSW_NON_THREADED" in environ: + del os.environ["CMSSW_NON_THREADED"] + thrds = cmsRunProcessCount + cmssw_ver = environ["CMSSW_VERSION"] + arch = environ["SCRAM_ARCH"] + cmssw_base = environ["CMSSW_BASE"] + logger = None + if not opts.dryRun: + logger = LogUpdater(dirIn=cmssw_base) + if logger and not opts.force: + doneWFs = logger.getDoneRelvals() + print("Already done workflows: ", doneWFs) + wfs = opts.workflow.split(",") + opts.workflow = ",".join([w for w in wfs if (w not in doneWFs)]) + print("Workflow to run:", opts.workflow) + else: + print("Force running all workflows") - if opts.dryRun: environ["CMSSW_DRY_RUN"]="true" - if opts.nonThreaded: environ["CMSSW_NON_THREADED"]="true" - elif "CMSSW_NON_THREADED" in environ: del os.environ['CMSSW_NON_THREADED'] - thrds = cmsRunProcessCount - cmssw_ver = environ["CMSSW_VERSION"] - arch = environ["SCRAM_ARCH"] - cmssw_base = environ["CMSSW_BASE"] - logger=None - if not opts.dryRun: logger=LogUpdater(dirIn=cmssw_base) - if logger and not opts.force: - doneWFs = logger.getDoneRelvals() - print("Already done workflows: ",doneWFs) - wfs = opts.workflow.split(",") - opts.workflow = ",".join([w for w in wfs if (w not in doneWFs)]) - print("Workflow to run:",opts.workflow) - else: - print("Force running all workflows") + if re.match("^CMSSW_(9_([3-9]|[1-9][0-9]+)|[1-9][0-9]+)_.*$", cmssw_ver): + e = 0 + if opts.workflow: + stime = time() + p = Popen("%s/jobs/create-relval-jobs.py %s" % (SCRIPT_DIR, opts.workflow), shell=True) + e = waitpid(p.pid, 0)[1] + print("Time took to create jobs:", int(time() - stime), "sec") + if e: + exit(e) - if re.match("^CMSSW_(9_([3-9]|[1-9][0-9]+)|[1-9][0-9]+)_.*$",cmssw_ver): - e=0 - if opts.workflow: - stime = time() - p=Popen("%s/jobs/create-relval-jobs.py %s" % (SCRIPT_DIR, opts.workflow),shell=True) - e=waitpid(p.pid,0)[1] - print("Time took to create jobs:",int(time()-stime),"sec") - if e: exit(e) + p = None + stime = time() 
+ xopt = "-c 150 -m 85" + if "lxplus" in socket.gethostname(): + xopt = "-c 120 -m 40" + p = Popen( + "cd %s/pyRelval ; %s/jobs/jobscheduler.py -M 0 %s -o time %s" + % (cmssw_base, SCRIPT_DIR, xopt, opts.jobConfig), + shell=True, + ) + e = waitpid(p.pid, 0)[1] + print("Time took to create jobs:", int(time() - stime), "sec") + else: + print("No workflow to run.") + system("touch " + cmssw_base + "/done." + opts.jobid) + if logger: + logger.updateRelValMatrixPartialLogs(cmssw_base, "done." + opts.jobid) + exit(e) - p = None - stime = time() - xopt="-c 150 -m 85" - if "lxplus" in socket.gethostname(): - xopt="-c 120 -m 40" - p = Popen("cd %s/pyRelval ; %s/jobs/jobscheduler.py -M 0 %s -o time %s" % (cmssw_base,SCRIPT_DIR,xopt,opts.jobConfig), shell=True) - e=waitpid(p.pid,0)[1] - print("Time took to create jobs:",int(time()-stime),"sec") + if isThreaded(cmssw_ver, arch): + print("Threaded IB Found") + thrds = int(MachineMemoryGB / 4.5) + if thrds == 0: + thrds = 1 + elif "fc24_ppc64le_" in arch: + print("FC22 IB Found") + thrds = int(MachineMemoryGB / 4) + elif "fc24_ppc64le_" in arch: + print("CentOS 7.2 + PPC64LE Found") + thrds = int(MachineMemoryGB / 3) else: - print("No workflow to run.") - system("touch "+cmssw_base+"/done."+opts.jobid) - if logger: logger.updateRelValMatrixPartialLogs(cmssw_base, "done."+opts.jobid) - exit(e) - - if isThreaded(cmssw_ver,arch): - print("Threaded IB Found") - thrds=int(MachineMemoryGB/4.5) - if thrds==0: thrds=1 - elif "fc24_ppc64le_" in arch: - print("FC22 IB Found") - thrds=int(MachineMemoryGB/4) - elif "fc24_ppc64le_" in arch: - print("CentOS 7.2 + PPC64LE Found") - thrds=int(MachineMemoryGB/3) - else: - print("Normal IB Found") - if thrds>cmsRunProcessCount: thrds=cmsRunProcessCount - known_errs = get_known_errors(cmssw_ver, arch, "relvals") - matrix = PyRelValsThread(thrds, cmssw_base+"/pyRelval", opts.jobid) - matrix.setArgs(GetMatrixOptions(cmssw_ver,arch)) - matrix.run_workflows(opts.workflow.split(","),logger,known_errors=known_errs) + print("Normal IB Found") + if thrds > cmsRunProcessCount: + thrds = cmsRunProcessCount + known_errs = get_known_errors(cmssw_ver, arch, "relvals") + matrix = PyRelValsThread(thrds, cmssw_base + "/pyRelval", opts.jobid) + matrix.setArgs(GetMatrixOptions(cmssw_ver, arch)) + matrix.run_workflows(opts.workflow.split(","), logger, known_errors=known_errs) diff --git a/runPyRelValThread.py b/runPyRelValThread.py index c977a624a715..8e6b214c721b 100755 --- a/runPyRelValThread.py +++ b/runPyRelValThread.py @@ -8,300 +8,399 @@ import json from logreaderUtils import transform_and_write_config_file, add_exception_to_config -def runStep1Only(basedir, workflow, args=''): - args = FixWFArgs (os.environ["CMSSW_VERSION"],os.environ["SCRAM_ARCH"],workflow,args) - workdir = os.path.join(basedir, workflow) - matrixCmd = 'runTheMatrix.py --maxSteps=0 -l ' + workflow +' '+args - try: - if not os.path.isdir(workdir): - os.makedirs(workdir) - except Exception as e: - print("runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "+str(workflow)+" : can't create thread folder: " + str(e)) - try: - ret = doCmd(matrixCmd, False, workdir) - except Exception as e: - print("runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "+str(workflow)+" : caught exception: " + str(e)) - return -def runThreadMatrix(basedir, workflow, args='', logger=None, wf_err={}): - args = FixWFArgs (os.environ["CMSSW_VERSION"],os.environ["SCRAM_ARCH"],workflow,args) - workdir = os.path.join(basedir, workflow) - matrixCmd = 'runTheMatrix.py -l 
' + workflow +' '+args - try: - if not os.path.isdir(workdir): - os.makedirs(workdir) - except Exception as e: - print("runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : can't create thread folder: " + str(e)) - wftime = time.time() - try: - ret = doCmd(matrixCmd, False, workdir) - except Exception as e: - print("runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : caught exception: " + str(e)) - wftime = time.time() - wftime - outfolders = [file for file in os.listdir(workdir) if re.match("^" + str(workflow) + "_", file)] - if len(outfolders)==0: return - outfolder = os.path.join(basedir,outfolders[0]) - wfdir = os.path.join(workdir,outfolders[0]) - ret = doCmd("rm -rf " + outfolder + "; mkdir -p " + outfolder) - ret = doCmd("find . -mindepth 1 -maxdepth 1 -name '*.xml' -o -name '*.log' -o -name '*.py' -o -name '*.json' -o -name 'cmdLog' -type f | xargs -i mv '{}' "+outfolder+"/", False, wfdir) - logRE = re.compile('^(.*/[0-9]+(\.[0-9]+|)_([^/]+))/step1_dasquery.log$') - for logFile in glob.glob(outfolder+"/step1_dasquery.log"): - m = logRE.match(logFile) - if not m : continue - ret = doCmd ("cp "+logFile+" "+m.group(1)+"/step1_"+m.group(3)+".log") - ret = doCmd("mv "+os.path.join(workdir,"runall-report-step*.log")+" "+os.path.join(outfolder,"workflow.log")) - ret = doCmd("echo " + str(wftime) +" > " + os.path.join(outfolder,"time.log")) - ret = doCmd("hostname -s > " + os.path.join(outfolder,"hostname")) - if wf_err: json.dump(wf_err, open("%s/known_error.json" % outfolder,"w")) - if logger: logger.updateRelValMatrixPartialLogs(basedir, outfolders[0]) - shutil.rmtree(workdir) - return +def runStep1Only(basedir, workflow, args=""): + args = FixWFArgs(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], workflow, args) + workdir = os.path.join(basedir, workflow) + matrixCmd = "runTheMatrix.py --maxSteps=0 -l " + workflow + " " + args + try: + if not os.path.isdir(workdir): + os.makedirs(workdir) + except Exception as e: + print( + "runPyRelVal> ERROR during test PyReleaseValidation steps, workflow " + + str(workflow) + + " : can't create thread folder: " + + str(e) + ) + try: + ret = doCmd(matrixCmd, False, workdir) + except Exception as e: + print( + "runPyRelVal> ERROR during test PyReleaseValidation steps, workflow " + + str(workflow) + + " : caught exception: " + + str(e) + ) + return + + +def runThreadMatrix(basedir, workflow, args="", logger=None, wf_err={}): + args = FixWFArgs(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], workflow, args) + workdir = os.path.join(basedir, workflow) + matrixCmd = "runTheMatrix.py -l " + workflow + " " + args + try: + if not os.path.isdir(workdir): + os.makedirs(workdir) + except Exception as e: + print( + "runPyRelVal> ERROR during test PyReleaseValidation, workflow " + + str(workflow) + + " : can't create thread folder: " + + str(e) + ) + wftime = time.time() + try: + ret = doCmd(matrixCmd, False, workdir) + except Exception as e: + print( + "runPyRelVal> ERROR during test PyReleaseValidation, workflow " + + str(workflow) + + " : caught exception: " + + str(e) + ) + wftime = time.time() - wftime + outfolders = [ + file for file in os.listdir(workdir) if re.match("^" + str(workflow) + "_", file) + ] + if len(outfolders) == 0: + return + outfolder = os.path.join(basedir, outfolders[0]) + wfdir = os.path.join(workdir, outfolders[0]) + ret = doCmd("rm -rf " + outfolder + "; mkdir -p " + outfolder) + ret = doCmd( + "find . 
-mindepth 1 -maxdepth 1 -name '*.xml' -o -name '*.log' -o -name '*.py' -o -name '*.json' -o -name 'cmdLog' -type f | xargs -i mv '{}' " + + outfolder + + "/", + False, + wfdir, + ) + logRE = re.compile("^(.*/[0-9]+(\.[0-9]+|)_([^/]+))/step1_dasquery.log$") + for logFile in glob.glob(outfolder + "/step1_dasquery.log"): + m = logRE.match(logFile) + if not m: + continue + ret = doCmd("cp " + logFile + " " + m.group(1) + "/step1_" + m.group(3) + ".log") + ret = doCmd( + "mv " + + os.path.join(workdir, "runall-report-step*.log") + + " " + + os.path.join(outfolder, "workflow.log") + ) + ret = doCmd("echo " + str(wftime) + " > " + os.path.join(outfolder, "time.log")) + ret = doCmd("hostname -s > " + os.path.join(outfolder, "hostname")) + if wf_err: + json.dump(wf_err, open("%s/known_error.json" % outfolder, "w")) + if logger: + logger.updateRelValMatrixPartialLogs(basedir, outfolders[0]) + shutil.rmtree(workdir) + return + def find_argv(args, arg): - val="" - fullval = "" - reX = re.compile('\s*(('+arg+')(\s+|=)([^ ]+))') - m=reX.search(args) - if m: glen = len(m.groups()) - while m: - fullval = m.group(1) - val = m.group(glen) - args = args.replace(fullval,"") - m=reX.search(args) - return (args, fullval, val) + val = "" + fullval = "" + reX = re.compile("\s*((" + arg + ")(\s+|=)([^ ]+))") + m = reX.search(args) + if m: + glen = len(m.groups()) + while m: + fullval = m.group(1) + val = m.group(glen) + args = args.replace(fullval, "") + m = reX.search(args) + return (args, fullval, val) + def splitWorkflows(workflows, max_wf_pre_set): - print(workflows) - avg_t = sum ([ x[1] for x in workflows ] ) / len(workflows) - wf_max = len(workflows) - wf_pre_set = wf_max - wf_sets = 1 - while (wf_pre_set > max_wf_pre_set): - wf_sets=wf_sets+1 - wf_pre_set = int(wf_max/wf_sets) - long_wf=int(wf_pre_set/2) - short_wf=wf_pre_set-long_wf - merged = [] - for i in range (1, wf_sets): - wf_count = len(workflows) - sub_set=workflows[0:long_wf]+workflows[-short_wf:] - new_avg = sum([ x[1] for x in sub_set])/len(sub_set) - new_index=0 - while (new_avg > avg_t) and (new_index max_wf_pre_set: + wf_sets = wf_sets + 1 + wf_pre_set = int(wf_max / wf_sets) + long_wf = int(wf_pre_set / 2) + short_wf = wf_pre_set - long_wf + merged = [] + for i in range(1, wf_sets): + wf_count = len(workflows) + sub_set = workflows[0:long_wf] + workflows[-short_wf:] + new_avg = sum([x[1] for x in sub_set]) / len(sub_set) + new_index = 0 + while (new_avg > avg_t) and (new_index < long_wf): + new_index += 1 + sub_set = workflows[0 : long_wf - new_index] + workflows[-short_wf - new_index :] + new_avg = sum([x[1] for x in sub_set]) / len(sub_set) + merged.append([x[0] for x in sub_set]) + workflows = workflows[long_wf - new_index : wf_count - short_wf - new_index] + merged.append([x[0] for x in workflows]) + return merged + class PyRelValsThread(object): - def __init__(self, jobs, basedir, jobid="1of1", outdir=None): - if not outdir: outdir = basedir - self.jobs = jobs - self.basedir = basedir - self.jobid=jobid - self.outdir = outdir - self.args = {} - self.setArgs("") + def __init__(self, jobs, basedir, jobid="1of1", outdir=None): + if not outdir: + outdir = basedir + self.jobs = jobs + self.basedir = basedir + self.jobid = jobid + self.outdir = outdir + self.args = {} + self.setArgs("") - def setArgs(self, args): - args = args.replace('\\"','"') - args, self.args['w'], tmp = find_argv(args,"-w|--what") - args, self.args['l'], tmp = find_argv(args,"-l|--list") - args, self.args['j'], tmp = find_argv(args,"-j|--nproc") - if ' -s ' in args: - 
self.args['s']='-s' - args = args.replace(' -s ','') - else: self.args['s']= "" - self.args['rest'] = args + def setArgs(self, args): + args = args.replace('\\"', '"') + args, self.args["w"], tmp = find_argv(args, "-w|--what") + args, self.args["l"], tmp = find_argv(args, "-l|--list") + args, self.args["j"], tmp = find_argv(args, "-j|--nproc") + if " -s " in args: + self.args["s"] = "-s" + args = args.replace(" -s ", "") + else: + self.args["s"] = "" + self.args["rest"] = args - def getWorkFlows(self, args): - self.setArgs(args) - workflowsCmd = "runTheMatrix.py -n "+self.args['w']+" "+self.args['s']+" "+self.args['l']+" | grep -v ' workflows with ' | grep -E '^[0-9][0-9]*(\.[0-9][0-9]*|)\s\s*' | sort -nr | awk '{print $1}'" - print("RunTheMatrix>>",workflowsCmd) - cmsstat, workflows = doCmd(workflowsCmd) - if not cmsstat: - return workflows.split("\n") - print("runPyRelVal> ERROR during test PyReleaseValidation : could not get output of " + workflowsCmd) - return [] + def getWorkFlows(self, args): + self.setArgs(args) + workflowsCmd = ( + "runTheMatrix.py -n " + + self.args["w"] + + " " + + self.args["s"] + + " " + + self.args["l"] + + " | grep -v ' workflows with ' | grep -E '^[0-9][0-9]*(\.[0-9][0-9]*|)\s\s*' | sort -nr | awk '{print $1}'" + ) + print("RunTheMatrix>>", workflowsCmd) + cmsstat, workflows = doCmd(workflowsCmd) + if not cmsstat: + return workflows.split("\n") + print( + "runPyRelVal> ERROR during test PyReleaseValidation : could not get output of " + + workflowsCmd + ) + return [] - def isNewRunTheMatrix(self): - e, o = doCmd("runTheMatrix.py --help | grep 'maxSteps=MAXSTEPS' | wc -l") - if e: return False - return o=="1" + def isNewRunTheMatrix(self): + e, o = doCmd("runTheMatrix.py --help | grep 'maxSteps=MAXSTEPS' | wc -l") + if e: + return False + return o == "1" - def getWorkflowSteps(self, workflows): - threads = [] - while(len(workflows) > 0): - threads = [t for t in threads if t.is_alive()] - if(len(threads) < self.jobs): - try: - t = threading.Thread(target=runStep1Only, args=(self.basedir, workflows.pop(), self.args['rest']+" "+self.args['w'])) - t.start() - threads.append(t) - except Exception as e: - print("runPyRelVal> ERROR threading matrix step1 : caught exception: " + str(e)) - for t in threads: t.join() - return + def getWorkflowSteps(self, workflows): + threads = [] + while len(workflows) > 0: + threads = [t for t in threads if t.is_alive()] + if len(threads) < self.jobs: + try: + t = threading.Thread( + target=runStep1Only, + args=( + self.basedir, + workflows.pop(), + self.args["rest"] + " " + self.args["w"], + ), + ) + t.start() + threads.append(t) + except Exception as e: + print( + "runPyRelVal> ERROR threading matrix step1 : caught exception: " + str(e) + ) + for t in threads: + t.join() + return - def run_workflows(self, workflows=[], logger=None, known_errors={}): - if not workflows: return - workflows = workflows[::-1] - threads = [] - while(len(workflows) > 0): - threads = [t for t in threads if t.is_alive()] - if(len(threads) < self.jobs): - try: - wf = workflows.pop() - wf_err = {} - if wf in known_errors: wf_err = known_errors[wf] - t = threading.Thread(target=runThreadMatrix, args=(self.basedir, wf, self.args['rest']+" "+self.args['w'], logger, wf_err)) - t.start() - threads.append(t) - except Exception as e: - print("runPyRelVal> ERROR threading matrix : caught exception: " + str(e)) - else: - time.sleep(5) - for t in threads: t.join() - ret, out = doCmd("touch "+self.basedir+"/done."+self.jobid) - if logger: 
logger.updateRelValMatrixPartialLogs(self.basedir, "done."+self.jobid) - return - - def update_runall(self): - self.update_known_errors() - runall = os.path.join(self.outdir,"runall-report-step123-.log") - outFile = open(runall+".tmp","w") - status_ok = [] - status_err = [] - len_ok = 0 - len_err = 0 - for logFile in glob.glob(self.basedir+'/*/workflow.log'): - inFile = open(logFile) - for line in inFile: - if re.match("^\s*(\d+\s+)+tests passed,\s+(\d+\s+)+failed\s*$",line): - res = line.strip().split(" tests passed, ") - res[0] = res[0].split() - res[1]=res[1].replace(" failed","").split() - len_res = len(res[0]) - if len_res>len_ok: - for i in range(len_ok,len_res): status_ok.append(0) - len_ok = len_res - for i in range(0,len_res): - status_ok[i]=status_ok[i]+int(res[0][i]) - len_res = len(res[1]) - if len_res>len_err: - for i in range(len_err,len_res): status_err.append(0) - len_err = len_res - for i in range(0,len_res): - status_err[i]=status_err[i]+int(res[1][i]) - else: outFile.write(line) - inFile.close() - outFile.write(" ".join(str(x) for x in status_ok)+" tests passed, "+" ".join(str(x) for x in status_err)+" failed\n") - outFile.close() - save = True - if os.path.exists(runall): - e, o = run_cmd("diff %s.tmp %s | wc -l" % (runall, runall)) - if o=="0": save=False - if save: run_cmd("mv %s.tmp %s" % (runall, runall)) - return + def run_workflows(self, workflows=[], logger=None, known_errors={}): + if not workflows: + return + workflows = workflows[::-1] + threads = [] + while len(workflows) > 0: + threads = [t for t in threads if t.is_alive()] + if len(threads) < self.jobs: + try: + wf = workflows.pop() + wf_err = {} + if wf in known_errors: + wf_err = known_errors[wf] + t = threading.Thread( + target=runThreadMatrix, + args=( + self.basedir, + wf, + self.args["rest"] + " " + self.args["w"], + logger, + wf_err, + ), + ) + t.start() + threads.append(t) + except Exception as e: + print("runPyRelVal> ERROR threading matrix : caught exception: " + str(e)) + else: + time.sleep(5) + for t in threads: + t.join() + ret, out = doCmd("touch " + self.basedir + "/done." + self.jobid) + if logger: + logger.updateRelValMatrixPartialLogs(self.basedir, "done." 
+ self.jobid) + return - def update_known_errors(self): - known_errors = {} - for logFile in glob.glob(self.basedir+'/*/known_error.json'): - try: - wf = logFile.split("/")[-2].split("_")[0] - known_errors[wf] = json.load(open(logFile)) - except Exception as e: - print("ERROR:",e) - outFile = open(os.path.join(self.outdir,"all_known_errors.json"),"w") - json.dump(known_errors, outFile) - outFile.close() + def update_runall(self): + self.update_known_errors() + runall = os.path.join(self.outdir, "runall-report-step123-.log") + outFile = open(runall + ".tmp", "w") + status_ok = [] + status_err = [] + len_ok = 0 + len_err = 0 + for logFile in glob.glob(self.basedir + "/*/workflow.log"): + inFile = open(logFile) + for line in inFile: + if re.match("^\s*(\d+\s+)+tests passed,\s+(\d+\s+)+failed\s*$", line): + res = line.strip().split(" tests passed, ") + res[0] = res[0].split() + res[1] = res[1].replace(" failed", "").split() + len_res = len(res[0]) + if len_res > len_ok: + for i in range(len_ok, len_res): + status_ok.append(0) + len_ok = len_res + for i in range(0, len_res): + status_ok[i] = status_ok[i] + int(res[0][i]) + len_res = len(res[1]) + if len_res > len_err: + for i in range(len_err, len_res): + status_err.append(0) + len_err = len_res + for i in range(0, len_res): + status_err[i] = status_err[i] + int(res[1][i]) + else: + outFile.write(line) + inFile.close() + outFile.write( + " ".join(str(x) for x in status_ok) + + " tests passed, " + + " ".join(str(x) for x in status_err) + + " failed\n" + ) + outFile.close() + save = True + if os.path.exists(runall): + e, o = run_cmd("diff %s.tmp %s | wc -l" % (runall, runall)) + if o == "0": + save = False + if save: + run_cmd("mv %s.tmp %s" % (runall, runall)) + return - def update_wftime(self): - time_info = {} - for logFile in glob.glob(self.basedir+'/*/time.log'): - try: - wf = logFile.split("/")[-2].split("_")[0] - inFile = open(logFile) - line = inFile.readline().strip() - inFile.close() - m = re.match("^(\d+)(\.\d+|)$",line) - if m: time_info[wf]=int(m.group(1)) - except Exception as e: - print("ERROR:",e) - outFile = open(os.path.join(self.outdir,"relval-times.json"),"w") - json.dump(time_info, outFile) - outFile.close() + def update_known_errors(self): + known_errors = {} + for logFile in glob.glob(self.basedir + "/*/known_error.json"): + try: + wf = logFile.split("/")[-2].split("_")[0] + known_errors[wf] = json.load(open(logFile)) + except Exception as e: + print("ERROR:", e) + outFile = open(os.path.join(self.outdir, "all_known_errors.json"), "w") + json.dump(known_errors, outFile) + outFile.close() - def parseLog(self): - logData = {} - logRE = re.compile('^.*/([1-9][0-9]*(\.[0-9]+|))_[^/]+/step([1-9])_.*\.log$') - max_steps = 0 - for logFile in glob.glob(self.basedir+'/[1-9]*/step[0-9]*.log'): - m = logRE.match(logFile) - if not m: continue - wf = m.group(1) - step = int(m.group(3)) - if step>max_steps: max_steps=step - if wf not in logData: - logData[wf] = {'steps': {}, 'events' : [], 'failed' : [], 'warning' : []} - if step not in logData[wf]['steps']: - logData[wf]['steps'][step]=logFile - cache_read=0 - log_processed=0 - for wf in logData: - for k in logData[wf]: - if k == 'steps': continue - for s in range(0, max_steps): - logData[wf][k].append(-1) - index =0 - for step in sorted(logData[wf]['steps']): - data = [0, 0, 0] - logFile = logData[wf]['steps'][step] - json_cache = os.path.dirname(logFile)+"/logcache_"+str(step)+".json" - log_reader_config_path = logFile + "-read_config" - config_list = [] - cache_ok = False - if 
(os.path.exists(json_cache)) and (os.path.getmtime(logFile)<=os.path.getmtime(json_cache)): - try: - jfile = open(json_cache,"r") - data = json.load(jfile) - jfile.close() - cache_read+=1 - cache_ok = True - except: - os.remove(json_cache) - if not cache_ok: - try: - es_parse_log(logFile) - except Exception as e: - print("Sending log information to elasticsearch failed" , str(e)) - inFile = open(logFile) - for line_nr, line in enumerate(inFile): - config_list = add_exception_to_config(line, line_nr, config_list) - if '%MSG-w' in line: data[1]=data[1]+1 - if '%MSG-e' in line: data[2]=data[2]+1 - if 'Begin processing the ' in line: data[0]=data[0]+1 - inFile.close() - jfile = open(json_cache,"w") - json.dump(data,jfile) - jfile.close() - transform_and_write_config_file(log_reader_config_path, config_list) - log_processed+=1 - logData[wf]['events'][index] = data[0] - logData[wf]['failed'][index] = data[2] - logData[wf]['warning'][index] = data[1] - index+=1 - del logData[wf]['steps'] + def update_wftime(self): + time_info = {} + for logFile in glob.glob(self.basedir + "/*/time.log"): + try: + wf = logFile.split("/")[-2].split("_")[0] + inFile = open(logFile) + line = inFile.readline().strip() + inFile.close() + m = re.match("^(\d+)(\.\d+|)$", line) + if m: + time_info[wf] = int(m.group(1)) + except Exception as e: + print("ERROR:", e) + outFile = open(os.path.join(self.outdir, "relval-times.json"), "w") + json.dump(time_info, outFile) + outFile.close() - print("Log processed: ",log_processed) - print("Caches read:",cache_read) - from pickle import Pickler - outFile = open(os.path.join(self.outdir,'runTheMatrixMsgs.pkl'), 'wb') - pklFile = Pickler(outFile, protocol=2) - pklFile.dump(logData) - outFile.close() - return + def parseLog(self): + logData = {} + logRE = re.compile("^.*/([1-9][0-9]*(\.[0-9]+|))_[^/]+/step([1-9])_.*\.log$") + max_steps = 0 + for logFile in glob.glob(self.basedir + "/[1-9]*/step[0-9]*.log"): + m = logRE.match(logFile) + if not m: + continue + wf = m.group(1) + step = int(m.group(3)) + if step > max_steps: + max_steps = step + if wf not in logData: + logData[wf] = {"steps": {}, "events": [], "failed": [], "warning": []} + if step not in logData[wf]["steps"]: + logData[wf]["steps"][step] = logFile + cache_read = 0 + log_processed = 0 + for wf in logData: + for k in logData[wf]: + if k == "steps": + continue + for s in range(0, max_steps): + logData[wf][k].append(-1) + index = 0 + for step in sorted(logData[wf]["steps"]): + data = [0, 0, 0] + logFile = logData[wf]["steps"][step] + json_cache = os.path.dirname(logFile) + "/logcache_" + str(step) + ".json" + log_reader_config_path = logFile + "-read_config" + config_list = [] + cache_ok = False + if (os.path.exists(json_cache)) and ( + os.path.getmtime(logFile) <= os.path.getmtime(json_cache) + ): + try: + jfile = open(json_cache, "r") + data = json.load(jfile) + jfile.close() + cache_read += 1 + cache_ok = True + except: + os.remove(json_cache) + if not cache_ok: + try: + es_parse_log(logFile) + except Exception as e: + print("Sending log information to elasticsearch failed", str(e)) + inFile = open(logFile) + for line_nr, line in enumerate(inFile): + config_list = add_exception_to_config(line, line_nr, config_list) + if "%MSG-w" in line: + data[1] = data[1] + 1 + if "%MSG-e" in line: + data[2] = data[2] + 1 + if "Begin processing the " in line: + data[0] = data[0] + 1 + inFile.close() + jfile = open(json_cache, "w") + json.dump(data, jfile) + jfile.close() + transform_and_write_config_file(log_reader_config_path, 
config_list) + log_processed += 1 + logData[wf]["events"][index] = data[0] + logData[wf]["failed"][index] = data[2] + logData[wf]["warning"][index] = data[1] + index += 1 + del logData[wf]["steps"] + + print("Log processed: ", log_processed) + print("Caches read:", cache_read) + from pickle import Pickler + outFile = open(os.path.join(self.outdir, "runTheMatrixMsgs.pkl"), "wb") + pklFile = Pickler(outFile, protocol=2) + pklFile.dump(logData) + outFile.close() + return diff --git a/runTests.py b/runTests.py index 7fe14679befe..b99aab9e5e41 100755 --- a/runTests.py +++ b/runTests.py @@ -24,7 +24,7 @@ if scriptPath not in sys.path: sys.path.append(scriptPath) -sys.path.append(os.path.join(scriptPath,"python")) +sys.path.append(os.path.join(scriptPath, "python")) from cmsutils import doCmd, MachineCPUCount, getHostName @@ -34,7 +34,7 @@ # ================================================================================ def runCmd(cmd): - cmd = cmd.rstrip(';') + cmd = cmd.rstrip(";") print("Running cmd> ", cmd) ret, out = run_cmd(cmd) if out: @@ -87,6 +87,7 @@ def checkTestLogs(self): # -------------------------------------------------------------------------------- def checkUnitTestLog(self): import checkTestLog + print("unitTest>Going to check log file from unit-tests in ", self.startDir) # noinspection PyBroadException try: @@ -100,39 +101,57 @@ def checkUnitTestLog(self): # -------------------------------------------------------------------------------- def splitUnitTestLogs(self): import splitUnitTestLog + print("unitTest>Going to split log file from unit-tests in ", self.startDir) tls = splitUnitTestLog.LogSplitter(self.startDir + "/unitTests-summary.log", True) tls.split(self.startDir + "/unitTests.log") - runCmd('cd ' + self.startDir + '; zip -r unitTestLogs.zip unitTestLogs') + runCmd("cd " + self.startDir + "; zip -r unitTestLogs.zip unitTestLogs") return # -------------------------------------------------------------------------------- def run(self): IBThreadBase.run(self) - arch = os.environ['SCRAM_ARCH'] - if platform.system() == 'Darwin': - print('unitTest> Skipping unit tests for MacOS') + arch = os.environ["SCRAM_ARCH"] + if platform.system() == "Darwin": + print("unitTest> Skipping unit tests for MacOS") return - precmd="" + precmd = "" paralleJobs = MachineCPUCount - if ('_ASAN_X' in os.environ["CMSSW_VERSION"]) or ('_UBSAN_X' in os.environ["CMSSW_VERSION"]): - paralleJobs = int(MachineCPUCount/2) - if (self.xType == 'GPU') or ("_GPU_X" in os.environ["CMSSW_VERSION"]): - precmd="export USER_UNIT_TESTS=cuda ;" + if ("_ASAN_X" in os.environ["CMSSW_VERSION"]) or ( + "_UBSAN_X" in os.environ["CMSSW_VERSION"] + ): + paralleJobs = int(MachineCPUCount / 2) + if (self.xType == "GPU") or ("_GPU_X" in os.environ["CMSSW_VERSION"]): + precmd = "export USER_UNIT_TESTS=cuda ;" skiptests = "" - if 'lxplus' in getHostName(): - skiptests = 'SKIP_UNITTESTS=ExpressionEvaluatorUnitTest' - TEST_PATH = os.environ['CMSSW_RELEASE_BASE'] + "/test/" + arch + if "lxplus" in getHostName(): + skiptests = "SKIP_UNITTESTS=ExpressionEvaluatorUnitTest" + TEST_PATH = os.environ["CMSSW_RELEASE_BASE"] + "/test/" + arch err, cmd = run_cmd( - "cd " + self.startDir + ";scram tool info cmssw 2>&1 | grep CMSSW_BASE= | sed 's|^CMSSW_BASE=||'") + "cd " + + self.startDir + + ";scram tool info cmssw 2>&1 | grep CMSSW_BASE= | sed 's|^CMSSW_BASE=||'" + ) if cmd: TEST_PATH = TEST_PATH + ":" + cmd + "/test/" + arch try: - cmd = precmd+"cd " + self.startDir + r"; touch nodelete.root nodelete.txt nodelete.log; sed -i -e 
's|testing.log; *$(CMD_rm) *-f *$($(1)_objdir)/testing.log;|testing.log;|;s|test $(1) had ERRORS\") *\&\&|test $(1) had ERRORS\" >> $($(1)_objdir)/testing.log) \&\&|' config/SCRAM/GMake/Makefile.rules; " - cmd += 'PATH=' + TEST_PATH + ':$PATH scram b -f -k -j ' + str( - paralleJobs) + ' unittests ' + skiptests + ' >unitTests1.log 2>&1 ; ' - cmd += 'touch nodelete.done; ls -l nodelete.*' - print('unitTest> Going to run ' + cmd) + cmd = ( + precmd + + "cd " + + self.startDir + + r"; touch nodelete.root nodelete.txt nodelete.log; sed -i -e 's|testing.log; *$(CMD_rm) *-f *$($(1)_objdir)/testing.log;|testing.log;|;s|test $(1) had ERRORS\") *\&\&|test $(1) had ERRORS\" >> $($(1)_objdir)/testing.log) \&\&|' config/SCRAM/GMake/Makefile.rules; " + ) + cmd += ( + "PATH=" + + TEST_PATH + + ":$PATH scram b -f -k -j " + + str(paralleJobs) + + " unittests " + + skiptests + + " >unitTests1.log 2>&1 ; " + ) + cmd += "touch nodelete.done; ls -l nodelete.*" + print("unitTest> Going to run " + cmd) ret = runCmd(cmd) if ret != 0: print("ERROR when running unit-tests: cmd returned " + str(ret)) @@ -141,16 +160,18 @@ def run(self): pass # noinspection PyBroadException try: - testLog = self.startDir + '/tmp/' + arch + '/src/' - logFile = self.startDir + '/unitTests.log' - runCmd('rm -f %s; touch %s' % (logFile, logFile)) - for packDir in glob.glob(testLog + '*/*'): - pack = packDir.replace(testLog, '') + testLog = self.startDir + "/tmp/" + arch + "/src/" + logFile = self.startDir + "/unitTests.log" + runCmd("rm -f %s; touch %s" % (logFile, logFile)) + for packDir in glob.glob(testLog + "*/*"): + pack = packDir.replace(testLog, "") runCmd("echo '>> Entering Package %s' >> %s" % (pack, logFile)) - packDir += '/test' + packDir += "/test" if os.path.exists(packDir): - err, testFiles = run_cmd('find ' + packDir + ' -maxdepth 2 -mindepth 2 -name testing.log -type f') - for lFile in testFiles.strip().split('\n'): + err, testFiles = run_cmd( + "find " + packDir + " -maxdepth 2 -mindepth 2 -name testing.log -type f" + ) + for lFile in testFiles.strip().split("\n"): if lFile: runCmd("cat %s >> %s" % (lFile, logFile)) runCmd("echo '>> Leaving Package %s' >> %s" % (pack, logFile)) @@ -164,6 +185,7 @@ def run(self): # ================================================================================ + class LibDepsTester(IBThreadBase): def __init__(self, startDirIn, Logger, deps=None): if deps is None: @@ -176,8 +198,17 @@ def __init__(self, startDirIn, Logger, deps=None): def run(self): IBThreadBase.run(self) - cmd = 'cd ' + self.startDir + ' ; ' + scriptPath + '/checkLibDeps.py -d ' + os.environ[ - "CMSSW_RELEASE_BASE"] + ' --plat ' + os.environ['SCRAM_ARCH'] + ' > chkLibDeps.log 2>&1' + cmd = ( + "cd " + + self.startDir + + " ; " + + scriptPath + + "/checkLibDeps.py -d " + + os.environ["CMSSW_RELEASE_BASE"] + + " --plat " + + os.environ["SCRAM_ARCH"] + + " > chkLibDeps.log 2>&1" + ) try: ret = runCmd(cmd) if ret != 0: @@ -187,12 +218,13 @@ def run(self): print(" cmd as of now : '" + cmd + "'") self.logger.updateLogFile("chkLibDeps.log") - self.logger.updateLogFile("libchk.pkl", 'new') + self.logger.updateLogFile("libchk.pkl", "new") return # ================================================================================ + class DirSizeTester(IBThreadBase): def __init__(self, startDirIn, Logger, deps=None): if deps is None: @@ -205,12 +237,16 @@ def __init__(self, startDirIn, Logger, deps=None): def run(self): IBThreadBase.run(self) - cmd = 'cd ' + self.startDir + '; ' + scriptPath + '/checkDirSizes.py ' + cmd = "cd " + 
self.startDir + "; " + scriptPath + "/checkDirSizes.py " ret = runCmd(cmd) if ret != 0: print("ERROR when running DirSizeTester: cmd returned " + str(ret)) - cmd = 'cd ' + self.startDir + '; storeTreeInfo.py --checkDir src --outFile treeInfo-IBsrc.json ' + cmd = ( + "cd " + + self.startDir + + "; storeTreeInfo.py --checkDir src --outFile treeInfo-IBsrc.json " + ) ret = runCmd(cmd) if ret != 0: print("ERROR when running DirSizeTester: cmd returned " + str(ret)) @@ -221,6 +257,7 @@ def run(self): # ================================================================================ + class ReleaseProductsDump(IBThreadBase): def __init__(self, startDirIn, Logger, deps=None): IBThreadBase.__init__(self, deps) @@ -231,22 +268,25 @@ def __init__(self, startDirIn, Logger, deps=None): def run(self): IBThreadBase.run(self) - logDir = os.path.join(self.startDir, 'logs', os.environ['SCRAM_ARCH']) + logDir = os.path.join(self.startDir, "logs", os.environ["SCRAM_ARCH"]) if not os.path.exists(logDir): os.makedirs(logDir) - rperrFileName = os.path.join(logDir, 'relProducts.err') + rperrFileName = os.path.join(logDir, "relProducts.err") - cmd = 'cd ' + self.startDir + '; RelProducts.pl > ReleaseProducts.list 2> ' + rperrFileName + cmd = ( + "cd " + self.startDir + "; RelProducts.pl > ReleaseProducts.list 2> " + rperrFileName + ) ret = runCmd(cmd) if ret != 0: print("ERROR when running ReleaseProductsChecks: cmd returned " + str(ret)) self.logger.updateLogFile(self.startDir + "/ReleaseProducts.list") - self.logger.updateLogFile(rperrFileName, "logs/" + os.environ['SCRAM_ARCH']) + self.logger.updateLogFile(rperrFileName, "logs/" + os.environ["SCRAM_ARCH"]) # ================================================================================ + class BuildFileDependencyCheck(IBThreadBase): def __init__(self, startDirIn, Logger, deps=None): IBThreadBase.__init__(self, deps) @@ -256,36 +296,46 @@ def __init__(self, startDirIn, Logger, deps=None): def run(self): IBThreadBase.run(self) - logDir = os.path.join(self.startDir, 'logs', os.environ['SCRAM_ARCH']) + logDir = os.path.join(self.startDir, "logs", os.environ["SCRAM_ARCH"]) if not os.path.exists(logDir): os.makedirs(logDir) - dverrFileName = os.path.join(logDir, 'depsViolations.err') + dverrFileName = os.path.join(logDir, "depsViolations.err") - depDir = os.path.join(self.startDir, 'etc/dependencies') + depDir = os.path.join(self.startDir, "etc/dependencies") if not os.path.exists(depDir): os.makedirs(depDir) - depFile = os.path.join(depDir, 'depsViolations.txt') - - cmd = 'cd ' + self.startDir + '; ReleaseDepsChecks.pl --detail > ' + depFile + ' 2> ' + dverrFileName + depFile = os.path.join(depDir, "depsViolations.txt") + + cmd = ( + "cd " + + self.startDir + + "; ReleaseDepsChecks.pl --detail > " + + depFile + + " 2> " + + dverrFileName + ) ret = runCmd(cmd) if ret != 0: print("ERROR when running BuildFileDependencyCheck: cmd returned " + str(ret)) - cmd = 'cd ' + self.startDir + '; ' + scriptPath + '/splitDepViolationLog.py --log ' + depFile + cmd = ( + "cd " + self.startDir + "; " + scriptPath + "/splitDepViolationLog.py --log " + depFile + ) ret = runCmd(cmd) if ret != 0: print("ERROR when running BuildFileDependencyCheck: cmd returned " + str(ret)) bdir = os.path.join(depDir, "depViolationLogs") import fnmatch + for root, dirnames, filenames in os.walk(bdir): - for filename in fnmatch.filter(filenames, 'depViolation.log'): - pkg = "/".join(root.replace(bdir, "").split('/')[1:3]) + for filename in fnmatch.filter(filenames, "depViolation.log"): + pkg = 
"/".join(root.replace(bdir, "").split("/")[1:3]) log = os.path.join(bdir, pkg, "log.txt") runCmd("touch " + log + "; cat " + os.path.join(root, filename) + " >> " + log) self.logger.updateLogFile(self.startDir + "/depViolationSummary.pkl", "testLogs") - self.logger.updateLogFile(dverrFileName, "logs/" + os.environ['SCRAM_ARCH']) + self.logger.updateLogFile(dverrFileName, "logs/" + os.environ["SCRAM_ARCH"]) self.logger.updateLogFile(depFile, "etc/dependencies/") self.logger.updateLogFile(bdir, "etc/dependencies/") return @@ -293,6 +343,7 @@ def run(self): # ================================================================================ + class CodeRulesChecker(IBThreadBase): def __init__(self, startDirIn, Logger, deps=None): IBThreadBase.__init__(self, deps) @@ -302,12 +353,15 @@ def __init__(self, startDirIn, Logger, deps=None): def run(self): IBThreadBase.run(self) - cmd = 'cd ' + self.startDir + '; rm -rf codeRules; mkdir codeRules; cd codeRules; ' - cmd += 'cmsCodeRulesChecker.py -r 1,2,3,4,5 -d ' + os.environ[ - 'CMSSW_RELEASE_BASE'] + '/src -S . -html 2>&1 >CodeRulesChecker.log ;' + cmd = "cd " + self.startDir + "; rm -rf codeRules; mkdir codeRules; cd codeRules; " + cmd += ( + "cmsCodeRulesChecker.py -r 1,2,3,4,5 -d " + + os.environ["CMSSW_RELEASE_BASE"] + + "/src -S . -html 2>&1 >CodeRulesChecker.log ;" + ) cmd += "find . -name log.html -type f | xargs --no-run-if-empty sed -i -e 's|cmslxr.fnal.gov|cmssdt.cern.ch|'" - print('CodeRulesChecker: in: ', os.getcwd()) - print(' ... going to execute:', cmd) + print("CodeRulesChecker: in: ", os.getcwd()) + print(" ... going to execute:", cmd) try: ret = runCmd(cmd) if ret != 0: @@ -322,8 +376,8 @@ def run(self): # ================================================================================ -class ReleaseTester(object): +class ReleaseTester(object): def __init__(self, releaseDir, dryRun=False): self.dryRun = dryRun self.plat = os.environ["SCRAM_ARCH"] @@ -333,8 +387,10 @@ def __init__(self, releaseDir, dryRun=False): self.relTag = self.release self.threadList = {} from cmsutils import getIBReleaseInfo + self.relCycle, day, hour = getIBReleaseInfo(self.release) from logUpdater import LogUpdater + self.logger = LogUpdater(self.cmsswBuildDir, self.dryRun) return @@ -355,60 +411,60 @@ def doTest(self, only=None): return self.runProjectInit() - if not only or 'dirsize' in only: - print('\n' + 80 * '-' + ' dirsize \n') - self.threadList['dirsize'] = self.runDirSize() + if not only or "dirsize" in only: + print("\n" + 80 * "-" + " dirsize \n") + self.threadList["dirsize"] = self.runDirSize() - if not only or 'depViolation' in only: - print('\n' + 80 * '-' + ' depViolation \n') - self.threadList['depViolation'] = self.runBuildFileDeps() + if not only or "depViolation" in only: + print("\n" + 80 * "-" + " depViolation \n") + self.threadList["depViolation"] = self.runBuildFileDeps() - if not only or 'relProducts' in only: - print('\n' + 80 * '-' + ' relProducts \n') - self.threadList['relProducts'] = self.runReleaseProducts() + if not only or "relProducts" in only: + print("\n" + 80 * "-" + " relProducts \n") + self.threadList["relProducts"] = self.runReleaseProducts() - if not only or 'unit' in only: - print('\n' + 80 * '-' + ' unit \n') - self.threadList['unit'] = self.runUnitTests() + if not only or "unit" in only: + print("\n" + 80 * "-" + " unit \n") + self.threadList["unit"] = self.runUnitTests() # We only want to explicitly run this test. 
- if only and 'gpu_unit' in only: - print('\n' + 80 * '-' + ' gpu_unit \n') - self.threadList['gpu_unit'] = self.runUnitTests([], 'GPU') + if only and "gpu_unit" in only: + print("\n" + 80 * "-" + " gpu_unit \n") + self.threadList["gpu_unit"] = self.runUnitTests([], "GPU") - if not only or 'codeRules' in only: - print('\n' + 80 * '-' + ' codeRules \n') - self.threadList['codeRules'] = self.runCodeRulesChecker() + if not only or "codeRules" in only: + print("\n" + 80 * "-" + " codeRules \n") + self.threadList["codeRules"] = self.runCodeRulesChecker() - if not only or 'libcheck' in only: - print('\n' + 80 * '-' + ' libcheck\n') - self.threadList['libcheck'] = self.checkLibDeps() + if not only or "libcheck" in only: + print("\n" + 80 * "-" + " libcheck\n") + self.threadList["libcheck"] = self.checkLibDeps() - if not only or 'pyConfigs' in only: - print('\n' + 80 * '-' + ' pyConfigs \n') + if not only or "pyConfigs" in only: + print("\n" + 80 * "-" + " pyConfigs \n") # noinspection PyNoneFunctionAssignment - self.threadList['pyConfigs'] = self.checkPyConfigs() + self.threadList["pyConfigs"] = self.checkPyConfigs() - if not only or 'dupDict' in only: - print('\n' + 80 * '-' + ' dupDict \n') + if not only or "dupDict" in only: + print("\n" + 80 * "-" + " dupDict \n") # noinspection PyNoneFunctionAssignment - self.threadList['dupDict'] = self.runDuplicateDictCheck() + self.threadList["dupDict"] = self.runDuplicateDictCheck() - print('TestWait> waiting for tests to finish ....') + print("TestWait> waiting for tests to finish ....") for task in self.threadList: if self.threadList[task]: self.threadList[task].join() - print('TestWait> Tests finished ') + print("TestWait> Tests finished ") return # -------------------------------------------------------------------------------- # noinspection PyUnusedLocal def checkPyConfigs(self, deps=None): print("Going to check python configs in ", os.getcwd()) - cmd = scriptPath + '/checkPyConfigs.py > chkPyConf.log 2>&1' + cmd = scriptPath + "/checkPyConfigs.py > chkPyConf.log 2>&1" doCmd(cmd, self.dryRun, self.cmsswBuildDir) self.logger.updateLogFile("chkPyConf.log") - self.logger.updateLogFile("chkPyConf.log", 'testLogs') + self.logger.updateLogFile("chkPyConf.log", "testLogs") return None # -------------------------------------------------------------------------------- @@ -456,9 +512,9 @@ def runCodeRulesChecker(self, deps=None): # noinspection PyUnusedLocal def runDuplicateDictCheck(self, deps=None): print("runDuplicateDictTests> Going to run duplicateReflexLibrarySearch.py ... 
") - script = 'export USER_SCRAM_TARGET=default ; eval $(scram run -sh) ; duplicateReflexLibrarySearch.py' - for opt in ['dup', 'lostDefs', 'edmPD']: - cmd = script + ' --' + opt + ' 2>&1 >dupDict-' + opt + '.log' + script = "export USER_SCRAM_TARGET=default ; eval $(scram run -sh) ; duplicateReflexLibrarySearch.py" + for opt in ["dup", "lostDefs", "edmPD"]: + cmd = script + " --" + opt + " 2>&1 >dupDict-" + opt + ".log" try: doCmd(cmd, self.dryRun, self.cmsswBuildDir) except Exception as e: @@ -521,6 +577,7 @@ def runBuildFileDeps(self, deps=None): # ================================================================================ + def main(): try: import argparse @@ -528,11 +585,11 @@ def main(): import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('--dryRun', default=False, action='store_true') - parser.add_argument('--only') + parser.add_argument("--dryRun", default=False, action="store_true") + parser.add_argument("--only") args = parser.parse_args() - rel = os.environ.get('CMSSW_BASE') + rel = os.environ.get("CMSSW_BASE") dryRun = args.dryRun if args.only is not None: only = args.only.split(",") diff --git a/scram-package-monitor-sender b/scram-package-monitor-sender deleted file mode 100755 index facd2dbe0b12..000000000000 --- a/scram-package-monitor-sender +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 -import os, sys, json, socket, re, base64 -from glob import glob -from optparse import OptionParser -from os.path import basename, join -from os import getenv -from time import strftime, localtime, strptime, sleep -from hashlib import sha1 -from _py2with3compatibility import run_cmd, Request, urlopen, HTTPError - -TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.0" -SLEEP_CYCLE = 2 # seconds -TERM_CMD = "kill_reader" - -def esReportPackages(results): - # Silently exit if we cannot contact elasticsearch - es_hostname = getenv("ES_HOSTNAME") - es_auth = getenv("ES_AUTH") - if not es_hostname and not es_auth: - return - - url = "https://%s/_bulk" % (es_hostname) - - request = Request(url) - if es_auth: - base64string = base64.encodestring(es_auth).replace('\n', '') - request.add_header("Authorization", "Basic %s" % base64string) - request.get_method = lambda: 'POST' - data = "\n".join(results) + "\n" - try: - result = urlopen(request, data=data) - except HTTPError as e: - print(e) - try: - print(result.read()) - except: - pass - -if __name__ == "__main__": - WORKSPACE = os.getenv("WORKSPACE", "./") - WORK_DIR = join(WORKSPACE, "pkg_mon") - SOURCE_DIR = join(os.getenv("CMSSW_BASE", "./"), "src") - - parser = OptionParser(usage="%prog [-f, -n]") - parser.add_option("-f", "--force", dest="force", action="store_true", - help="Force pushing", default=False) - parser.add_option("-n", "--dry-run", dest="dryrun", action="store_true", - help="Do not push files to server", default=False) - opts, args = parser.parse_args() - - CMSSW_VERSION = os.getenv("CMSSW_VERSION", "unknown") - SCRAM_ARCH = os.getenv("SCRAM_ARCH", "unknown") - INDEX_NAME = strftime("ib-scram-stats-%Y.%m.%d") - defaults = { "hostname": socket.gethostname(), - "scram_arch": SCRAM_ARCH, - "cmssw_version": CMSSW_VERSION, - } - print("Workspace: " + WORKSPACE + " workdir: " + WORK_DIR) - while(True): - sleep(SLEEP_CYCLE) - print("New cycle, finding timestamps to send.") - timestamps = sorted([int(basename(x).split("_")[1].split("-")[0]) - for x in glob(join(WORK_DIR, "st*"))]) - job_done = os.path.isfile(join(WORK_DIR, TERM_CMD)) - if (len(timestamps) < 2 and not opts.force and not job_done): - 
continue - results = [] - removables = [] - RE_FILE = "(start|stop)_([0-9]+)-(.*)" - pushables = [basename(f).replace(":", "/") for f in glob(join(WORK_DIR, "st*"))] - info = [re.match(RE_FILE, f).groups() for f in pushables] - m = re.match("(.*)_(20[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{4})", CMSSW_VERSION) - if m: - defaults["cmssw_queue"] = m.group(1) - defaults["@timestamp"] = strftime("%Y-%m-%dT%H:%M:00.0", - strptime(m.group(2), "%Y-%m-%d-%H%M")) - starts = dict([(x[2], int(x[1])) for x in info if x[0] == "start"]) - stops = dict([(x[2], int(x[1])) for x in info if x[0] == "stop"]) - packages = set(x[2] for x in info) - for x in packages: - h = sha1((x + SCRAM_ARCH + CMSSW_VERSION).encode()).hexdigest() - header = { "index" : { "_index" : INDEX_NAME, - "_type" : "cmssw_pkg_times", - "_id": h} - } - data = {"package": x} - data.update(defaults) - startTime = starts.get(x, None) - stopTime = stops.get(x, None) - if startTime: - data["start"] = strftime(TIME_FORMAT, localtime(startTime)) - if job_done: - removables.append(x.replace("/",":")) - if stopTime: - data["stop"] = strftime(TIME_FORMAT, localtime(stopTime)) - if job_done: - removables.append(x.replace("/",":")) - if startTime and stopTime: - data["diff"] = stopTime - startTime - removables.append(x.replace("/", ":")) - results += [json.dumps(header), json.dumps(data)] - - # Actually do the push to ES. - if opts.dryrun: - print("Dry run specified, what I would have sent:\n" + "\n".join(results)) - else: - esReportPackages(results) - - for x in removables: - cmd = "find %s -name \"*%s\" -delete" % (WORK_DIR, x) - err, out = run_cmd(cmd) - # Terminate this program when - if len(os.listdir(WORK_DIR)) == 1 and job_done: - sys.exit(0) diff --git a/scram-package-monitor-sender b/scram-package-monitor-sender new file mode 120000 index 000000000000..26c92cd7b1c4 --- /dev/null +++ b/scram-package-monitor-sender @@ -0,0 +1 @@ +scram-package-monitor-sender.py \ No newline at end of file diff --git a/scram-package-monitor-sender.py b/scram-package-monitor-sender.py new file mode 100755 index 000000000000..4598b21f4b7c --- /dev/null +++ b/scram-package-monitor-sender.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +import os, sys, json, socket, re, base64 +from glob import glob +from optparse import OptionParser +from os.path import basename, join +from os import getenv +from time import strftime, localtime, strptime, sleep +from hashlib import sha1 +from _py2with3compatibility import run_cmd, Request, urlopen, HTTPError + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.0" +SLEEP_CYCLE = 2 # seconds +TERM_CMD = "kill_reader" + + +def esReportPackages(results): + # Silently exit if we cannot contact elasticsearch + es_hostname = getenv("ES_HOSTNAME") + es_auth = getenv("ES_AUTH") + if not es_hostname and not es_auth: + return + + url = "https://%s/_bulk" % (es_hostname) + + request = Request(url) + if es_auth: + base64string = base64.encodestring(es_auth).replace("\n", "") + request.add_header("Authorization", "Basic %s" % base64string) + request.get_method = lambda: "POST" + data = "\n".join(results) + "\n" + try: + result = urlopen(request, data=data) + except HTTPError as e: + print(e) + try: + print(result.read()) + except: + pass + + +if __name__ == "__main__": + WORKSPACE = os.getenv("WORKSPACE", "./") + WORK_DIR = join(WORKSPACE, "pkg_mon") + SOURCE_DIR = join(os.getenv("CMSSW_BASE", "./"), "src") + + parser = OptionParser(usage="%prog [-f, -n]") + parser.add_option( + "-f", "--force", dest="force", action="store_true", help="Force pushing", default=False + ) 
+ parser.add_option( + "-n", + "--dry-run", + dest="dryrun", + action="store_true", + help="Do not push files to server", + default=False, + ) + opts, args = parser.parse_args() + + CMSSW_VERSION = os.getenv("CMSSW_VERSION", "unknown") + SCRAM_ARCH = os.getenv("SCRAM_ARCH", "unknown") + INDEX_NAME = strftime("ib-scram-stats-%Y.%m.%d") + defaults = { + "hostname": socket.gethostname(), + "scram_arch": SCRAM_ARCH, + "cmssw_version": CMSSW_VERSION, + } + print("Workspace: " + WORKSPACE + " workdir: " + WORK_DIR) + while True: + sleep(SLEEP_CYCLE) + print("New cycle, finding timestamps to send.") + timestamps = sorted( + [int(basename(x).split("_")[1].split("-")[0]) for x in glob(join(WORK_DIR, "st*"))] + ) + job_done = os.path.isfile(join(WORK_DIR, TERM_CMD)) + if len(timestamps) < 2 and not opts.force and not job_done: + continue + results = [] + removables = [] + RE_FILE = "(start|stop)_([0-9]+)-(.*)" + pushables = [basename(f).replace(":", "/") for f in glob(join(WORK_DIR, "st*"))] + info = [re.match(RE_FILE, f).groups() for f in pushables] + m = re.match("(.*)_(20[0-9]{2}-[0-9]{2}-[0-9]{2}-[0-9]{4})", CMSSW_VERSION) + if m: + defaults["cmssw_queue"] = m.group(1) + defaults["@timestamp"] = strftime( + "%Y-%m-%dT%H:%M:00.0", strptime(m.group(2), "%Y-%m-%d-%H%M") + ) + starts = dict([(x[2], int(x[1])) for x in info if x[0] == "start"]) + stops = dict([(x[2], int(x[1])) for x in info if x[0] == "stop"]) + packages = set(x[2] for x in info) + for x in packages: + h = sha1((x + SCRAM_ARCH + CMSSW_VERSION).encode()).hexdigest() + header = {"index": {"_index": INDEX_NAME, "_type": "cmssw_pkg_times", "_id": h}} + data = {"package": x} + data.update(defaults) + startTime = starts.get(x, None) + stopTime = stops.get(x, None) + if startTime: + data["start"] = strftime(TIME_FORMAT, localtime(startTime)) + if job_done: + removables.append(x.replace("/", ":")) + if stopTime: + data["stop"] = strftime(TIME_FORMAT, localtime(stopTime)) + if job_done: + removables.append(x.replace("/", ":")) + if startTime and stopTime: + data["diff"] = stopTime - startTime + removables.append(x.replace("/", ":")) + results += [json.dumps(header), json.dumps(data)] + + # Actually do the push to ES. 
+ if opts.dryrun: + print("Dry run specified, what I would have sent:\n" + "\n".join(results)) + else: + esReportPackages(results) + + for x in removables: + cmd = 'find %s -name "*%s" -delete' % (WORK_DIR, x) + err, out = run_cmd(cmd) + # Terminate this program when + if len(os.listdir(WORK_DIR)) == 1 and job_done: + sys.exit(0) diff --git a/scram-package-monitor-timestamps b/scram-package-monitor-timestamps deleted file mode 100755 index 82500269d3d4..000000000000 --- a/scram-package-monitor-timestamps +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python -import os, errno -from optparse import OptionParser -from os.path import join -from time import strftime - -TERM_CMD = "kill_reader" - -def create_dir(dir_to_create): - try: - os.makedirs(dir_to_create) - except OSError as exception: - if exception.errno != errno.EEXIST: - raise exception - -if __name__ == "__main__": - WORKSPACE = os.getenv("WORKSPACE", "./") - WORK_DIR = join(WORKSPACE, "pkg_mon") - - parser = OptionParser(usage="%prog <-s|-e> -p ") - parser.add_option("-s", "--start", dest="start", action="store_true", - help="Building started for package", default=True) - parser.add_option("-e", "--stop", dest="start", action="store_false", - help="Building done for package", default=True) - parser.add_option("-p", "--package", dest="pkg_name", - help="Package name to track", default=None) - opts, args = parser.parse_args() - - pkg_name = opts.pkg_name - create_dir(WORK_DIR) - - # Create the file for the current invocation. - if pkg_name: - prefix = opts.start and strftime("start_%s-") or strftime("stop_%s-") - filename = prefix + pkg_name.replace("/",":") - while(True): - try: - open(join(WORK_DIR, filename), "a").close() - break - except: - create_dir(WORK_DIR) - elif not opts.start: - while(True): - try: - open(join(WORK_DIR, TERM_CMD), "a").close() - break - except: - create_dir(WORK_DIR) diff --git a/scram-package-monitor-timestamps b/scram-package-monitor-timestamps new file mode 120000 index 000000000000..34db73bfeee2 --- /dev/null +++ b/scram-package-monitor-timestamps @@ -0,0 +1 @@ +scram-package-monitor-timestamps.py \ No newline at end of file diff --git a/scram-package-monitor-timestamps.py b/scram-package-monitor-timestamps.py new file mode 100755 index 000000000000..f7c006234d54 --- /dev/null +++ b/scram-package-monitor-timestamps.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +import os, errno +from optparse import OptionParser +from os.path import join +from time import strftime + +TERM_CMD = "kill_reader" + + +def create_dir(dir_to_create): + try: + os.makedirs(dir_to_create) + except OSError as exception: + if exception.errno != errno.EEXIST: + raise exception + + +if __name__ == "__main__": + WORKSPACE = os.getenv("WORKSPACE", "./") + WORK_DIR = join(WORKSPACE, "pkg_mon") + + parser = OptionParser(usage="%prog <-s|-e> -p ") + parser.add_option( + "-s", + "--start", + dest="start", + action="store_true", + help="Building started for package", + default=True, + ) + parser.add_option( + "-e", + "--stop", + dest="start", + action="store_false", + help="Building done for package", + default=True, + ) + parser.add_option( + "-p", "--package", dest="pkg_name", help="Package name to track", default=None + ) + opts, args = parser.parse_args() + + pkg_name = opts.pkg_name + create_dir(WORK_DIR) + + # Create the file for the current invocation. 
+ if pkg_name: + prefix = opts.start and strftime("start_%s-") or strftime("stop_%s-") + filename = prefix + pkg_name.replace("/", ":") + while True: + try: + open(join(WORK_DIR, filename), "a").close() + break + except: + create_dir(WORK_DIR) + elif not opts.start: + while True: + try: + open(join(WORK_DIR, TERM_CMD), "a").close() + break + except: + create_dir(WORK_DIR) diff --git a/shift/libib.py b/shift/libib.py index deba5a740a21..746e095c0126 100644 --- a/shift/libib.py +++ b/shift/libib.py @@ -201,9 +201,7 @@ def check_ib(data, compilation_only=False): for itm in pkg_errors.items(): res[arch]["build"].append( - LogEntry( - name=pkg.name(), url=f"{url_prefix}/{pkg.name()}", data=itm - ) + LogEntry(name=pkg.name(), url=f"{url_prefix}/{pkg.name()}", data=itm) ) if not compilation_only: diff --git a/shift/report.py b/shift/report.py index 2cabd319fa53..748fbddd85e7 100644 --- a/shift/report.py +++ b/shift/report.py @@ -9,9 +9,7 @@ # noinspection PyUnresolvedReferences from libib import PackageInfo, ErrorInfo -if sys.version_info.major < 3 or ( - sys.version_info.major == 3 and sys.version_info.minor < 6 -): +if sys.version_info.major < 3 or (sys.version_info.major == 3 and sys.version_info.minor < 6): print("This script requires Python 3.6 or newer!", file=sys.stderr) exit(0) @@ -67,14 +65,11 @@ def main(): file=f, ) for error in errors[arch]["utest"]: - print( - f"| [{error.name}]({error.url}) | TBD | TBD |", file=f - ) + print(f"| [{error.name}]({error.url}) | TBD | TBD |", file=f) for error in errors[arch]["relval"]: print( - f"| [{error.name}]({error.url}) | {error.data} | " - f"TBD |", + f"| [{error.name}]({error.url}) | {error.data} | " f"TBD |", file=f, ) else: diff --git a/shift/uniq-errors.py b/shift/uniq-errors.py index 877d0afcdf56..f6945b8b769f 100644 --- a/shift/uniq-errors.py +++ b/shift/uniq-errors.py @@ -24,28 +24,20 @@ class CompError: def main(): parser = argparse.ArgumentParser() - parser.add_argument( - "-a", "--architecture", help="Release architecture (e.g. el9_amd64_gcc13)" - ) + parser.add_argument("-a", "--architecture", help="Release architecture (e.g. el9_amd64_gcc13)") parser.add_argument("-d", "--date", help="IB date") parser.add_argument("-s", "--series", help="IB series (e.g. 
CMSSW_13_3_X)") - parser.add_argument( - "-f", "--filter", help="Only display errors containing given text" - ) + parser.add_argument("-f", "--filter", help="Only display errors containing given text") args = parser.parse_args() print(f"Getting IB data for {args.series} on {args.date}") comp = libib.get_ib_comparision(args.date, args.series)[args.series] if comp is None: - print( - f"No errors found for IB {args.series} on {args.date} arch {args.architecture}" - ) + print(f"No errors found for IB {args.series} on {args.date} arch {args.architecture}") return - print( - f"Extracting build errors for {args.series} on {args.date} arch {args.architecture}" - ) + print(f"Extracting build errors for {args.series} on {args.date} arch {args.architecture}") _, errors = libib.check_ib(comp, True) errors = errors[args.architecture]["build"] seen_errors = set() diff --git a/show-ibs-schedule.py b/show-ibs-schedule.py index 8478b85dc6e1..b29df70e8160 100755 --- a/show-ibs-schedule.py +++ b/show-ibs-schedule.py @@ -4,51 +4,58 @@ specs = get_config_map_properties({"DISABLED": "1"}) data = {} days = range(7) -hours = (0,11,23) +hours = (0, 11, 23) for day in days: - data[day] = {} - for hour in hours[1:]: - data[day][hour] = [] -data[0]={0:[], 23:[]} -data[6]={11:[]} + data[day] = {} + for hour in hours[1:]: + data[day][hour] = [] +data[0] = {0: [], 23: []} +data[6] = {11: []} dev_rel = [] for spec in specs: - rel = "_".join(spec['CMSDIST_TAG'].split("/")[1].split("_")[:3]) - if ('BUILD_PATCH_RELEASE' in spec): dev_rel.append(rel) - sel_days = days[:] - sel_hours = hours[:] - if 'BUILD_DAY' in spec: - sel_days=[] - for day in spec['BUILD_DAY'].split(","): - try: - day = int(day.strip()) - if not day in data: continue - sel_days.append(day) - except: pass - if 'BUILD_HOUR' in spec: - sel_hours = [] - for hour in spec['BUILD_HOUR'].split(","): - try: - hour = int(hour.strip()) - if not hour in hours: continue - sel_hours.append(hour) - except: pass - for day in data.keys(): - if not day in sel_days: continue - for hour in data[day].keys(): - if not hour in sel_hours: continue - if (rel in dev_rel) or ((day==0) and (hour==0)): - data[day][hour].append(spec) - elif (not 0 in sel_days) or (not not 0 in sel_hours): - data[day][hour].append(spec) + rel = "_".join(spec["CMSDIST_TAG"].split("/")[1].split("_")[:3]) + if "BUILD_PATCH_RELEASE" in spec: + dev_rel.append(rel) + sel_days = days[:] + sel_hours = hours[:] + if "BUILD_DAY" in spec: + sel_days = [] + for day in spec["BUILD_DAY"].split(","): + try: + day = int(day.strip()) + if not day in data: + continue + sel_days.append(day) + except: + pass + if "BUILD_HOUR" in spec: + sel_hours = [] + for hour in spec["BUILD_HOUR"].split(","): + try: + hour = int(hour.strip()) + if not hour in hours: + continue + sel_hours.append(hour) + except: + pass + for day in data.keys(): + if not day in sel_days: + continue + for hour in data[day].keys(): + if not hour in sel_hours: + continue + if (rel in dev_rel) or ((day == 0) and (hour == 0)): + data[day][hour].append(spec) + elif (not 0 in sel_days) or (not not 0 in sel_hours): + data[day][hour].append(spec) print("Day\tHour\tx86_64\tppc64le\taarch64") for day in data.keys(): - for hour in data[day].keys(): - str = "%s\t%s\t" % (day, hour) - cnt = {"amd64":0, "ppc64le":0, "aarch64":0} - for spec in data[day][hour]: - arch = spec['SCRAM_ARCH'].split("_")[1] - cnt[arch]+=1 - str += "%s\t%s\t%s" % (cnt["amd64"], cnt["ppc64le"], cnt["aarch64"]) - print (str) + for hour in data[day].keys(): + str = "%s\t%s\t" % (day, hour) + cnt 
= {"amd64": 0, "ppc64le": 0, "aarch64": 0} + for spec in data[day][hour]: + arch = spec["SCRAM_ARCH"].split("_")[1] + cnt[arch] += 1 + str += "%s\t%s\t%s" % (cnt["amd64"], cnt["ppc64le"], cnt["aarch64"]) + print(str) diff --git a/splitDepViolationLog.py b/splitDepViolationLog.py index a136ca230a34..8d96eb076e68 100755 --- a/splitDepViolationLog.py +++ b/splitDepViolationLog.py @@ -10,11 +10,10 @@ class DepViolSplitter(object): def __init__(self, outFileIn=None, verbIn=False): - self.outFile = sys.stdout if outFileIn: print("Summary file:", outFileIn) - self.outFile = open(outFileIn, 'w') + self.outFile = open(outFileIn, "w") self.verbose = verbIn @@ -31,20 +30,19 @@ def setVerbose(self, verbIn=False): # -------------------------------------------------------------------------------- def split(self, logFile): + self.outFile.write("going to check " + logFile + "\n") - self.outFile.write("going to check " + logFile + '\n') - - pkgStartRe = re.compile(r'^>> Checking dependency for (.*)\s*$') - pkgEndRe = re.compile(r'^>> Done Checking dependency for (.*)\s*$') + pkgStartRe = re.compile(r"^>> Checking dependency for (.*)\s*$") + pkgEndRe = re.compile(r"^>> Done Checking dependency for (.*)\s*$") - depViolRe = re.compile(r'\s*\*+ERROR: Dependency violation') + depViolRe = re.compile(r"\s*\*+ERROR: Dependency violation") - logDirs = os.path.join(os.path.split(logFile)[0], 'depViolationLogs') + logDirs = os.path.join(os.path.split(logFile)[0], "depViolationLogs") print("logDirs ", logDirs) if not os.path.exists(logDirs): os.makedirs(logDirs) - lf = open(logFile, 'r') + lf = open(logFile, "r") lines = lf startTime = time.time() @@ -58,7 +56,6 @@ def split(self, logFile): actLogLines = [] startFound = False for line in lines: - # write out log to individual log file ... 
if startFound and ">> Done Checking dependency " not in line: actLogLines.append(line) @@ -78,7 +75,13 @@ def split(self, logFile): if pkgEndMatch: pkg = pkgEndMatch.group(1) if actPkg != pkg: - self.outFile.write("pkgEndMatch> package mismatch: pkg found " + pkg + ' actPkg=' + actPkg + '\n') + self.outFile.write( + "pkgEndMatch> package mismatch: pkg found " + + pkg + + " actPkg=" + + actPkg + + "\n" + ) if len(actLogLines) > 2: pkgViol[pkg] = len(depViolRe.findall("".join(actLogLines))) @@ -88,7 +91,7 @@ def split(self, logFile): os.makedirs(actLogDir) # os.makedirs(actLogDir) ############################################### - actLogFile = open(os.path.join(actLogDir, 'depViolation.log'), 'w') + actLogFile = open(os.path.join(actLogDir, "depViolation.log"), "w") actLogFile.write("".join(actLogLines)) actLogFile.close() actLogLines = [] @@ -97,17 +100,21 @@ def split(self, logFile): stopTime = time.time() lf.close() - self.outFile.write("found a total of " + str(nLines) + ' lines in logfile.\n') - self.outFile.write("analysis took " + str(stopTime - startTime) + ' sec.\n') + self.outFile.write("found a total of " + str(nLines) + " lines in logfile.\n") + self.outFile.write("analysis took " + str(stopTime - startTime) + " sec.\n") - self.outFile.write("total number of packages with violations: " + str(len(list(pkgViol.keys()))) + '\n') + self.outFile.write( + "total number of packages with violations: " + str(len(list(pkgViol.keys()))) + "\n" + ) import pprint + pprint.pprint(pkgViol) try: from pickle import Pickler - resFile = open('depViolationSummary.pkl', 'wb') + + resFile = open("depViolationSummary.pkl", "wb") pklr = Pickler(resFile, protocol=2) pklr.dump(pkgViol) resFile.close() @@ -126,9 +133,9 @@ def main(): import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('-l', '--logFile', default=None, required=True) - parser.add_argument('-v', '--verbose', action='store_true', default=False) - parser.add_argument('-s', '--outFile', default=None) + parser.add_argument("-l", "--logFile", default=None, required=True) + parser.add_argument("-v", "--verbose", action="store_true", default=False) + parser.add_argument("-s", "--outFile", default=None) args = parser.parse_args() logFile = args.logFile diff --git a/splitUnitTestLog.py b/splitUnitTestLog.py index 09fe1779ce2c..34b7990efc24 100755 --- a/splitUnitTestLog.py +++ b/splitUnitTestLog.py @@ -10,11 +10,10 @@ class LogSplitter(object): def __init__(self, outFileIn=None, verbIn=False): - self.outFile = sys.stdout if outFileIn: print("Summary file:", outFileIn) - self.outFile = open(outFileIn, 'w') + self.outFile = open(outFileIn, "w") self.verbose = verbIn @@ -31,14 +30,13 @@ def setVerbose(self, verbIn=False): # -------------------------------------------------------------------------------- def split(self, logFile): + self.outFile.write("going to check " + logFile + "\n") - self.outFile.write("going to check " + logFile + '\n') - - subsysRe = re.compile('^>> Tests for package ([A-Z].*/[A-Z].*) ran.') + subsysRe = re.compile("^>> Tests for package ([A-Z].*/[A-Z].*) ran.") - pkgTestStartRe = re.compile('^===== Test \"(.*)\" ====') - pkgTestEndRe = re.compile(r'^\^\^\^\^ End Test (.*) \^\^\^\^') - pkgTestResultRe = re.compile('.*---> test ([^ ]+) (had ERRORS|succeeded)') + pkgTestStartRe = re.compile('^===== Test "(.*)" ====') + pkgTestEndRe = re.compile(r"^\^\^\^\^ End Test (.*) \^\^\^\^") + pkgTestResultRe = re.compile(".*---> test ([^ ]+) (had ERRORS|succeeded)") pkgStartRe = re.compile("^>> Entering 
Package (.*)") # pkgEndRe = re.compile("^>> Leaving Package (.*)") @@ -48,7 +46,7 @@ def split(self, logFile): subsysPkgMap = {} baseDir = os.path.split(logFile)[0] - logDirs = os.path.join(baseDir, 'unitTestLogs') + logDirs = os.path.join(baseDir, "unitTestLogs") print("logDirs ", logDirs) if not os.path.exists(logDirs): os.makedirs(logDirs) @@ -85,7 +83,7 @@ def split(self, logFile): actPkgLines += 1 subsysMatch = subsysRe.match(line) if subsysMatch: - subsys, pkg = subsysMatch.group(1).split('/') + subsys, pkg = subsysMatch.group(1).split("/") if pkg not in pkgSubsysMap: pkgSubsysMap[pkg] = subsys if subsys in subsysPkgMap: @@ -105,13 +103,19 @@ def split(self, logFile): if pkgEndMatch: pkg = pkgEndMatch.group(1) if actPkg != pkg: - self.outFile.write("pkgEndMatch> package mismatch: pkg found " + pkg + ' actPkg=' + actPkg + '\n') + self.outFile.write( + "pkgEndMatch> package mismatch: pkg found " + + pkg + + " actPkg=" + + actPkg + + "\n" + ) pkgLines[pkg] = actPkgLines if len(actLogLines) > 2: actLogDir = os.path.join(logDirs, pkg) os.makedirs(actLogDir) - actLogFile = open(os.path.join(actLogDir, 'unitTest.log'), 'w') + actLogFile = open(os.path.join(actLogDir, "unitTest.log"), "w") actLogFile.write("".join(actLogLines)) actLogFile.close() actLogLines = [] @@ -140,26 +144,31 @@ def split(self, logFile): tst = pkgTestEndMatch.group(1) if actTest != tst: self.outFile.write( - "pkgTestEndMatch> test mismatch: tst found " + tst + ' actTest=' + actTest + '\n') + "pkgTestEndMatch> test mismatch: tst found " + + tst + + " actTest=" + + actTest + + "\n" + ) testLines[tst] = actTstLines stopTime = time.time() lf.close() - self.outFile.write("found a total of " + str(nLines) + ' lines in logfile.\n') - self.outFile.write("analysis took " + str(stopTime - startTime) + ' sec.\n') + self.outFile.write("found a total of " + str(nLines) + " lines in logfile.\n") + self.outFile.write("analysis took " + str(stopTime - startTime) + " sec.\n") - self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + '\n') + self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + "\n") nMax = 1000 self.outFile.write("tests with more than " + str(nMax) + " lines of logs:\n") for pkg, lines in list(testLines.items()): if lines > nMax: - self.outFile.write(" " + pkg + ' : ' + str(lines) + '\n') + self.outFile.write(" " + pkg + " : " + str(lines) + "\n") self.outFile.write("Number of tests for packages: \n") noTests = 0 nrTests = 0 - indent = ' ' + indent = " " totalOK = 0 totalFail = 0 unitTestResults = {} @@ -169,35 +178,59 @@ def split(self, logFile): else: nrTests += 1 if self.verbose: - self.outFile.write('-' * 80 + '\n') - self.outFile.write(indent + pkg + ' : ') + self.outFile.write("-" * 80 + "\n") + self.outFile.write(indent + pkg + " : ") nOK = 0 if self.verbose: self.outFile.write("\n") for tNam in testNames[pkg]: - if results[tNam] == 'succeeded': + if results[tNam] == "succeeded": nOK += 1 totalOK += 1 else: totalFail += 1 if self.verbose: - self.outFile.write(indent * 2 + tNam + ' ' + results[tNam] + '\n') + self.outFile.write(indent * 2 + tNam + " " + results[tNam] + "\n") if self.verbose: self.outFile.write(indent + pkg + " : ") self.outFile.write( - indent + str(len(testNames[pkg])) + ' tests in total, OK:' + str(nOK) + ' fail:' + str( - len(testNames[pkg]) - nOK) + '\n') + indent + + str(len(testNames[pkg])) + + " tests in total, OK:" + + str(nOK) + + " fail:" + + str(len(testNames[pkg]) - nOK) + + "\n" + ) unitTestResults[pkg] = [testNames[pkg], nOK, 
len(testNames[pkg]) - nOK] - self.outFile.write(indent + str(nrTests) + " packages with tests (" + str( - float(nrTests) / float(len(list(pkgTests.keys())))) + ")\n") - self.outFile.write(indent + str(noTests) + " packages without tests (" + str( - float(noTests) / float(len(list(pkgTests.keys())))) + ")\n") - self.outFile.write(indent + "in total: tests OK : " + str(totalOK) + ' tests FAIL : ' + str(totalFail) + '\n') + self.outFile.write( + indent + + str(nrTests) + + " packages with tests (" + + str(float(nrTests) / float(len(list(pkgTests.keys())))) + + ")\n" + ) + self.outFile.write( + indent + + str(noTests) + + " packages without tests (" + + str(float(noTests) / float(len(list(pkgTests.keys())))) + + ")\n" + ) + self.outFile.write( + indent + + "in total: tests OK : " + + str(totalOK) + + " tests FAIL : " + + str(totalFail) + + "\n" + ) try: from pickle import Pickler - resFile = open(baseDir + '/unitTestResults.pkl', 'wb') + + resFile = open(baseDir + "/unitTestResults.pkl", "wb") pklr = Pickler(resFile, protocol=2) pklr.dump(unitTestResults) pklr.dump(results) @@ -211,6 +244,7 @@ def split(self, logFile): # ================================================================================ + def main(): try: import argparse @@ -218,9 +252,9 @@ def main(): import archived_argparse as argparse parser = argparse.ArgumentParser() - parser.add_argument('-l', '--logFile', dest='logFile', required=True) - parser.add_argument('-v', '--verbose', default=False, action='store_true') - parser.add_argument('-s', '--outFile', dest='outFile') + parser.add_argument("-l", "--logFile", dest="logFile", required=True) + parser.add_argument("-v", "--verbose", default=False, action="store_true") + parser.add_argument("-s", "--outFile", dest="outFile") args = parser.parse_args() logFile = args.logFile diff --git a/tag-ib b/tag-ib deleted file mode 100755 index 808e7fce79c0..000000000000 --- a/tag-ib +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python3 -#test commit -from github import Github, GithubException -from os.path import expanduser -from optparse import OptionParser -from datetime import datetime -from sys import exit -import re -from socket import setdefaulttimeout -setdefaulttimeout(120) - -if __name__ == "__main__": - parser = OptionParser(usage="%prog -b|--branch -d|--date -t|--tag [-n|--dry-run]") - parser.add_option("-n", "--dry-run", dest="dryRun", help="Do not modify Github", default=False, action="store_true") - parser.add_option("-b", "--branch", dest="branch", help="Repository branch to use for tagging e.g CMSSW_8_0_X", type=str, default=None) - parser.add_option("-d", "--date", dest="date", help="Date/time to search for the commit to tag e.g. 2015-10-19-1100", type=str, default=None) - parser.add_option("-t", "--tag", dest="tag", help="Tag of the IB e.g. CMSSW_8_0_X_2015-10-19-1100. 
Default is _", type=str, default=None) - opts, args = parser.parse_args() - - if not opts.branch: - parser.error("Missing branch argument -b|--branch ") - if not opts.date: - parser.error("Missing date argument -d|--date ") - - commit_date = datetime.strptime(opts.date, '%Y-%m-%d-%H00') - if not opts.tag: - opts.tag = opts.branch + "_" + commit_date.strftime("%Y-%m-%d-%H00") - - commit_date = commit_date - (datetime.now() - datetime.utcnow()) - - gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) - rate_limit = gh.get_rate_limit().rate - print('API Rate Limit') - print('Limit: ', rate_limit.limit) - print('Remaining: ', rate_limit.remaining) - print('Reset time (GMT): ', rate_limit.reset) - - repo = gh.get_repo('cms-sw/cmssw') - commits = repo.get_commits(sha=opts.branch, until=commit_date) - last_merge = None - for c in commits: - if c.parents: - last_merge = c - break - sha = last_merge.sha - print('Found Commit :',sha,last_merge.author.login,'(',last_merge.author.name,')') - if not opts.dryRun: - try: - repo.create_git_tag(opts.tag, "Release", sha, "commit") - repo.create_git_ref("refs/tags/"+opts.tag,sha) - print("Created Tag ",opts.tag," based on ",sha) - except GithubException as e: - msg = e.data['message'].encode("ascii", "ignore").decode() - print("Message: ",msg) - if re.match('^.*Reference already exists.*$',msg,re.M): - exit(0) - print("ERROR: unable to create tag ",opts.tag) - exit (1) - else: - print("Dry run, would have created tag ",opts.tag," based on ",sha) - exit(0) diff --git a/tag-ib b/tag-ib new file mode 120000 index 000000000000..69e3b50d48bf --- /dev/null +++ b/tag-ib @@ -0,0 +1 @@ +tag-ib.py \ No newline at end of file diff --git a/tag-ib.py b/tag-ib.py new file mode 100755 index 000000000000..bb9121833b23 --- /dev/null +++ b/tag-ib.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# test commit +from github import Github, GithubException +from os.path import expanduser +from optparse import OptionParser +from datetime import datetime +from sys import exit +import re +from socket import setdefaulttimeout + +setdefaulttimeout(120) + +if __name__ == "__main__": + parser = OptionParser( + usage="%prog -b|--branch -d|--date -t|--tag [-n|--dry-run]" + ) + parser.add_option( + "-n", + "--dry-run", + dest="dryRun", + help="Do not modify Github", + default=False, + action="store_true", + ) + parser.add_option( + "-b", + "--branch", + dest="branch", + help="Repository branch to use for tagging e.g CMSSW_8_0_X", + type=str, + default=None, + ) + parser.add_option( + "-d", + "--date", + dest="date", + help="Date/time to search for the commit to tag e.g. 2015-10-19-1100", + type=str, + default=None, + ) + parser.add_option( + "-t", + "--tag", + dest="tag", + help="Tag of the IB e.g. CMSSW_8_0_X_2015-10-19-1100. 
Default is _", + type=str, + default=None, + ) + opts, args = parser.parse_args() + + if not opts.branch: + parser.error("Missing branch argument -b|--branch ") + if not opts.date: + parser.error("Missing date argument -d|--date ") + + commit_date = datetime.strptime(opts.date, "%Y-%m-%d-%H00") + if not opts.tag: + opts.tag = opts.branch + "_" + commit_date.strftime("%Y-%m-%d-%H00") + + commit_date = commit_date - (datetime.now() - datetime.utcnow()) + + gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip()) + rate_limit = gh.get_rate_limit().rate + print("API Rate Limit") + print("Limit: ", rate_limit.limit) + print("Remaining: ", rate_limit.remaining) + print("Reset time (GMT): ", rate_limit.reset) + + repo = gh.get_repo("cms-sw/cmssw") + commits = repo.get_commits(sha=opts.branch, until=commit_date) + last_merge = None + for c in commits: + if c.parents: + last_merge = c + break + sha = last_merge.sha + print("Found Commit :", sha, last_merge.author.login, "(", last_merge.author.name, ")") + if not opts.dryRun: + try: + repo.create_git_tag(opts.tag, "Release", sha, "commit") + repo.create_git_ref("refs/tags/" + opts.tag, sha) + print("Created Tag ", opts.tag, " based on ", sha) + except GithubException as e: + msg = e.data["message"].encode("ascii", "ignore").decode() + print("Message: ", msg) + if re.match("^.*Reference already exists.*$", msg, re.M): + exit(0) + print("ERROR: unable to create tag ", opts.tag) + exit(1) + else: + print("Dry run, would have created tag ", opts.tag, " based on ", sha) + exit(0) diff --git a/tests/test_config-map.py b/tests/test_config-map.py index c80aa74908d9..a8dd47628ef5 100755 --- a/tests/test_config-map.py +++ b/tests/test_config-map.py @@ -2,14 +2,14 @@ import re -KEYS_RE="(CMS_BOT_BRANCH|CVMFS_INSTALL_IMAGE|DEBUG_EXTERNALS|SKIP_TESTS|REQUIRED_TEST|FORCE_FULL_IB|SLAVE_LABELS|SINGULARITY|IB_ONLY|BUILD_DAY|NO_IB|SCRAM_ARCH|RELEASE_QUEUE|BUILD_PATCH_RELEASE|PKGTOOLS_TAG|CMSDIST_TAG|RELEASE_BRANCH|ADDITIONAL_TESTS|PR_TESTS|DISABLED|ALWAYS_TAG_CMSSW|DO_STATIC_CHECKS|PROD_ARCH|ENABLE_DEBUG|PRS_TEST_CLANG|MESOS_QUEUE|DO_NOT_INSTALL|BUILD_HOUR|IB_WEB_PAGE|DOCKER_IMG|SPACK)" +KEYS_RE = "(CMS_BOT_BRANCH|CVMFS_INSTALL_IMAGE|DEBUG_EXTERNALS|SKIP_TESTS|REQUIRED_TEST|FORCE_FULL_IB|SLAVE_LABELS|SINGULARITY|IB_ONLY|BUILD_DAY|NO_IB|SCRAM_ARCH|RELEASE_QUEUE|BUILD_PATCH_RELEASE|PKGTOOLS_TAG|CMSDIST_TAG|RELEASE_BRANCH|ADDITIONAL_TESTS|PR_TESTS|DISABLED|ALWAYS_TAG_CMSSW|DO_STATIC_CHECKS|PROD_ARCH|ENABLE_DEBUG|PRS_TEST_CLANG|MESOS_QUEUE|DO_NOT_INSTALL|BUILD_HOUR|IB_WEB_PAGE|DOCKER_IMG|SPACK)" if __name__ == "__main__": - for l in open("config.map").read().split("\n"): - if not l: - continue - l = l.strip(";") - for p in l.split(";"): - assert("=" in p) - (key, value) = p.split("=") - assert(re.match(KEYS_RE, key)) + for l in open("config.map").read().split("\n"): + if not l: + continue + l = l.strip(";") + for p in l.split(";"): + assert "=" in p + (key, value) = p.split("=") + assert re.match(KEYS_RE, key) diff --git a/tests/test_logreaderUtils.py b/tests/test_logreaderUtils.py index 34e54cfe330e..ef527ffaebb1 100644 --- a/tests/test_logreaderUtils.py +++ b/tests/test_logreaderUtils.py @@ -114,12 +114,19 @@ class TestSequenceFunctions(unittest.TestCase): - def test_unittestlogs(self): config_list = [] custom_rule_set = [ - {"str_to_match": "test (.*) had ERRORS", "name": "{0}{1}{2} failed", "control_type": ResultTypeEnum.ISSUE}, - {"str_to_match": '===== Test "([^\s]+)" ====', "name": "{0}", "control_type": ResultTypeEnum.TEST} + { + 
"str_to_match": "test (.*) had ERRORS", + "name": "{0}{1}{2} failed", + "control_type": ResultTypeEnum.ISSUE, + }, + { + "str_to_match": '===== Test "([^\s]+)" ====', + "name": "{0}", + "control_type": ResultTypeEnum.TEST, + }, ] for index, l in enumerate(unittestlog.split("\n")): config_list = add_exception_to_config(l, index, config_list, custom_rule_set) @@ -127,5 +134,5 @@ def test_unittestlogs(self): print("Example config file in %s" % ("/tmp/unittestlogs.log-read_config")) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_watchers.py b/tests/test_watchers.py index 965a4b0fce8c..3bcb87f2927c 100755 --- a/tests/test_watchers.py +++ b/tests/test_watchers.py @@ -2,55 +2,59 @@ from __future__ import print_function import os import sys + sys.path.append(os.path.join(os.path.dirname(__file__), "../")) from releases import * from categories import * import yaml + try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper import re + # Validate the schema of watchers. KEY_RE = "^[^@]+" VALUE_RE = "[A-Za-z0-0.*+]" w = yaml.load(open("watchers.yaml", "r"), Loader=Loader) -assert(type(w) == dict) -for (key, value) in w.items(): - assert(type(key) == str) - assert(re.match(KEY_RE, key)) - assert(type(value) == list) - for x in value: - assert(type(x) == str) - assert(re.match(VALUE_RE, x)) +assert type(w) == dict +for key, value in w.items(): + assert type(key) == str + assert re.match(KEY_RE, key) + assert type(value) == list + for x in value: + assert type(x) == str + assert re.match(VALUE_RE, x) -assert(CMSSW_CATEGORIES) -assert(type(CMSSW_CATEGORIES) == dict) +assert CMSSW_CATEGORIES +assert type(CMSSW_CATEGORIES) == dict PACKAGE_RE = "^([A-Z][0-9A-Za-z]*(/[a-zA-Z][0-9A-Za-z]*|)|.gitignore|pull_request_template.md|.clang-[^/]+)$" -for (key, value) in CMSSW_CATEGORIES.items(): - assert(type(key) == str) - assert(type(value) == list) - if len(value)==0:continue - if key == "externals": - assert(len(value)>0) - continue - for p in value: - print("checking", p) - assert(type(p) == str) - assert(re.match(PACKAGE_RE, p)) +for key, value in CMSSW_CATEGORIES.items(): + assert type(key) == str + assert type(value) == list + if len(value) == 0: + continue + if key == "externals": + assert len(value) > 0 + continue + for p in value: + print("checking", p) + assert type(p) == str + assert re.match(PACKAGE_RE, p) if os.path.exists("super-users.yaml"): - w = yaml.load(open("super-users.yaml", "r"), Loader=Loader) - assert(type(w) == list) - for p in w: - assert(type(p) == str) - assert(re.match(KEY_RE, p)) + w = yaml.load(open("super-users.yaml", "r"), Loader=Loader) + assert type(w) == list + for p in w: + assert type(p) == str + assert re.match(KEY_RE, p) print("Finished with success") diff --git a/trigger_jenkins_job.py b/trigger_jenkins_job.py index ee459abd5c94..ed32a39f84fb 100755 --- a/trigger_jenkins_job.py +++ b/trigger_jenkins_job.py @@ -3,21 +3,41 @@ from jenkins_callback import build_jobs import json + def process(opts): - xparam = [] - for param in opts.params: - p,v=param.split("=",1) - xparam.append({"name":p,"value":v}) - build_jobs(opts.server, [(json.dumps({"parameter":xparam}),opts.job)], headers={}, user=opts.user) + xparam = [] + for param in opts.params: + p, v = param.split("=", 1) + xparam.append({"name": p, "value": v}) + build_jobs( + opts.server, [(json.dumps({"parameter": xparam}), opts.job)], headers={}, user=opts.user + ) + if __name__ == "__main__": - parser = 
OptionParser(usage="%prog") - parser.add_option("-j", "--job", dest="job", help="Jenkins jobs to trigger", default=None) - parser.add_option("-s", "--server", dest="server", help="Jenkins server URL e.g. https://cmssdt.cern.ch/cms-jenkins", default=None) - parser.add_option("-u", "--user", dest="user", help="Jenkins user name to trigger the job", default="cmssdt") - parser.add_option('-p', '--parameter', dest='params', help="Job parameter e.g. -p Param=Value. One can use this multiple times.", - action="append", type="string", metavar="PARAMETERS") - opts, args = parser.parse_args() + parser = OptionParser(usage="%prog") + parser.add_option("-j", "--job", dest="job", help="Jenkins jobs to trigger", default=None) + parser.add_option( + "-s", + "--server", + dest="server", + help="Jenkins server URL e.g. https://cmssdt.cern.ch/cms-jenkins", + default=None, + ) + parser.add_option( + "-u", "--user", dest="user", help="Jenkins user name to trigger the job", default="cmssdt" + ) + parser.add_option( + "-p", + "--parameter", + dest="params", + help="Job parameter e.g. -p Param=Value. One can use this multiple times.", + action="append", + type="string", + metavar="PARAMETERS", + ) + opts, args = parser.parse_args() - if (not opts.job) or (not opts.server): parser.error("Missing job/server parameter.") - process(opts) + if (not opts.job) or (not opts.server): + parser.error("Missing job/server parameter.") + process(opts) diff --git a/update-github-hooks-ip b/update-github-hooks-ip deleted file mode 100755 index cd2a40ee0b55..000000000000 --- a/update-github-hooks-ip +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python3 -from _py2with3compatibility import urlopen -from json import loads -from os import system -from sys import exit -ip_file="/data/sdt/github-hook-meta.txt" -cnt = 0 -with open("%s.tmp" % ip_file, "w") as ref: - for m in [i.encode().decode() for i in loads(urlopen("https://api.github.com/meta").readlines()[0])['hooks']]: - ref.write("%s\n" % m) - cnt+=1 -if cnt: - system("mv %s.tmp %s" % (ip_file,ip_file)) -else: - system("rm -f %s.tmp %s" % ip_file) - exit(1) diff --git a/update-github-hooks-ip b/update-github-hooks-ip new file mode 120000 index 000000000000..0dcea56c7b97 --- /dev/null +++ b/update-github-hooks-ip @@ -0,0 +1 @@ +update-github-hooks-ip.py \ No newline at end of file diff --git a/update-github-hooks-ip.py b/update-github-hooks-ip.py new file mode 100755 index 000000000000..27d798a115c4 --- /dev/null +++ b/update-github-hooks-ip.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +from _py2with3compatibility import urlopen +from json import loads +from os import system +from sys import exit + +ip_file = "/data/sdt/github-hook-meta.txt" +cnt = 0 +with open("%s.tmp" % ip_file, "w") as ref: + for m in [ + i.encode().decode() + for i in loads(urlopen("https://api.github.com/meta").readlines()[0])["hooks"] + ]: + ref.write("%s\n" % m) + cnt += 1 +if cnt: + system("mv %s.tmp %s" % (ip_file, ip_file)) +else: + system("rm -f %s.tmp %s" % ip_file) + exit(1) diff --git a/updateVOTags b/updateVOTags deleted file mode 100755 index 7341c11b1c99..000000000000 --- a/updateVOTags +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python -# This script can be used to sync the production releases which are -# declared as announced in the tag collector and -from __future__ import print_function -from optparse import OptionParser -from _py2with3compatibility import run_cmd -from sys import exit -# Apparently there are many ways to import json, depending on the python -# version. 
This should make sure you get one. -from os.path import join, dirname - -HOME_DIR = dirname(__file__) - -def getAnnouncedReleases(): - lines = open(join(HOME_DIR, "releases.map")).readlines() - releases = [] - for l in lines: - l = l.strip("\n ") - data = dict([p.split("=") for p in l.split(";") if p]) - if data["type"] != "Production": - continue - if data["state"] != "Announced": - continue - releases.append(data["label"]) - return releases - -def withGridEnv(command, **kwds): - opts = {"environment": "/afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh", - "command": command % kwds} - return run_cmd("source %(environment)s ; %(command)s" % opts) - -def availableCes(): - error, out = withGridEnv("lcg-info --vo cms --list-ce" - " --query 'Cluster=*.cern.ch'" - " | grep -E -o '[a-zA-Z0-9.-]+[.]cern[.]ch'" - " | sort -u") - if error: - return None - return out.split("\n") - -def gridReleases(ce): - error, out = withGridEnv("lcg-tags --vo cms --ce %(ce)s --list", ce=ce) - if error: - return None - return ["CMSSW_" + x.split("CMSSW_")[1] - for x in out.split("\n") - if "CMSSW_" in x] - -def announceRelease(ce, release): - error, out = withGridEnv("lcg-tags --ce %(ce)s --vo cms --add --tags VO-cms-%(release)s", - ce=ce, - release=release) - return (release, error) - -if __name__ == "__main__": - parser = OptionParser(usage="%(prog)s") - announced = getAnnouncedReleases() - - error, out = withGridEnv("voms-proxy-init -voms %(voms)s", - voms="cms:/cms/Role=lcgadmin") - if error: - parser.error("Could not get a proxy") - - ces = availableCes() - if not ces: - parser.error("Could not find any CE") - - grids = gridReleases(ces[0]) - missingReleases = [x for x in announced if x not in grids] - if not missingReleases: - print("No releases to announce") - exit(0) - - errors = [] - for ce in ces: - announced = [announceRelease(ce, x) for x in missingReleases] - errors += ["Release %s cannot be announced on %s" % (x,ce) - for (x, err) in announced if err] - ok = ["Release %s announced." % (x,ce) - for (x, err) in announced if err] - if not errors: - print("\n".join(ok)) - break - - print("\n".join(errors)) diff --git a/updateVOTags b/updateVOTags new file mode 120000 index 000000000000..96f4161b181f --- /dev/null +++ b/updateVOTags @@ -0,0 +1 @@ +updateVOTags.py \ No newline at end of file diff --git a/updateVOTags.py b/updateVOTags.py new file mode 100755 index 000000000000..c366db59914c --- /dev/null +++ b/updateVOTags.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# This script can be used to sync the production releases which are +# declared as announced in the tag collector and +from __future__ import print_function +from optparse import OptionParser +from _py2with3compatibility import run_cmd +from sys import exit + +# Apparently there are many ways to import json, depending on the python +# version. This should make sure you get one. 
+from os.path import join, dirname + +HOME_DIR = dirname(__file__) + + +def getAnnouncedReleases(): + lines = open(join(HOME_DIR, "releases.map")).readlines() + releases = [] + for l in lines: + l = l.strip("\n ") + data = dict([p.split("=") for p in l.split(";") if p]) + if data["type"] != "Production": + continue + if data["state"] != "Announced": + continue + releases.append(data["label"]) + return releases + + +def withGridEnv(command, **kwds): + opts = { + "environment": "/afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh", + "command": command % kwds, + } + return run_cmd("source %(environment)s ; %(command)s" % opts) + + +def availableCes(): + error, out = withGridEnv( + "lcg-info --vo cms --list-ce" + " --query 'Cluster=*.cern.ch'" + " | grep -E -o '[a-zA-Z0-9.-]+[.]cern[.]ch'" + " | sort -u" + ) + if error: + return None + return out.split("\n") + + +def gridReleases(ce): + error, out = withGridEnv("lcg-tags --vo cms --ce %(ce)s --list", ce=ce) + if error: + return None + return ["CMSSW_" + x.split("CMSSW_")[1] for x in out.split("\n") if "CMSSW_" in x] + + +def announceRelease(ce, release): + error, out = withGridEnv( + "lcg-tags --ce %(ce)s --vo cms --add --tags VO-cms-%(release)s", ce=ce, release=release + ) + return (release, error) + + +if __name__ == "__main__": + parser = OptionParser(usage="%(prog)s") + announced = getAnnouncedReleases() + + error, out = withGridEnv("voms-proxy-init -voms %(voms)s", voms="cms:/cms/Role=lcgadmin") + if error: + parser.error("Could not get a proxy") + + ces = availableCes() + if not ces: + parser.error("Could not find any CE") + + grids = gridReleases(ces[0]) + missingReleases = [x for x in announced if x not in grids] + if not missingReleases: + print("No releases to announce") + exit(0) + + errors = [] + for ce in ces: + announced = [announceRelease(ce, x) for x in missingReleases] + errors += [ + "Release %s cannot be announced on %s" % (x, ce) for (x, err) in announced if err + ] + ok = ["Release %s announced." 
% (x, ce) for (x, err) in announced if err] + if not errors: + print("\n".join(ok)) + break + + print("\n".join(errors)) diff --git a/utils/cmsdist_pip_pkgs_update.py b/utils/cmsdist_pip_pkgs_update.py index c5eff938f665..6a33aa35e776 100755 --- a/utils/cmsdist_pip_pkgs_update.py +++ b/utils/cmsdist_pip_pkgs_update.py @@ -1,159 +1,232 @@ #!/usr/bin/env python from __future__ import print_function -import sys,re, json, os +import sys, re, json, os import subprocess from os.path import exists, join + def check_python_require(py_str, condition): - if not condition: return True - if 'py3' in condition: return py_str.startswith('3.') - py_version = list(map(int,py_str.split('.'))) - for cond in condition.split(","): - m = re.match('^(.*?)([0-9].*)', cond.replace(" ","")) - if m: - op = m.group(1) - regex = False - req = m.group(2).split('.') - if op=='': - op = '==' - req.append('*') - while req[-1]=="*": - req.pop() - regex = True - if regex: - req_str = '^'+'.'.join(req)+'\..+$' - if op == '==': - if not re.match(req_str, py_str): return False - elif op == '!=': - if re.match(req_str, py_str): return False - try: - req = list(map(int,req)) - except: - if "'" in req: continue - #print(py_str,"A", condition,"B",req) - #raise - if op == '>': - if py_version<=req: return False - elif op == '>=': - if py_version=req: return False - elif op == '<=': - if py_version>req: return False - return True + if not condition: + return True + if "py3" in condition: + return py_str.startswith("3.") + py_version = list(map(int, py_str.split("."))) + for cond in condition.split(","): + m = re.match("^(.*?)([0-9].*)", cond.replace(" ", "")) + if m: + op = m.group(1) + regex = False + req = m.group(2).split(".") + if op == "": + op = "==" + req.append("*") + while req[-1] == "*": + req.pop() + regex = True + if regex: + req_str = "^" + ".".join(req) + "\..+$" + if op == "==": + if not re.match(req_str, py_str): + return False + elif op == "!=": + if re.match(req_str, py_str): + return False + try: + req = list(map(int, req)) + except: + if "'" in req: + continue + # print(py_str,"A", condition,"B",req) + # raise + if op == ">": + if py_version <= req: + return False + elif op == ">=": + if py_version < req: + return False + elif op == "<": + if py_version >= req: + return False + elif op == "<=": + if py_version > req: + return False + return True + def requirements_file(cmsdist): - return join(cmsdist, "pip", "requirements.txt") + return join(cmsdist, "pip", "requirements.txt") + def read_requirements(cmsdist): - print("Reading requirements ...") - req_file = requirements_file(cmsdist) - req_data = [] - proc = subprocess.Popen('grep "^### RPM" %s/python3.spec | sed "s|^.* python3 *||"' % (cmsdist), stdout=subprocess.PIPE,shell=True, universal_newlines=True) - py3_version = proc.stdout.read().strip() - print(" Python3:", py3_version) - if exists(req_file): - with open(req_file) as ref: - for line in ref.readlines(): - req_data.append({'line': line.strip(), 'data': {}}) - line=line.strip().replace(" ","") - if line.startswith("#"): continue - if "==" in line: - p,v = line.split("==",1) - req_data[-1]['data']['name'] = p - req_data[-1]['data']['pip_name'] = p - req_data[-1]['data']['version'] = v - req_data[-1]['data']['python']=py3_version - exfile = join(cmsdist, "pip",p+".file") - if exists (exfile): - with open(exfile) as xref: - for xline in xref.readlines(): - m = re.match("^%define\s+pip_name\s+([^\s]+)\s*$",xline.strip()) - if m: - req_data[-1]['data']['pip_name'] = m.group(1) - break - return req_data + 
print("Reading requirements ...") + req_file = requirements_file(cmsdist) + req_data = [] + proc = subprocess.Popen( + 'grep "^### RPM" %s/python3.spec | sed "s|^.* python3 *||"' % (cmsdist), + stdout=subprocess.PIPE, + shell=True, + universal_newlines=True, + ) + py3_version = proc.stdout.read().strip() + print(" Python3:", py3_version) + if exists(req_file): + with open(req_file) as ref: + for line in ref.readlines(): + req_data.append({"line": line.strip(), "data": {}}) + line = line.strip().replace(" ", "") + if line.startswith("#"): + continue + if "==" in line: + p, v = line.split("==", 1) + req_data[-1]["data"]["name"] = p + req_data[-1]["data"]["pip_name"] = p + req_data[-1]["data"]["version"] = v + req_data[-1]["data"]["python"] = py3_version + exfile = join(cmsdist, "pip", p + ".file") + if exists(exfile): + with open(exfile) as xref: + for xline in xref.readlines(): + m = re.match("^%define\s+pip_name\s+([^\s]+)\s*$", xline.strip()) + if m: + req_data[-1]["data"]["pip_name"] = m.group(1) + break + return req_data + def check_updates(req_data): - from datetime import datetime - epoch = datetime.utcfromtimestamp(0) - ignore_line = [] - ignored = [] - ignore_count = 0 - if not exists('cache'): os.system("mkdir -p cache") - print("Checking for updates ...") - for data in req_data: - xline = data['line'].replace(" ","") - if xline=="": continue - if xline.startswith('#'): - m = re.match('#NO_AUTO_UPDATE:((\d+):|).*', xline) - if m: - try: ignore_count = int(m.group(2)) - except: ignore_count = 1 - ignore_line = [data['line']] - elif ignore_count: ignore_line.append(" "+data['line']) - continue - p = data['data']['name'] - op = data['data']['pip_name'] - ov = data['data']['version'] - if exists("cache/%s.json" % p): - jdata = json.load(open("cache/%s.json" % p)) - else: - o = subprocess.Popen('curl -s -k -L https://pypi.python.org/pypi/%s/json' %(op), stdout=subprocess.PIPE,shell=True, universal_newlines=True) - jdata = json.loads(o.stdout.read()) - json.dump(jdata, open("cache/%s.json" % p, 'w'), sort_keys=True, indent=2) - if True: - v = jdata['info']['version'] - if ignore_count: - ignore_count-=1 - if ov!=v: - ignored.append("*** WARNING: %s: Newer version %s found (existing: %s) but not updating due to following comment in requitements.txt." 
% (p, v, ov)) - if ignore_line: ignored.append(" %s" % ("\n".join(ignore_line))) - ignore_line= [] - continue - if 'python' in data['data']: - py_ver = data['data']['python'] - #FIXME: Ignore python version check - if False and not check_python_require(py_ver, jdata['info']['requires_python']): - releases = [] - msg = [] - for i in jdata['releases']: - for d in jdata['releases'][i]: - if d['python_version']!='source': continue - if not check_python_require(py_ver, d['requires_python']): - msg.append(" INFO: %s: Ignoring version %s due to python requirement: %s%s" % (p,i,py_ver,d['requires_python'])) + from datetime import datetime + + epoch = datetime.utcfromtimestamp(0) + ignore_line = [] + ignored = [] + ignore_count = 0 + if not exists("cache"): + os.system("mkdir -p cache") + print("Checking for updates ...") + for data in req_data: + xline = data["line"].replace(" ", "") + if xline == "": + continue + if xline.startswith("#"): + m = re.match("#NO_AUTO_UPDATE:((\d+):|).*", xline) + if m: + try: + ignore_count = int(m.group(2)) + except: + ignore_count = 1 + ignore_line = [data["line"]] + elif ignore_count: + ignore_line.append(" " + data["line"]) + continue + p = data["data"]["name"] + op = data["data"]["pip_name"] + ov = data["data"]["version"] + if exists("cache/%s.json" % p): + jdata = json.load(open("cache/%s.json" % p)) + else: + o = subprocess.Popen( + "curl -s -k -L https://pypi.python.org/pypi/%s/json" % (op), + stdout=subprocess.PIPE, + shell=True, + universal_newlines=True, + ) + jdata = json.loads(o.stdout.read()) + json.dump(jdata, open("cache/%s.json" % p, "w"), sort_keys=True, indent=2) + if True: + v = jdata["info"]["version"] + if ignore_count: + ignore_count -= 1 + if ov != v: + ignored.append( + "*** WARNING: %s: Newer version %s found (existing: %s) but not updating due to following comment in requitements.txt." 
+ % (p, v, ov) + ) + if ignore_line: + ignored.append(" %s" % ("\n".join(ignore_line))) + ignore_line = [] + continue + if "python" in data["data"]: + py_ver = data["data"]["python"] + # FIXME: Ignore python version check + if False and not check_python_require(py_ver, jdata["info"]["requires_python"]): + releases = [] + msg = [] + for i in jdata["releases"]: + for d in jdata["releases"][i]: + if d["python_version"] != "source": + continue + if not check_python_require(py_ver, d["requires_python"]): + msg.append( + " INFO: %s: Ignoring version %s due to python requirement: %s%s" + % (p, i, py_ver, d["requires_python"]) + ) + continue + uptime = ( + datetime.strptime(d["upload_time"], "%Y-%m-%dT%H:%M:%S") - epoch + ).total_seconds() + releases.append( + { + "version": i, + "upload": uptime, + "requires_python": d["requires_python"], + } + ) + msg.append( + " INFO: %s: Matched version %s due to python requirement: %s %s" + % (p, i, py_ver, d["requires_python"]) + ) + newlist = sorted(releases, key=lambda k: k["upload"]) + if newlist: + v = newlist[-1]["version"] + if ov != v: + for m in msg: + print(m) + if ov == v: continue - uptime = (datetime.strptime(d['upload_time'],'%Y-%m-%dT%H:%M:%S')-epoch).total_seconds() - releases.append({'version':i, 'upload': uptime, 'requires_python': d['requires_python']}) - msg.append(" INFO: %s: Matched version %s due to python requirement: %s %s" % (p,i,py_ver,d['requires_python'])) - newlist = sorted(releases, key=lambda k: k['upload']) - if newlist: v = newlist[-1]['version'] - if ov != v: - for m in msg: print(m) - if ov==v: continue - m = re.match('^\s*%s\s*==\s*%s(\s*;.+|)$' % (p,ov),data['line']) - try: - data['line'] = '%s==%s%s' % (p,v,m.group(1)) - print("NEW:",p,ov,v) - except: - print("Wrong data:",p,ov,v) - for i in ignored: print (i) + m = re.match("^\s*%s\s*==\s*%s(\s*;.+|)$" % (p, ov), data["line"]) + try: + data["line"] = "%s==%s%s" % (p, v, m.group(1)) + print("NEW:", p, ov, v) + except: + print("Wrong data:", p, ov, v) + for i in ignored: + print(i) + def rewrite_requiremets(red_data, cmsdist): - req_file = requirements_file(cmsdist) - with open(req_file, "w") as ref: - for d in req_data: - ref.write(d['line']+"\n") + req_file = requirements_file(cmsdist) + with open(req_file, "w") as ref: + for d in req_data: + ref.write(d["line"] + "\n") + if __name__ == "__main__": - from optparse import OptionParser - parser = OptionParser(usage="%prog") - parser.add_option("-C", "--clean-cache",dest="clean_cache",action="store_true", help="Cleanup cache directory and re-check PyPI for updates.", default=False) - parser.add_option("-u", "--update", dest="update", action="store_true", help="Update requirements.txt", default=False) - parser.add_option("-c", "--cmsdist", dest="cmsdist", help="cmsdist directory", type=str, default="cmsdist") - opts, args = parser.parse_args() - if opts.clean_cache: os.system("rm -rf cache") - req_data = read_requirements(opts.cmsdist) - check_updates(req_data) - if opts.update: rewrite_requiremets(req_data, opts.cmsdist) + from optparse import OptionParser + parser = OptionParser(usage="%prog") + parser.add_option( + "-C", + "--clean-cache", + dest="clean_cache", + action="store_true", + help="Cleanup cache directory and re-check PyPI for updates.", + default=False, + ) + parser.add_option( + "-u", + "--update", + dest="update", + action="store_true", + help="Update requirements.txt", + default=False, + ) + parser.add_option( + "-c", "--cmsdist", dest="cmsdist", help="cmsdist directory", type=str, default="cmsdist" + ) + 
opts, args = parser.parse_args() + if opts.clean_cache: + os.system("rm -rf cache") + req_data = read_requirements(opts.cmsdist) + check_updates(req_data) + if opts.update: + rewrite_requiremets(req_data, opts.cmsdist) diff --git a/watchers.yaml b/watchers.yaml index e781d599bb38..5789bdef12ff 100644 --- a/watchers.yaml +++ b/watchers.yaml @@ -1698,6 +1698,7 @@ watson-ij: - Validation/MuonGEMHits - Validation/MuonGEMRecHits - DQM/GEM +- Geometry/MuonCommonData trk-dqm: - DQM/SiOuterTracker - DQM/SiPixelCommon