")
@@ -245,25 +294,36 @@ def makeHTMLSummaryPage(self):
pkgList = sorted(self.errMap[key], key=lambda x: x.name())
for pkg in pkgList:
- if not pkg.name() in self.tagList: continue
- styleClass = 'ok'
- for cKey in self.errorKeys :
- if styleClass == 'ok' and cKey in pkg.errSummary.keys(): styleClass = self.styleClass[cKey]
- htmlFile.write('
')
- htmlFile.write('
')
- htmlFile.write('
')
- link = ' '+pkg.name()+' '+self.tagList[pkg.name()]+' '
+ if not pkg.name() in self.tagList:
+ continue
+ styleClass = "ok"
+ for cKey in self.errorKeys:
+ if styleClass == "ok" and cKey in pkg.errSummary.keys():
+ styleClass = self.styleClass[cKey]
+ htmlFile.write("
\n")
lineNo = -1
for line in logFile.readlines():
lineNo += 1
# HTML sanitisation:
- newLine = line.replace('&','&amp;') # do this first to not escape it again in the next subs
- newLine = newLine.replace('<','&lt;').replace('>','&gt;')
+ newLine = line.replace(
+ "&", "&amp;"
+ ) # do this first to not escape it again in the next subs
+ newLine = newLine.replace("<", "&lt;").replace(">", "&gt;")
if lineNo in pkg.errLines.keys():
- newLine = ' '+newLine+' '
- if sys.version_info[0]<3:
- htmlFile.write(newLine.decode('ascii','ignore'))
+ newLine = (
+ " "
+ + newLine
+ + " "
+ )
+ if sys.version_info[0] < 3:
+ htmlFile.write(newLine.decode("ascii", "ignore"))
else:
htmlFile.write(newLine)
htmlFile.write("
\n"
)
html_file.writelines(" \n")
diff --git a/jenkins/parser/jenkins-retry-job.py b/jenkins/parser/jenkins-retry-job.py
index 5d39d9acf818..51e4811ddb33 100755
--- a/jenkins/parser/jenkins-retry-job.py
+++ b/jenkins/parser/jenkins-retry-job.py
@@ -44,7 +44,7 @@
def findParametersAction(root):
- """ It finds Jenkins parameters under section ParametersAction in xml file."""
+ """It finds Jenkins parameters under section ParametersAction in xml file."""
if root.tag == "parameters":
return root
for x in root:
@@ -55,7 +55,7 @@ def findParametersAction(root):
def getParameters(root, payload):
- """ Append Jenkins parameters of the form parameter=value (n.text=v.text) elements to a list."""
+ """Append Jenkins parameters of the form parameter=value (n.text=v.text) elements to a list."""
n = root.find("name")
if n is not None:
if n.text is None:
@@ -123,20 +123,14 @@ def getParameters(root, payload):
# Update static webpage
-tracker_path = (
- os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/parser-web-info.html"
-)
+tracker_path = os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/parser-web-info.html"
job_url = os.environ.get("JENKINS_URL") + "job/" + job_to_retry + "/" + build_to_retry
-retry_url = (
- os.environ.get("JENKINS_URL") + "job/jenkins-test-retry/" + current_build_number
-)
+retry_url = os.environ.get("JENKINS_URL") + "job/jenkins-test-retry/" + current_build_number
retry_url_file_path = (
os.environ.get("HOME") + "/builds/jenkins-test-parser-monitor/json-retry-info.json"
)
-actions.update_retry_link_cmssdt_page(
- retry_url_file_path, job_to_retry, build_to_retry, retry_url
-)
+actions.update_retry_link_cmssdt_page(retry_url_file_path, job_to_retry, build_to_retry, retry_url)
# Format retry label depending on parser action
times = "time" if retry_counter_update == 1 else "times"
diff --git a/jenkins/parser/paser-config-unittest.py b/jenkins/parser/paser-config-unittest.py
index 5acdb1231723..117e84a9f975 100644
--- a/jenkins/parser/paser-config-unittest.py
+++ b/jenkins/parser/paser-config-unittest.py
@@ -20,9 +20,7 @@
_, output = getstatusoutput(
'curl -s https://raw.githubusercontent.com/cms-sw/cmssdt-wiki/master/jenkins_reports/All.md | grep "## \[.*\](.*"'
)
-valid_job_names = [
- re.sub("\]\(.*", "", item.replace("## [", "")) for item in output.split("\n")
-]
+valid_job_names = [re.sub("\]\(.*", "", item.replace("## [", "")) for item in output.split("\n")]
# Check that valid_job_names contains all elements of job_names
assert all(
item in valid_job_names for item in job_names
@@ -48,9 +46,7 @@
"nodeOff",
"nodeReconnect",
] # TODO: Find a better way to get all valid actions
-defined_actions = [
- error_msg[error_category]["action"] for error_category in error_msg.keys()
-]
+defined_actions = [error_msg[error_category]["action"] for error_category in error_msg.keys()]
# Check that valid_actions contains all defined actions
assert all(
item in valid_actions for item in defined_actions
diff --git a/jenkins/report-jenkins-jobs.py b/jenkins/report-jenkins-jobs.py
index b793c2fa6a17..288437f39e1c 100755
--- a/jenkins/report-jenkins-jobs.py
+++ b/jenkins/report-jenkins-jobs.py
@@ -1,82 +1,119 @@
#!/usr/bin/env python3
from __future__ import print_function
+
print("")
-print('')
+print("")
print('')
print('')
-print('')
-print('')
-print('')
+print(
+ ''
+)
+print("")
+print("")
print('
')
print('
')
print('
')
print('
CMS Jenkins Projects
')
-print('
This page displays a summary of all CMS Jenkins projects , their sub projects , upstream projects and downstream projects. To see the deatil and confgiuration of a project in Jenkins , click on project name.
')
-print('
')
-print('
')
-print('
')
+print(
+ "
This page displays a summary of all CMS Jenkins projects , their sub projects , upstream projects and downstream projects. To see the deatil and confgiuration of a project in Jenkins , click on project name.
"
+)
+print(" ")
+print(" ")
+print(" ")
from collections import defaultdict
+
parents = defaultdict(list)
import json
import time
try:
- fd = open('/tmp/report_gen.txt')
- txt = fd.read()
+ fd = open("/tmp/report_gen.txt")
+ txt = fd.read()
except Exception as e:
- print("Error reading the file")
+ print("Error reading the file")
data_uns = json.loads(txt)
-data = sorted(list(data_uns.items()),key=lambda s: s[0].lower())
+data = sorted(list(data_uns.items()), key=lambda s: s[0].lower())
for item in data:
- name = item[1]['job_name']
- if name.startswith('DMWM'):
- continue
- print('
")
- if len(item[1]['downstream']) > 0:
- d = [ x for x in item[1]['downstream'] ]
- for chd in d:
- parents[chd].append(name)
- print('
')
- print("DownStream Projects: ", ' '.join([ '' + x + '' for x in d ]) , " ")
- print('
')
- if len(item[1]['subprojects']) > 0:
- sub = [ x for x in item[1]['subprojects'] ]
- print('
')
- print("Sub Projects: ", ' '.join([ '' + x + '' for x in sub ]) , " ")
- print('
')
- for child in sub:
- parents[child].append(name)
-
- if len(item[1]['triggers_from']) > 0:
- trg = [ x for x in item[1]['triggers_from']]
- item[1]['upstream'].extend(trg)
- for ent in parents:
- if ent == name:
- item[1]['upstream'].extend(parents[name])
+ name = item[1]["job_name"]
+ if name.startswith("DMWM"):
+ continue
+ print('
")
+ if len(item[1]["downstream"]) > 0:
+ d = [x for x in item[1]["downstream"]]
+ for chd in d:
+ parents[chd].append(name)
+ print("
")
+ print(
+ "DownStream Projects: ",
+ " ".join(["" + x + "" for x in d]),
+ " ",
+ )
+ print("
")
+ if len(item[1]["subprojects"]) > 0:
+ sub = [x for x in item[1]["subprojects"]]
+ print("
")
+ print(
+ "Sub Projects: ",
+ " ".join(["" + x + "" for x in sub]),
+ " ",
+ )
+ print("
")
+ for child in sub:
+ parents[child].append(name)
+
+ if len(item[1]["triggers_from"]) > 0:
+ trg = [x for x in item[1]["triggers_from"]]
+ item[1]["upstream"].extend(trg)
+ for ent in parents:
+ if ent == name:
+ item[1]["upstream"].extend(parents[name])
- if len(item[1]['upstream']) > 0:
- item[1]['upstream'] = set(item[1]['upstream'])
- up = [ x for x in item[1]['upstream']]
- print('
')
- print("UpStream Projects: ", ' '.join([ '' + x + '' for x in up ]) , " ")
- print('
')
- print('
')
- print('
')
+ if len(item[1]["upstream"]) > 0:
+ item[1]["upstream"] = set(item[1]["upstream"])
+ up = [x for x in item[1]["upstream"]]
+ print("
")
+ print(
+ "UpStream Projects: ",
+ " ".join(["" + x + "" for x in up]),
+ " ",
+ )
+ print("
\n"
# regex
-regex_dashes = '^(-|=)*$'
-regex_td = '^[ ]*[\d *]+.*\..+$'
+regex_dashes = "^(-|=)*$"
+regex_td = "^[ ]*[\d *]+.*\..+$"
# regex_th = '^[^\d\W]+$'
-regex_th = '.*(NLOC)'
-regex_th_total = '^Total nloc'
-regex_H1_warnings = ' *^!+.*!+ *$'
-regex_H1_no_warnings = '^No thresholds exceeded \('
-regex_H1_files = '^\d+ file analyzed'
+regex_th = ".*(NLOC)"
+regex_th_total = "^Total nloc"
+regex_H1_warnings = " *^!+.*!+ *$"
+regex_H1_no_warnings = "^No thresholds exceeded \("
+regex_H1_files = "^\d+ file analyzed"
regex_split = "[ ]{2,}|[ ]*$]"
regex_split_td = "[ ]{1,}|[ ]*$]"
regex_line_to_url = "[a-zA-Z]"
@@ -84,16 +84,14 @@ def get_args():
"""
# Assign description to the help doc
- parser = argparse.ArgumentParser(
- description='Script converts lizard .txt output to .html')
+ parser = argparse.ArgumentParser(description="Script converts lizard .txt output to .html")
# Add arguments
+ parser.add_argument("-s", "--source", type=str, help="Source file", required=True)
+ parser.add_argument("-d", "--dir", type=str, help="Local output directory", required=True)
parser.add_argument(
- '-s', '--source', type=str, help='Source file', required=True)
- parser.add_argument(
- '-d', '--dir', type=str, help='Local output directory', required=True)
- parser.add_argument(
- '-l', '--link_root', type=str, help="Project's repository at Github", required=True)
+ "-l", "--link_root", type=str, help="Project's repository at Github", required=True
+ )
# Array for all arguments passed to script
args = parser.parse_args()
@@ -113,13 +111,11 @@ def text_with_href(url_base, line):
line_numbers_group = re.search(regex_has_line_numbers, line)
if bool(line_numbers_group):
lines_string = line_numbers_group.group(0)
- lines_string = lines_string.replace('@', '')
- lines = lines_string.split('-')
+ lines_string = lines_string.replace("@", "")
+ lines = lines_string.split("-")
line_split = re.split(regex_has_line_numbers, line)
- url = url_base + line_split[1] \
- + "#" \
- + "L{0}-L{1}".format(lines[0], lines[1])
+ url = url_base + line_split[1] + "#" + "L{0}-L{1}".format(lines[0], lines[1])
return a_href.format(url=url, text=line)
else:
url = url_base + line
@@ -134,11 +130,12 @@ def parse(f_out, line):
if bool(re.search(regex_dashes, line)):
return False
- elif bool(re.search(regex_H1_warnings, line)
- or (re.search(regex_H1_no_warnings, line))
- or (re.search(regex_th_total, line))
- or re.search(regex_H1_files, line)
- ):
+ elif bool(
+ re.search(regex_H1_warnings, line)
+ or (re.search(regex_H1_no_warnings, line))
+ or (re.search(regex_th_total, line))
+ or re.search(regex_H1_files, line)
+ ):
return True
elif bool(re.search(regex_th, line)):
@@ -150,9 +147,7 @@ def parse(f_out, line):
row_dataset = []
for td_val in table_row_values[:-1]:
row_dataset.append(td_val)
- row_dataset.append(
- text_with_href(g_link_root, table_row_values[-1])
- )
+ row_dataset.append(text_with_href(g_link_root, table_row_values[-1]))
g_table_data.append(row_dataset)
return False
@@ -162,12 +157,10 @@ def parse(f_out, line):
def write_table_th(f_out, line):
global g_total_col_nr
table_header_values = re.split(regex_split, line.strip())
- generated_row = ''
+ generated_row = ""
for th_val in table_header_values:
generated_row += th.format(th_val)
- f_out.write(
- '' + tr.format(generated_row) + '\n'
- )
+ f_out.write("" + tr.format(generated_row) + "\n")
g_total_col_nr = len(table_header_values) - 1
@@ -176,13 +169,12 @@ def main(source_f_path, output_d, link_root):
global g_link_root, g_table_data
g_link_root = link_root
- with open(source_f_path, 'r') as source_f:
-
+ with open(source_f_path, "r") as source_f:
do_split = False
# --- { all_functions.html }
- html_0 = open(os.path.join(output_d, 'all_functions.html'), 'w')
- html_0.write(html_start.format(title='Statistics of all functions'))
+ html_0 = open(os.path.join(output_d, "all_functions.html"), "w")
+ html_0.write(html_start.format(title="Statistics of all functions"))
html_0.write(table_start)
while do_split is False:
@@ -191,15 +183,15 @@ def main(source_f_path, output_d, link_root):
if not line:
break
html_0.write(table_end)
- html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=''))
+ html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=""))
html_0.close()
g_table_data = []
# --- {END all_functions.html }
# --- { file_statistics.html }
- html_0 = open(os.path.join(output_d, 'file_statistics.html'), 'w')
- html_0.write(html_start.format(title='Files statistics'))
- html_0.write(h2.format(line, klass=''))
+ html_0 = open(os.path.join(output_d, "file_statistics.html"), "w")
+ html_0.write(html_start.format(title="Files statistics"))
+ html_0.write(h2.format(line, klass=""))
html_0.write(table_start)
do_split = False
while do_split is False:
@@ -208,16 +200,16 @@ def main(source_f_path, output_d, link_root):
if not line:
break
html_0.write(table_end)
- html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=''))
+ html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=""))
html_0.close()
g_table_data = []
# --- {END file_statistics.html }
# --- { warnings.html }
- html_0 = open(os.path.join(output_d, 'warnings.html'), 'w')
- html_0.write(html_start.format(title='Warnings'))
+ html_0 = open(os.path.join(output_d, "warnings.html"), "w")
+ html_0.write(html_start.format(title="Warnings"))
- h1_class = ''
+ h1_class = ""
if bool(re.search(regex_H1_warnings, line)):
h1_class = 'class="alert alert-danger"'
@@ -232,14 +224,14 @@ def main(source_f_path, output_d, link_root):
break
html_0.write(table_end)
- html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=''))
+ html_0.write(html_end.format(data=g_table_data, comment_out_scrollX=""))
html_0.close()
g_table_data = []
# --- {END warnings.html }
# --- { total.html }
- html_0 = open(os.path.join(output_d, 'total.html'), 'w')
- html_0.write(html_start.format(title='Total scan statistics'))
+ html_0 = open(os.path.join(output_d, "total.html"), "w")
+ html_0.write(html_start.format(title="Total scan statistics"))
html_0.write(table_start)
write_table_th(html_0, line)
do_split = False
@@ -249,11 +241,11 @@ def main(source_f_path, output_d, link_root):
if not line:
break
html_0.write(table_end)
- html_0.write(html_end.format(data=g_table_data, comment_out_scrollX='//'))
+ html_0.write(html_end.format(data=g_table_data, comment_out_scrollX="//"))
html_0.close()
g_table_data = []
# --- {END total.html }
-if __name__ == '__main__':
+if __name__ == "__main__":
main(*get_args())
diff --git a/lizard-processing/test/test_lizard_to_html.py b/lizard-processing/test/test_lizard_to_html.py
index 0122d0f54d39..1901c1db7806 100755
--- a/lizard-processing/test/test_lizard_to_html.py
+++ b/lizard-processing/test/test_lizard_to_html.py
@@ -9,21 +9,30 @@
lines_th = [
"NLOC CCN token PARAM length location ",
"NLOC Avg.NLOC AvgCCN Avg.token function_cnt file",
- "Total nloc Avg.NLOC AvgCCN Avg.token Fun Cnt Warning cnt Fun Rt nloc Rt"
+ "Total nloc Avg.NLOC AvgCCN Avg.token Fun Cnt Warning cnt Fun Rt nloc Rt",
]
-line_td = "6 3 28 0 6 AlignableDetOrUnitPtr::operator Alignable " \
- "*@20-25@cms-sw-cmssw-630acaf/Alignment/CommonAlignment/src/AlignableDetOrUnitPtr.cc "
+line_td = (
+ "6 3 28 0 6 AlignableDetOrUnitPtr::operator Alignable "
+ "*@20-25@cms-sw-cmssw-630acaf/Alignment/CommonAlignment/src/AlignableDetOrUnitPtr.cc "
+)
-line_warning = '!!!! Warnings (cyclomatic_complexity > 5 or length > 1000 or parameter_count > 100) !!!!'
-line_no_warning = 'No thresholds exceeded (cyclomatic_complexity > 15 or length > 1000 or parameter_count > 100)'
-line_files = '21 file analyzed.'
+line_warning = (
+ "!!!! Warnings (cyclomatic_complexity > 5 or length > 1000 or parameter_count > 100) !!!!"
+)
+line_no_warning = (
+ "No thresholds exceeded (cyclomatic_complexity > 15 or length > 1000 or parameter_count > 100)"
+)
+line_files = "21 file analyzed."
class TestSequenceFunctions(unittest.TestCase):
def test_main(self):
- main(os.path.join(os.path.dirname(__file__), "../", './test-data/lizard-test-output.txt'), '/tmp',
- 'https://github.com/cms-sw/cmssw/blob/master/')
+ main(
+ os.path.join(os.path.dirname(__file__), "../", "./test-data/lizard-test-output.txt"),
+ "/tmp",
+ "https://github.com/cms-sw/cmssw/blob/master/",
+ )
def test_reg_th(self):
for line in lines_th:
@@ -59,5 +68,6 @@ def test_split_1(self):
self.assertEqual(len(re.split(regex_split, lines_th[1].strip())), 6)
self.assertEqual(len(re.split(regex_split, lines_th[2].strip())), 8)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
unittest.main()
diff --git a/logRootQA.py b/logRootQA.py
index a3c105d54225..e1df780716ce 100644
--- a/logRootQA.py
+++ b/logRootQA.py
@@ -9,91 +9,109 @@
import subprocess as sub
Log_Lines_Filter = [
- ('This TensorFlow binary is optimized with'),
- ('[PostMaster', '[Error'),
- ('Initiating request to open file', 'root://'),
- ('Successfully opened file', 'root://'),
- ('Closed file', 'root://')
+ ("This TensorFlow binary is optimized with"),
+ ("[PostMaster", "[Error"),
+ ("Initiating request to open file", "root://"),
+ ("Successfully opened file", "root://"),
+ ("Closed file", "root://"),
]
+
def openfile(filename):
- if sys.version_info[0] == 2:
- return open(filename)
- return open(filename, encoding="utf8", errors='ignore')
+ if sys.version_info[0] == 2:
+ return open(filename)
+ return open(filename, encoding="utf8", errors="ignore")
+
+
+def getFiles(d, pattern):
+ return [
+ os.path.join(dp, f)
+ for dp, dn, filenames in os.walk(d)
+ for f in filenames
+ if fnmatch(f, pattern)
+ ]
+
-def getFiles(d,pattern):
- return [os.path.join(dp, f) for dp, dn, filenames in os.walk(d) for f in filenames if fnmatch(f, pattern)]
# return [ f for f in listdir(d) if isfile(join(d,f)) ]
-def getCommonFiles(d1,d2,pattern):
- l1=getFiles(d1,pattern)
- l2=getFiles(d2,pattern)
- common=[]
+
+def getCommonFiles(d1, d2, pattern):
+ l1 = getFiles(d1, pattern)
+ l2 = getFiles(d2, pattern)
+ common = []
for l in l1:
- lT=l[len(d1):]
- if 'runall' in lT or 'dasquery' in lT: continue
- if d2+lT in l2:
+ lT = l[len(d1) :]
+ if "runall" in lT or "dasquery" in lT:
+ continue
+ if d2 + lT in l2:
common.append(lT)
return common
+
def getWorkflow(f):
m = re.search("/\d+\.\d+_", f)
- if not m: return "(none)"
- return m.group().replace("/","").replace("_", "")
+ if not m:
+ return "(none)"
+ return m.group().replace("/", "").replace("_", "")
-def checkLines(l1,l2):
+def checkLines(l1, l2):
filt1 = filteredLines(l1)
filt2 = filteredLines(l2)
lines = len(filt2) - len(filt1)
- if lines>0:
- print("You added "+str(lines)+" to "+l2)
- if lines<0:
- print("You removed "+str(-1*lines)+" from "+l2)
-
+ if lines > 0:
+ print("You added " + str(lines) + " to " + l2)
+ if lines < 0:
+ print("You removed " + str(-1 * lines) + " from " + l2)
+
return (lines, filt1, filt2)
+
def filteredLines(f):
- retval={}
+ retval = {}
for l in openfile(f):
- sl=l.strip()
- skip=False
+ sl = l.strip()
+ skip = False
for data in Log_Lines_Filter:
- skip = True
- for s in data:
- if not s in sl:
- skip = False
- break
- if not skip: continue
- break
- if skip: continue
- if 'P Y T H H III A A' in l:continue
+ skip = True
+ for s in data:
+ if not s in sl:
+ skip = False
+ break
+ if not skip:
+ continue
+ break
+ if skip:
+ continue
+ if "P Y T H H III A A" in l:
+ continue
# look for and remove timestamps
- if '-' in l and ':' in l:
- sp=l.strip().split()
-
- ds=[]
- for i in range(0,len(sp)-1):
- if sp[i].count('-')==2 and sp[i+1].count(':')==2 and '-20' in sp[i]:
- ds.append(sp[i]) #its a date
- ds.append(sp[i+1]) #its a date
- if len(ds)!=0:
- sp2=l.strip().split(' ')
- sp3=[]
- for i in range(0,len(sp2)):
+ if "-" in l and ":" in l:
+ sp = l.strip().split()
+
+ ds = []
+ for i in range(0, len(sp) - 1):
+ if sp[i].count("-") == 2 and sp[i + 1].count(":") == 2 and "-20" in sp[i]:
+ ds.append(sp[i]) # its a date
+ ds.append(sp[i + 1]) # its a date
+ if len(ds) != 0:
+ sp2 = l.strip().split(" ")
+ sp3 = []
+ for i in range(0, len(sp2)):
if sp2[i] not in ds:
sp3.append(sp2[i])
- sl=' '.join(sp3)
- retval[sl]=1
+ sl = " ".join(sp3)
+ retval[sl] = 1
return retval
-def getRelevantDiff(filt1, filt2, l1, l2 ,maxInFile=20):
- nPrintTot=0
- keys1=filt1.keys()
- keys2=filt2.keys()
- newIn1=[]
- newIn2=[]
+def getRelevantDiff(filt1, filt2, l1, l2, maxInFile=20):
+ nPrintTot = 0
+
+ keys1 = filt1.keys()
+ keys2 = filt2.keys()
+ newIn1 = []
+ newIn2 = []
for k in keys1:
if k not in filt2:
newIn1.append(k)
@@ -101,95 +119,116 @@ def getRelevantDiff(filt1, filt2, l1, l2 ,maxInFile=20):
if k not in filt1:
newIn2.append(k)
- if len(newIn1)>0 or len(newIn2)>0:
- print('')
- print(len(newIn1),'Lines only in',l1)
- nPrint=0
- for l in newIn1:
- nPrint=nPrint+1
- if nPrint>maxInFile: break
- print(' ',l)
- nPrintTot=nPrint
- print(len(newIn2),'Lines only in',l2)
- nPrint=0
- for l in newIn2:
- nPrint=nPrint+1
- if nPrint>maxInFile: break
- print(' ',l)
- nPrintTot=nPrintTot+nPrint
+ if len(newIn1) > 0 or len(newIn2) > 0:
+ print("")
+ print(len(newIn1), "Lines only in", l1)
+ nPrint = 0
+ for l in newIn1:
+ nPrint = nPrint + 1
+ if nPrint > maxInFile:
+ break
+ print(" ", l)
+ nPrintTot = nPrint
+ print(len(newIn2), "Lines only in", l2)
+ nPrint = 0
+ for l in newIn2:
+ nPrint = nPrint + 1
+ if nPrint > maxInFile:
+ break
+ print(" ", l)
+ nPrintTot = nPrintTot + nPrint
return nPrintTot
-
def runCommand(c):
- p=sub.Popen(c,stdout=sub.PIPE,stderr=sub.PIPE,universal_newlines=True)
- output=p.communicate()
+ p = sub.Popen(c, stdout=sub.PIPE, stderr=sub.PIPE, universal_newlines=True)
+ output = p.communicate()
return output
-def checkEventContent(r1,r2):
- retVal=True
-
- output1=runCommand(['ls','-l',r1])
- output2=runCommand(['ls','-l',r2])
- s1=output1[0].split()[4]
- s2=output2[0].split()[4]
- if abs(float(s2)-float(s1))>0.1*float(s1):
- print("Big output file size change? in ",r1,s1,s2)
- retVal=False
-
- cmd1 = ['edmEventSize','-v',r1]
- cmd2 = ['edmEventSize','-v',r2]
- if os.path.exists(r1+'.edmEventSize'):
- cmd1 = ['cat',r1+'.edmEventSize']
- if os.path.exists(r2+'.edmEventSize'):
- cmd2 = ['cat',r2+'.edmEventSize']
- output1=runCommand(cmd1)
- output2=runCommand(cmd2)
-
- if 'contains no' in output1[1] and 'contains no' in output2[1]:
- w=1
+
+def checkEventContent(r1, r2):
+ retVal = True
+
+ output1 = runCommand(["ls", "-l", r1])
+ output2 = runCommand(["ls", "-l", r2])
+ s1 = output1[0].split()[4]
+ s2 = output2[0].split()[4]
+ if abs(float(s2) - float(s1)) > 0.1 * float(s1):
+ print("Big output file size change? in ", r1, s1, s2)
+ retVal = False
+
+ cmd1 = ["edmEventSize", "-v", r1]
+ cmd2 = ["edmEventSize", "-v", r2]
+ if os.path.exists(r1 + ".edmEventSize"):
+ cmd1 = ["cat", r1 + ".edmEventSize"]
+ if os.path.exists(r2 + ".edmEventSize"):
+ cmd2 = ["cat", r2 + ".edmEventSize"]
+ output1 = runCommand(cmd1)
+ output2 = runCommand(cmd2)
+
+ if "contains no" in output1[1] and "contains no" in output2[1]:
+ w = 1
else:
- sp=output1[0].split('\n')
- p1=[]
+ sp = output1[0].split("\n")
+ p1 = []
for p in sp:
- if len(p.split())>0:
+ if len(p.split()) > 0:
p1.append(p.split()[0])
- sp=output2[0].split('\n')
- p2=[]
+ sp = output2[0].split("\n")
+ p2 = []
for p in sp:
- if len(p.split())>0:
+ if len(p.split()) > 0:
p2.append(p.split()[0])
- common=[]
+ common = []
for p in p1:
- if p in p2: common.append(p)
- if len(common)!=len(p1) or len(common)!=len(p2):
- print('Change in products found in',r1)
+ if p in p2:
+ common.append(p)
+ if len(common) != len(p1) or len(common) != len(p2):
+ print("Change in products found in", r1)
for p in p1:
- if p not in common: print(' Product missing '+p)
+ if p not in common:
+ print(" Product missing " + p)
for p in p2:
- if p not in common: print(' Product added '+p)
- retVal=False
+ if p not in common:
+ print(" Product added " + p)
+ retVal = False
return retVal
-def checkDQMSize(r1,r2,diff, wfs):
- haveDQMChecker=False
+
+def checkDQMSize(r1, r2, diff, wfs):
+ haveDQMChecker = False
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
-# print(path)
- exe_file = os.path.join(path, 'dqmMemoryStats.py')
+ # print(path)
+ exe_file = os.path.join(path, "dqmMemoryStats.py")
if os.path.isfile(exe_file) and os.access(exe_file, os.X_OK):
- haveDQMChecker=True
+ haveDQMChecker = True
break
- if not haveDQMChecker:
- print('Missing dqmMemoryStats in this release')
+ if not haveDQMChecker:
+ print("Missing dqmMemoryStats in this release")
return -1
- output,error=runCommand(['dqmMemoryStats.py','-x','-u','KiB','-p3','-c0','-d2','--summary','-r',r1,'-i',r2])
+ output, error = runCommand(
+ [
+ "dqmMemoryStats.py",
+ "-x",
+ "-u",
+ "KiB",
+ "-p3",
+ "-c0",
+ "-d2",
+ "--summary",
+ "-r",
+ r1,
+ "-i",
+ r2,
+ ]
+ )
lines = output.splitlines()
total = re.search("-?\d+\.\d+", lines[-1])
if not total:
- print('Weird output',r1)
+ print("Weird output", r1)
print(output)
return -2
kib = float(total.group())
@@ -197,80 +236,90 @@ def checkDQMSize(r1,r2,diff, wfs):
print(lines, diff)
maxdiff = 10
for line in lines:
- if re.match("\s*-?\d+.*", line): # normal output line
+ if re.match("\s*-?\d+.*", line): # normal output line
if line not in diff:
if len(diff) == maxdiff:
- diff.append(" ... ");
+ diff.append(" ... ")
wfs.append(getWorkflow(r1))
- if len(diff) >= maxdiff: continue # limit amount of output
+ if len(diff) >= maxdiff:
+ continue # limit amount of output
diff.append(line)
wfs.append(getWorkflow(r1))
else:
idx = diff.index(line)
if not wfs[idx].endswith(",..."):
wfs[idx] += ",..."
-
+
return kib
def summaryJR(jrDir):
- nDiff=0
+ nDiff = 0
print(jrDir)
- dirs=[]
- #find directories at top level
+ dirs = []
+ # find directories at top level
for root, dirs, _ in os.walk(jrDir):
break
- nAll=0
- nOK=0
+ nAll = 0
+ nOK = 0
for d, subdir, files in os.walk(jrDir):
- if not d.split('/')[-1].startswith('all_'): continue
- if not '_' in d: continue
- relative_d = d.replace(root,'')
- diffs=[file for file in files if file.endswith('.png')]
- if len(diffs)>0:
- print('JR results differ',len(diffs),relative_d)
- nDiff=nDiff+len(diffs)
- logs=[file for file in files if file.endswith('.log')]
- nAll+=len(logs)
+ if not d.split("/")[-1].startswith("all_"):
+ continue
+ if not "_" in d:
+ continue
+ relative_d = d.replace(root, "")
+ diffs = [file for file in files if file.endswith(".png")]
+ if len(diffs) > 0:
+ print("JR results differ", len(diffs), relative_d)
+ nDiff = nDiff + len(diffs)
+ logs = [file for file in files if file.endswith(".log")]
+ nAll += len(logs)
for log in logs:
- log = os.path.join(d,log)
- output=runCommand(['grep','DONE calling validate',log])
- if len(output[0])>0:
- nOK+=1
+ log = os.path.join(d, log)
+ output = runCommand(["grep", "DONE calling validate", log])
+ if len(output[0]) > 0:
+ nOK += 1
else:
- print('JR results failed',relative_d)
- return nDiff,nAll,nOK
+ print("JR results failed", relative_d)
+ return nDiff, nAll, nOK
+
def parseNum(s):
- return int(s[1:-1].split('/')[0])
-
+ return int(s[1:-1].split("/")[0])
+
def summaryComp(compDir):
print(compDir)
- files=[]
+ files = []
for root, dirs, files in os.walk(compDir):
break
- comps=[]
+ comps = []
for f in files:
- if 'log' in f[-3:]:
- comps.append(root+'/'+f)
+ if "log" in f[-3:]:
+ comps.append(root + "/" + f)
- results=[0,0,0,0,0,0,0]
+ results = [0, 0, 0, 0, 0, 0, 0]
for comp in comps:
- loc=[0,0,0,0,0,0]
+ loc = [0, 0, 0, 0, 0, 0]
for l in open(comp):
- if '- summary of' in l: loc[0]=int(l.split()[3])
- if 'o Failiures:' in l: loc[1]=parseNum(l.split()[3])
- if 'o Nulls:' in l: loc[2]=parseNum(l.split()[3])
- if 'o Successes:' in l: loc[3]=parseNum(l.split()[3])
- if 'o Skipped:' in l: loc[4]=parseNum(l.split()[3])
- if 'o Missing objects:' in l: loc[5]=int(l.split()[3])
- print('Histogram comparison details',comp,loc)
- for i in range(0,5):
- results[i]=results[i]+loc[i]
- results[6]=results[6]+1
+ if "- summary of" in l:
+ loc[0] = int(l.split()[3])
+ if "o Failiures:" in l:
+ loc[1] = parseNum(l.split()[3])
+ if "o Nulls:" in l:
+ loc[2] = parseNum(l.split()[3])
+ if "o Successes:" in l:
+ loc[3] = parseNum(l.split()[3])
+ if "o Skipped:" in l:
+ loc[4] = parseNum(l.split()[3])
+ if "o Missing objects:" in l:
+ loc[5] = int(l.split()[3])
+ print("Histogram comparison details", comp, loc)
+ for i in range(0, 5):
+ results[i] = results[i] + loc[i]
+ results[6] = results[6] + 1
return results
@@ -278,137 +327,172 @@ def summaryComp(compDir):
#
#
#
-qaIssues=False
+qaIssues = False
# one way to set up for local tests..
-#login to ssh cmssdt server (see CMSSDT_SERVER in ./cmssdt.sh for server name)
-#copy out data from a recent pull request comparison
-#cd /data/sdt/SDT/jenkins-artifacts/ib-baseline-tests/CMSSW_10_0_X_2017-11-05-2300/slc6_amd64_gcc630/-GenuineIntel
-#scp -r matrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/
-#cd ../../../../pull-request-integration/PR-21181/24200/
-#scp -r runTheMatrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/.
-#cd ../../../../baseLineComparions/CMSSW_10_0_X_2017-11-05-2300+21181/
-#scp -r 23485 dlange@cmsdev01:/build/dlange/171103/t1/.
-
-#https://cmssdt.cern.ch/SDT/jenkins-artifacts/baseLineComparisons/CMSSW_9_0_X_2017-03-22-1100+18042/18957/validateJR/
-baseDir='../t1/runTheMatrix-results'
-testDir='../t1/matrix-results'
-jrDir='../t1/23485/validateJR'
-compDir='../t1/23485'
-
-run="all"
-if len(sys.argv)==6:
- run = sys.argv[5]
-if len(sys.argv)>=5:
- baseDir=sys.argv[1].rstrip("/")
- testDir=sys.argv[2].rstrip("/")
- jrDir=sys.argv[3].rstrip("/")
- compDir=sys.argv[4].rstrip("/")
+# login to ssh cmssdt server (see CMSSDT_SERVER in ./cmssdt.sh for server name)
+# copy out data from a recent pull request comparison
+# cd /data/sdt/SDT/jenkins-artifacts/ib-baseline-tests/CMSSW_10_0_X_2017-11-05-2300/slc6_amd64_gcc630/-GenuineIntel
+# scp -r matrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/
+# cd ../../../../pull-request-integration/PR-21181/24200/
+# scp -r runTheMatrix-results/ dlange@cmsdev01:/build/dlange/171103/t1/.
+# cd ../../../../baseLineComparions/CMSSW_10_0_X_2017-11-05-2300+21181/
+# scp -r 23485 dlange@cmsdev01:/build/dlange/171103/t1/.
+
+# https://cmssdt.cern.ch/SDT/jenkins-artifacts/baseLineComparisons/CMSSW_9_0_X_2017-03-22-1100+18042/18957/validateJR/
+baseDir = "../t1/runTheMatrix-results"
+testDir = "../t1/matrix-results"
+jrDir = "../t1/23485/validateJR"
+compDir = "../t1/23485"
+
+run = "all"
+if len(sys.argv) == 6:
+ run = sys.argv[5]
+if len(sys.argv) >= 5:
+ baseDir = sys.argv[1].rstrip("/")
+ testDir = sys.argv[2].rstrip("/")
+ jrDir = sys.argv[3].rstrip("/")
+ compDir = sys.argv[4].rstrip("/")
#### check the printouts
-lines=0
-lChanges=False
-nLog=0
-nPrintTot=0
-stopPrint=0
-sameEvts=True
-nRoot=0
-newDQM=0
-nDQM=0
-diff,wfs=[],[]
-if run in ['all', 'events']:
- if not os.path.exists('comparison-events.json'):
- for l in getCommonFiles(baseDir,testDir,'step*.log'):
- lCount, filt1, filt2 = checkLines(baseDir+l,testDir+l)
- lines=lines+lCount
- if nPrintTot<1000:
- nprint=getRelevantDiff(filt1, filt2, baseDir+l, testDir+l)
- nPrintTot=nPrintTot+nprint
- else:
- if stopPrint==0:
- print('Skipping further diff comparisons. Too many diffs')
- stopPrint=1
- nLog=nLog+1
- if lines>0:
- lChanges=True
- #### compare edmEventSize on each to look for new missing candidates
- for r in getCommonFiles(baseDir,testDir,'step*.root'):
- if 'inDQM.root' not in r:
- checkResult=checkEventContent(baseDir+r,testDir+r)
- sameEvts=sameEvts and checkResult
- nRoot=nRoot+1
- for r in getCommonFiles(baseDir,testDir,'DQM*.root'):
- t=checkDQMSize(baseDir+r,testDir+r,diff,wfs)
- print(r,t)
- newDQM=newDQM+t
- nDQM=nDQM+1
- with open('comparison-events.json', 'w') as f:
- json.dump([lines, lChanges, nLog, nPrintTot, stopPrint, sameEvts, nRoot, newDQM, nDQM, diff, wfs], f)
- else:
- with open('comparison-events.json') as f:
- (lines, lChanges, nLog, nPrintTot, stopPrint, sameEvts, nRoot, newDQM, nDQM, diff, wfs) = json.load(f)
-
- print("Logs:", lines, lChanges, nLog, nPrintTot, stopPrint)
- print("Events:", sameEvts, nRoot, newDQM, nDQM, diff, wfs)
- if lines>0 :
- print("SUMMARY You potentially added "+str(lines)+" lines to the logs")
- elif lines<0 :
- print("SUMMARY You potentially removed "+str(-1*lines)+" lines from the logs")
- else:
- print("SUMMARY No significant changes to the logs found")
-
- if lChanges:
- qaIssues=True
-
- if not sameEvts:
- qaIssues=True
- print('SUMMARY ROOTFileChecks: Some differences in event products or their sizes found')
- print('\n')
- if run == "events":
- sys.exit(0)
+lines = 0
+lChanges = False
+nLog = 0
+nPrintTot = 0
+stopPrint = 0
+sameEvts = True
+nRoot = 0
+newDQM = 0
+nDQM = 0
+diff, wfs = [], []
+if run in ["all", "events"]:
+ if not os.path.exists("comparison-events.json"):
+ for l in getCommonFiles(baseDir, testDir, "step*.log"):
+ lCount, filt1, filt2 = checkLines(baseDir + l, testDir + l)
+ lines = lines + lCount
+ if nPrintTot < 1000:
+ nprint = getRelevantDiff(filt1, filt2, baseDir + l, testDir + l)
+ nPrintTot = nPrintTot + nprint
+ else:
+ if stopPrint == 0:
+ print("Skipping further diff comparisons. Too many diffs")
+ stopPrint = 1
+ nLog = nLog + 1
+ if lines > 0:
+ lChanges = True
+ #### compare edmEventSize on each to look for new missing candidates
+ for r in getCommonFiles(baseDir, testDir, "step*.root"):
+ if "inDQM.root" not in r:
+ checkResult = checkEventContent(baseDir + r, testDir + r)
+ sameEvts = sameEvts and checkResult
+ nRoot = nRoot + 1
+ for r in getCommonFiles(baseDir, testDir, "DQM*.root"):
+ t = checkDQMSize(baseDir + r, testDir + r, diff, wfs)
+ print(r, t)
+ newDQM = newDQM + t
+ nDQM = nDQM + 1
+ with open("comparison-events.json", "w") as f:
+ json.dump(
+ [
+ lines,
+ lChanges,
+ nLog,
+ nPrintTot,
+ stopPrint,
+ sameEvts,
+ nRoot,
+ newDQM,
+ nDQM,
+ diff,
+ wfs,
+ ],
+ f,
+ )
+ else:
+ with open("comparison-events.json") as f:
+ (
+ lines,
+ lChanges,
+ nLog,
+ nPrintTot,
+ stopPrint,
+ sameEvts,
+ nRoot,
+ newDQM,
+ nDQM,
+ diff,
+ wfs,
+ ) = json.load(f)
+
+ print("Logs:", lines, lChanges, nLog, nPrintTot, stopPrint)
+ print("Events:", sameEvts, nRoot, newDQM, nDQM, diff, wfs)
+ if lines > 0:
+ print("SUMMARY You potentially added " + str(lines) + " lines to the logs")
+ elif lines < 0:
+ print("SUMMARY You potentially removed " + str(-1 * lines) + " lines from the logs")
+ else:
+ print("SUMMARY No significant changes to the logs found")
+
+ if lChanges:
+ qaIssues = True
+
+ if not sameEvts:
+ qaIssues = True
+ print("SUMMARY ROOTFileChecks: Some differences in event products or their sizes found")
+ print("\n")
+ if run == "events":
+ sys.exit(0)
# now check the JR comparisons for differences
nDiff = 0
nAll = 0
nOK = 0
-if run in ['all', 'JR']:
- if not os.path.exists('comparison-JR.json'):
- nDiff,nAll,nOK=summaryJR(jrDir)
- with open('comparison-JR.json', 'w') as f:
- json.dump([nDiff,nAll,nOK], f)
- else:
- with open('comparison-JR.json') as f:
- (nDiff,nAll,nOK) = json.load(f)
- print('SUMMARY Reco comparison results:',nDiff,'differences found in the comparisons')
- if nAll!=nOK:
- print('SUMMARY Reco comparison had ',nAll-nOK,'failed jobs')
- print('\n')
- if run == "JR":
- sys.exit(0)
+if run in ["all", "JR"]:
+ if not os.path.exists("comparison-JR.json"):
+ nDiff, nAll, nOK = summaryJR(jrDir)
+ with open("comparison-JR.json", "w") as f:
+ json.dump([nDiff, nAll, nOK], f)
+ else:
+ with open("comparison-JR.json") as f:
+ (nDiff, nAll, nOK) = json.load(f)
+ print("SUMMARY Reco comparison results:", nDiff, "differences found in the comparisons")
+ if nAll != nOK:
+ print("SUMMARY Reco comparison had ", nAll - nOK, "failed jobs")
+ print("\n")
+ if run == "JR":
+ sys.exit(0)
# not check for default comparison
compSummary = []
-if not os.path.exists('comparison-comp.json'):
- compSummary=summaryComp(compDir)
- with open('comparison-comp.json', 'w') as f:
- json.dump(compSummary, f)
+if not os.path.exists("comparison-comp.json"):
+ compSummary = summaryComp(compDir)
+ with open("comparison-comp.json", "w") as f:
+ json.dump(compSummary, f)
else:
- with open('comparison-comp.json') as f:
+ with open("comparison-comp.json") as f:
compSummary = json.load(f)
-print('SUMMARY DQMHistoTests: Total files compared:',compSummary[6])
-print('SUMMARY DQMHistoTests: Total histograms compared:',compSummary[0])
-print('SUMMARY DQMHistoTests: Total failures:',compSummary[1])
-print('SUMMARY DQMHistoTests: Total nulls:',compSummary[2])
-print('SUMMARY DQMHistoTests: Total successes:',compSummary[3])
-print('SUMMARY DQMHistoTests: Total skipped:',compSummary[4])
-print('SUMMARY DQMHistoTests: Total Missing objects:',compSummary[5])
+print("SUMMARY DQMHistoTests: Total files compared:", compSummary[6])
+print("SUMMARY DQMHistoTests: Total histograms compared:", compSummary[0])
+print("SUMMARY DQMHistoTests: Total failures:", compSummary[1])
+print("SUMMARY DQMHistoTests: Total nulls:", compSummary[2])
+print("SUMMARY DQMHistoTests: Total successes:", compSummary[3])
+print("SUMMARY DQMHistoTests: Total skipped:", compSummary[4])
+print("SUMMARY DQMHistoTests: Total Missing objects:", compSummary[5])
-print('SUMMARY DQMHistoSizes: Histogram memory added:',newDQM,'KiB(',nDQM,'files compared)')
-for line, wf in zip(diff,wfs):
- print('SUMMARY DQMHistoSizes: changed (',wf,'):',line)
+print("SUMMARY DQMHistoSizes: Histogram memory added:", newDQM, "KiB(", nDQM, "files compared)")
+for line, wf in zip(diff, wfs):
+ print("SUMMARY DQMHistoSizes: changed (", wf, "):", line)
#### conclude
-print("SUMMARY Checked",nLog,"log files,",nRoot,"edm output root files,",compSummary[6],"DQM output files")
+print(
+ "SUMMARY Checked",
+ nLog,
+ "log files,",
+ nRoot,
+ "edm output root files,",
+ compSummary[6],
+ "DQM output files",
+)
if not qaIssues:
print("No potential problems in log/root QA checks!")
diff --git a/logUpdater.py b/logUpdater.py
index b19f0e66a097..819c03c8f624 100755
--- a/logUpdater.py
+++ b/logUpdater.py
@@ -11,112 +11,147 @@
from os.path import dirname, abspath, join
from cmsutils import doCmd, getIBReleaseInfo
from time import sleep
-SCRIPT_DIR=dirname(abspath(__file__))
-class LogUpdater(object):
+SCRIPT_DIR = dirname(abspath(__file__))
+
+class LogUpdater(object):
def __init__(self, dirIn=None, dryRun=False, remote=None, webDir="/data/sdt/buildlogs/"):
if not remote:
- with open(join(SCRIPT_DIR,"cmssdt.sh")) as ref:
- remote = "cmsbuild@"+[ line.split("=")[-1].strip() for line in ref.readlines() if "CMSSDT_SERVER=" in line][0]
+ with open(join(SCRIPT_DIR, "cmssdt.sh")) as ref:
+ remote = (
+ "cmsbuild@"
+ + [
+ line.split("=")[-1].strip()
+ for line in ref.readlines()
+ if "CMSSDT_SERVER=" in line
+ ][0]
+ )
self.dryRun = dryRun
self.remote = remote
self.cmsswBuildDir = dirIn
rel = os.path.basename(dirIn)
self.release = rel
rc, day, hour = getIBReleaseInfo(rel)
- self.webTargetDir = webDir + "/" + os.environ[
- "SCRAM_ARCH"] + "/www/" + day + "/" + rc + "-" + day + "-" + hour + "/" + self.release
+ self.webTargetDir = (
+ webDir
+ + "/"
+ + os.environ["SCRAM_ARCH"]
+ + "/www/"
+ + day
+ + "/"
+ + rc
+ + "-"
+ + day
+ + "-"
+ + hour
+ + "/"
+ + self.release
+ )
self.ssh_opt = "-o CheckHostIP=no -o ConnectTimeout=60 -o ConnectionAttempts=5 -o StrictHostKeyChecking=no -o BatchMode=yes -o PasswordAuthentication=no"
return
def updateUnitTestLogs(self, subdir=""):
-
- print("\n--> going to copy unit test logs to", self.webTargetDir, '... \n')
+ print("\n--> going to copy unit test logs to", self.webTargetDir, "... \n")
# copy back the test and relval logs to the install area
# check size first ... sometimes the log _grows_ to tens of GB !!
- testLogs = ['unitTestLogs.zip', 'unitTests-summary.log', 'unitTestResults.pkl', 'unitTests1.log']
+ testLogs = [
+ "unitTestLogs.zip",
+ "unitTests-summary.log",
+ "unitTestResults.pkl",
+ "unitTests1.log",
+ ]
for tl in testLogs:
- self.copyLogs(tl, '.', self.webTargetDir + "/" + subdir)
+ self.copyLogs(tl, ".", self.webTargetDir + "/" + subdir)
return
def updateGeomTestLogs(self):
- print("\n--> going to copy Geom test logs to", self.webTargetDir, '... \n')
- testLogs = ['dddreport.log', 'domcount.log']
+ print("\n--> going to copy Geom test logs to", self.webTargetDir, "... \n")
+ testLogs = ["dddreport.log", "domcount.log"]
for tl in testLogs:
- self.copyLogs(tl, '.', self.webTargetDir)
- self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs'))
+ self.copyLogs(tl, ".", self.webTargetDir)
+ self.copyLogs(tl, ".", os.path.join(self.webTargetDir, "testLogs"))
return
def updateDupDictTestLogs(self):
- print("\n--> going to copy dup dict test logs to", self.webTargetDir, '... \n')
- testLogs = ['dupDict-*.log']
+ print("\n--> going to copy dup dict test logs to", self.webTargetDir, "... \n")
+ testLogs = ["dupDict-*.log"]
for tl in testLogs:
- self.copyLogs(tl, '.', self.webTargetDir)
- self.copyLogs(tl, '.', os.path.join(self.webTargetDir, 'testLogs'))
+ self.copyLogs(tl, ".", self.webTargetDir)
+ self.copyLogs(tl, ".", os.path.join(self.webTargetDir, "testLogs"))
return
def updateLogFile(self, fileIn, subTrgDir=None):
desdir = self.webTargetDir
- if subTrgDir: desdir = os.path.join(desdir, subTrgDir)
- print("\n--> going to copy " + fileIn + " log to ", desdir, '... \n')
- self.copyLogs(fileIn, '.', desdir)
+ if subTrgDir:
+ desdir = os.path.join(desdir, subTrgDir)
+ print("\n--> going to copy " + fileIn + " log to ", desdir, "... \n")
+ self.copyLogs(fileIn, ".", desdir)
return
def updateCodeRulesCheckerLogs(self):
- print("\n--> going to copy cms code rules logs to", self.webTargetDir, '... \n')
- self.copyLogs('codeRules', '.', self.webTargetDir)
+ print("\n--> going to copy cms code rules logs to", self.webTargetDir, "... \n")
+ self.copyLogs("codeRules", ".", self.webTargetDir)
return
def updateRelValMatrixPartialLogs(self, partialSubDir, dirToSend):
- destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs')
- print("\n--> going to copy pyrelval partial matrix logs to", destination, '... \n')
+ destination = os.path.join(self.webTargetDir, "pyRelValPartialLogs")
+ print("\n--> going to copy pyrelval partial matrix logs to", destination, "... \n")
self.copyLogs(dirToSend, partialSubDir, destination)
self.runRemoteCmd("touch " + os.path.join(destination, dirToSend, "wf.done"))
return
def getDoneRelvals(self):
wfDoneFile = "wf.done"
- destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', "*", wfDoneFile)
+ destination = os.path.join(self.webTargetDir, "pyRelValPartialLogs", "*", wfDoneFile)
code, out = self.runRemoteCmd("ls " + destination, debug=False)
- return [ wf.split("/")[-2].split("_")[0] for wf in out.split("\n") if wf.endswith(wfDoneFile)]
+ return [
+ wf.split("/")[-2].split("_")[0] for wf in out.split("\n") if wf.endswith(wfDoneFile)
+ ]
def relvalAlreadyDone(self, wf):
wfDoneFile = "wf.done"
- destination = os.path.join(self.webTargetDir, 'pyRelValPartialLogs', str(wf) + "_*", wfDoneFile)
+ destination = os.path.join(
+ self.webTargetDir, "pyRelValPartialLogs", str(wf) + "_*", wfDoneFile
+ )
code, out = self.runRemoteCmd("ls -d " + destination)
- return ((code == 0) and out.endswith(wfDoneFile))
+ return (code == 0) and out.endswith(wfDoneFile)
def updateAddOnTestsLogs(self):
- print("\n--> going to copy addOn logs to", self.webTargetDir, '... \n')
- self.copyLogs('addOnTests.log', '.', self.webTargetDir)
- self.copyLogs('addOnTests.zip', 'addOnTests/logs', self.webTargetDir)
- self.copyLogs('addOnTests.pkl', 'addOnTests/logs', os.path.join(self.webTargetDir, 'addOnTests/logs'))
+ print("\n--> going to copy addOn logs to", self.webTargetDir, "... \n")
+ self.copyLogs("addOnTests.log", ".", self.webTargetDir)
+ self.copyLogs("addOnTests.zip", "addOnTests/logs", self.webTargetDir)
+ self.copyLogs(
+ "addOnTests.pkl", "addOnTests/logs", os.path.join(self.webTargetDir, "addOnTests/logs")
+ )
return
def updateIgnominyLogs(self):
- print("\n--> going to copy ignominy logs to", self.webTargetDir, '... \n')
- testLogs = ['dependencies.txt.gz', 'products.txt.gz', 'logwarnings.gz', 'metrics']
+ print("\n--> going to copy ignominy logs to", self.webTargetDir, "... \n")
+ testLogs = ["dependencies.txt.gz", "products.txt.gz", "logwarnings.gz", "metrics"]
for tl in testLogs:
- self.copyLogs(tl, 'igRun', os.path.join(self.webTargetDir, 'igRun'))
+ self.copyLogs(tl, "igRun", os.path.join(self.webTargetDir, "igRun"))
return
def updateProductionRelValLogs(self, workFlows):
- print("\n--> going to copy Production RelVals logs to", self.webTargetDir, '... \n')
- wwwProdDir = os.path.join(self.webTargetDir, 'prodRelVal')
- self.copyLogs('prodRelVal.log', '.', wwwProdDir)
+ print("\n--> going to copy Production RelVals logs to", self.webTargetDir, "... \n")
+ wwwProdDir = os.path.join(self.webTargetDir, "prodRelVal")
+ self.copyLogs("prodRelVal.log", ".", wwwProdDir)
for wf in workFlows:
- self.copyLogs('timingInfo.txt', 'prodRelVal/wf/' + wf, os.path.join(wwwProdDir, 'wf', wf))
+ self.copyLogs(
+ "timingInfo.txt", "prodRelVal/wf/" + wf, os.path.join(wwwProdDir, "wf", wf)
+ )
return
- def updateBuildSetLogs(self, appType='fwlite'):
- print("\n--> going to copy BuildSet logs to", self.webTargetDir, '... \n')
- wwwBSDir = os.path.join(self.webTargetDir, 'BuildSet')
- self.copyLogs(appType, 'BuildSet', wwwBSDir)
+ def updateBuildSetLogs(self, appType="fwlite"):
+ print("\n--> going to copy BuildSet logs to", self.webTargetDir, "... \n")
+ wwwBSDir = os.path.join(self.webTargetDir, "BuildSet")
+ self.copyLogs(appType, "BuildSet", wwwBSDir)
return
def copyLogs(self, what, logSubDir="", tgtDirIn=None):
- if not tgtDirIn: tgtDirIn = self.webTargetDir
+ if not tgtDirIn:
+ tgtDirIn = self.webTargetDir
self.runRemoteCmd("mkdir -p " + tgtDirIn)
self.copy2Remote(os.path.join(self.cmsswBuildDir, logSubDir, what), tgtDirIn + "/")
@@ -134,9 +169,11 @@ def runRemoteHostCmd(self, cmd, host, debug=True):
else:
for i in range(10):
err, out = doCmd(cmd, debug=debug)
- if not err: return (err, out)
+ if not err:
+ return (err, out)
for l in out.split("\n"):
- if "CONNECTION=OK" in l: return (err, out)
+ if "CONNECTION=OK" in l:
+ return (err, out)
sleep(60)
return doCmd(cmd, debug=debug)
except Exception as e:
@@ -151,7 +188,8 @@ def copy2RemoteHost(self, src, des, host):
else:
for i in range(10):
err, out = doCmd(cmd)
- if not err: return (err, out)
+ if not err:
+ return (err, out)
sleep(60)
return doCmd(cmd)
except Exception as e:
diff --git a/logreaderUtils.py b/logreaderUtils.py
index 5eb621ebb071..1653200c31d2 100644
--- a/logreaderUtils.py
+++ b/logreaderUtils.py
@@ -12,9 +12,7 @@ class ResultTypeEnum(object):
# Do not forget to include to list if ResultTypeEnum is updated
# Will be same ordering as in Log reader interface
-all_controls = [
- ResultTypeEnum.ISSUE, ResultTypeEnum.TEST
-]
+all_controls = [ResultTypeEnum.ISSUE, ResultTypeEnum.TEST]
def add_exception_to_config(line, index, config_list, custom_rule_list=[]):
@@ -23,33 +21,33 @@ def add_exception_to_config(line, index, config_list, custom_rule_list=[]):
# will ignore " IgnoreCompletely" messages
"str_to_match": "Begin(?! IgnoreCompletely)(.*Exception)",
"name": "{0}",
- "control_type": ResultTypeEnum.ISSUE
+ "control_type": ResultTypeEnum.ISSUE,
},
{
"str_to_match": "edm::service::InitRootHandlers",
"name": "Segmentation fault",
- "control_type": ResultTypeEnum.ISSUE
+ "control_type": ResultTypeEnum.ISSUE,
},
{
"str_to_match": "sig_dostack_then_abort",
"name": "sig_dostack_then_abort",
- "control_type": ResultTypeEnum.ISSUE
+ "control_type": ResultTypeEnum.ISSUE,
},
{
"str_to_match": ": runtime error:",
"name": "Runtime error",
- "control_type": ResultTypeEnum.ISSUE
+ "control_type": ResultTypeEnum.ISSUE,
},
{
"str_to_match": ": Assertion .* failed",
"name": "Assertion failure",
- "control_type": ResultTypeEnum.ISSUE
+ "control_type": ResultTypeEnum.ISSUE,
},
{
"str_to_match": "==ERROR: AddressSanitizer:",
"name": "Address Sanitizer error",
- "control_type": ResultTypeEnum.ISSUE
- }
+ "control_type": ResultTypeEnum.ISSUE,
+ },
]
line_nr = index + 1
@@ -64,7 +62,7 @@ def add_exception_to_config(line, index, config_list, custom_rule_list=[]):
"lineStart": line_nr,
"lineEnd": line_nr,
"name": name + " at line #" + str(line_nr),
- "control_type": rule["control_type"]
+ "control_type": rule["control_type"],
}
config_list.append(new_exception_config)
return config_list
diff --git a/logwatch.py b/logwatch.py
index 0ccd5b0b932c..66c8cde15c22 100755
--- a/logwatch.py
+++ b/logwatch.py
@@ -6,70 +6,86 @@
from hashlib import sha256
from time import time
-LOGWATCH_APACHE_IGNORE_AGENTS = ["www.google.com/bot.html", "ahrefs.com", "yandex.com", "www.exabot.com", "www.bing.com"]
+LOGWATCH_APACHE_IGNORE_AGENTS = [
+ "www.google.com/bot.html",
+ "ahrefs.com",
+ "yandex.com",
+ "www.exabot.com",
+ "www.bing.com",
+]
-def run_cmd (cmd, exit_on_error=True):
- err, out = getstatusoutput(cmd)
- if err and exit_on_error:
- print(out)
- exit (1)
- return out
-class logwatch (object):
- def __init__ (self, service, log_dir="/var/log"):
- self.log_dir = join(log_dir,"logwatch_" + service)
+def run_cmd(cmd, exit_on_error=True):
+ err, out = getstatusoutput(cmd)
+ if err and exit_on_error:
+ print(out)
+ exit(1)
+ return out
- def process(self, logs, callback, **kwrds):
- if not logs: return True, 0
- info_file = join(self.log_dir, "info")
- if not exists ("%s/logs" % self.log_dir): run_cmd ("mkdir -p %s/logs" % self.log_dir)
- prev_lnum, prev_hash, count, data = 1, "", 0, []
- if exists(info_file):
- prev_hash,ln = run_cmd("head -1 %s" % info_file).strip().split(" ",1)
- prev_lnum = int(ln)
- if prev_lnum<1: prev_lnum=1
- found = False
- for log in reversed(logs):
- service_log = join (self.log_dir, "logs", basename(log))
- if (len(data)>0) and ((time()-getmtime(log))<600):return True, 0
- if found:
- if exists (service_log):
- run_cmd("rm -f %s" % service_log)
- continue
- else: break
- run_cmd ("rsync -a %s %s" % (log, service_log))
- cur_hash = sha256(run_cmd("head -1 %s" % service_log).encode()).hexdigest()
- data.insert(0,[log , service_log, 1, cur_hash, False])
- if cur_hash == prev_hash:
- found = True
- data[0][2] = prev_lnum
- data[-1][4] = True
- for item in data:
- lnum, service_log = item[2], item[1]
- get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log)
- if lnum<=1: get_lines_cmd = "cat %s" % service_log
- print("Processing %s:%s" % (item[0], str(lnum)))
- lnum -= 1
- xlines = 0
- for line in run_cmd (get_lines_cmd).split ("\n"):
- count += 1
- lnum += 1
- xlines += 1
- try: ok = callback(line, count, **kwrds)
- except: ok = False
- if not ok:
- if (prev_lnum!=lnum) or (prev_hash!=item[3]):
- run_cmd("echo '%s %s' > %s" % (item[3], str(lnum),info_file))
- return ok, count
- if (xlines%1000)==0:
- prev_lnum = lnum
- prev_hash = item[3]
- run_cmd("echo '%s %s' > %s" % (item[3], str(lnum),info_file))
- if (prev_lnum!=lnum) or (prev_hash!=item[3]):
- prev_lnum=lnum
- prev_hash=item[3]
- cmd = "echo '%s %s' > %s" % (item[3], str(lnum),info_file)
- if not item[4]: cmd = cmd + " && rm -f %s" % service_log
- run_cmd(cmd)
- return True, count
+class logwatch(object):
+ def __init__(self, service, log_dir="/var/log"):
+ self.log_dir = join(log_dir, "logwatch_" + service)
+
+ def process(self, logs, callback, **kwrds):
+ if not logs:
+ return True, 0
+ info_file = join(self.log_dir, "info")
+ if not exists("%s/logs" % self.log_dir):
+ run_cmd("mkdir -p %s/logs" % self.log_dir)
+ prev_lnum, prev_hash, count, data = 1, "", 0, []
+ if exists(info_file):
+ prev_hash, ln = run_cmd("head -1 %s" % info_file).strip().split(" ", 1)
+ prev_lnum = int(ln)
+ if prev_lnum < 1:
+ prev_lnum = 1
+ found = False
+ for log in reversed(logs):
+ service_log = join(self.log_dir, "logs", basename(log))
+ if (len(data) > 0) and ((time() - getmtime(log)) < 600):
+ return True, 0
+ if found:
+ if exists(service_log):
+ run_cmd("rm -f %s" % service_log)
+ continue
+ else:
+ break
+ run_cmd("rsync -a %s %s" % (log, service_log))
+ cur_hash = sha256(run_cmd("head -1 %s" % service_log).encode()).hexdigest()
+ data.insert(0, [log, service_log, 1, cur_hash, False])
+ if cur_hash == prev_hash:
+ found = True
+ data[0][2] = prev_lnum
+ data[-1][4] = True
+ for item in data:
+ lnum, service_log = item[2], item[1]
+ get_lines_cmd = "tail -n +%s %s" % (str(lnum), service_log)
+ if lnum <= 1:
+ get_lines_cmd = "cat %s" % service_log
+ print("Processing %s:%s" % (item[0], str(lnum)))
+ lnum -= 1
+ xlines = 0
+ for line in run_cmd(get_lines_cmd).split("\n"):
+ count += 1
+ lnum += 1
+ xlines += 1
+ try:
+ ok = callback(line, count, **kwrds)
+ except:
+ ok = False
+ if not ok:
+ if (prev_lnum != lnum) or (prev_hash != item[3]):
+ run_cmd("echo '%s %s' > %s" % (item[3], str(lnum), info_file))
+ return ok, count
+ if (xlines % 1000) == 0:
+ prev_lnum = lnum
+ prev_hash = item[3]
+ run_cmd("echo '%s %s' > %s" % (item[3], str(lnum), info_file))
+ if (prev_lnum != lnum) or (prev_hash != item[3]):
+ prev_lnum = lnum
+ prev_hash = item[3]
+ cmd = "echo '%s %s' > %s" % (item[3], str(lnum), info_file)
+ if not item[4]:
+ cmd = cmd + " && rm -f %s" % service_log
+ run_cmd(cmd)
+ return True, count
diff --git a/lxr/checkout-version.py b/lxr/checkout-version.py
index b8947ff9872d..16ae4f0c49e1 100755
--- a/lxr/checkout-version.py
+++ b/lxr/checkout-version.py
@@ -3,25 +3,28 @@
from sys import exit
from os.path import isfile, islink
from subprocess import getstatusoutput as cmd
-e, total =cmd("find . -type f | grep -v '/.git/' |wc -l")
-e, o = cmd ('git log --name-only --pretty=format:"T:%at"')
+
+e, total = cmd("find . -type f | grep -v '/.git/' |wc -l")
+e, o = cmd('git log --name-only --pretty=format:"T:%at"')
if e:
- print (o)
- exit(1)
+ print(o)
+ exit(1)
cache = {}
-time=0
-cnt=0
+time = 0
+cnt = 0
for l in o.split("\n"):
- if not l: continue
- if l[:2]=='T:':
- time=int(l[2:])
- continue
- if l in cache: continue
- if isfile(l) and not islink(l):
- cnt += 1
- cache[l]=time
- utime(l, (time, time))
- print ("[%s/%s] %s: %s" % (cnt, total, l, time))
- else:
- cache[l]=0
+ if not l:
+ continue
+ if l[:2] == "T:":
+ time = int(l[2:])
+ continue
+ if l in cache:
+ continue
+ if isfile(l) and not islink(l):
+ cnt += 1
+ cache[l] = time
+ utime(l, (time, time))
+ print("[%s/%s] %s: %s" % (cnt, total, l, time))
+ else:
+ cache[l] = 0
diff --git a/mark_commit_status.py b/mark_commit_status.py
index 86dfed31b503..22a2dd7507d5 100755
--- a/mark_commit_status.py
+++ b/mark_commit_status.py
@@ -8,33 +8,92 @@
from __future__ import print_function
from optparse import OptionParser
-from github_utils import api_rate_limits, mark_commit_status, get_combined_statuses, get_pr_latest_commit
+from github_utils import (
+ api_rate_limits,
+ mark_commit_status,
+ get_combined_statuses,
+ get_pr_latest_commit,
+)
from sys import exit
if __name__ == "__main__":
- parser = OptionParser(usage="%prog")
- parser.add_option("-c", "--commit", dest="commit", help="git commit for which set the status", type=str, default=None)
- parser.add_option("-p", "--pr", dest="pr", help="github pr for which set the status", type=str, default=None)
- parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw")
- parser.add_option("-d", "--description", dest="description", help="Description of the status", type=str, default="Test running")
- parser.add_option("-C", "--context", dest="context", help="Status context", type=str, default="default")
- parser.add_option("-u", "--url", dest="url", help="Status results URL", type=str, default="")
- parser.add_option("-s", "--state", dest="state", help="State of the status e.g. pending, failure, error or success", type=str, default='pending')
- parser.add_option("-R", "--reset-all", dest="reset_all", help="Reset all matching contexts", action="store_true", default=False)
- parser.add_option("-e", "--if-exists", dest="if_exists", help="Only set the status if context already exists", action="store_true", default=False)
- opts, args = parser.parse_args()
-
- if opts.pr:
- opts.commit = get_pr_latest_commit(opts.pr, opts.repository)
- if opts.if_exists:
- statues = get_combined_statuses(opts.commit, opts.repository)
- if 'statuses' in statues:
- found = False
- for s in statues['statuses']:
- if s['context'] != opts.context:
- continue
- found = True
- break
- if not found: exit(0)
- mark_commit_status(opts.commit, opts.repository, opts.context, opts.state, opts.url, opts.description, reset=opts.reset_all)
+ parser = OptionParser(usage="%prog")
+ parser.add_option(
+ "-c",
+ "--commit",
+ dest="commit",
+ help="git commit for which set the status",
+ type=str,
+ default=None,
+ )
+ parser.add_option(
+ "-p", "--pr", dest="pr", help="github pr for which set the status", type=str, default=None
+ )
+ parser.add_option(
+ "-r",
+ "--repository",
+ dest="repository",
+ help="Github Repositoy name e.g. cms-sw/cmssw.",
+ type=str,
+ default="cms-sw/cmssw",
+ )
+ parser.add_option(
+ "-d",
+ "--description",
+ dest="description",
+ help="Description of the status",
+ type=str,
+ default="Test running",
+ )
+ parser.add_option(
+ "-C", "--context", dest="context", help="Status context", type=str, default="default"
+ )
+ parser.add_option("-u", "--url", dest="url", help="Status results URL", type=str, default="")
+ parser.add_option(
+ "-s",
+ "--state",
+ dest="state",
+ help="State of the status e.g. pending, failure, error or success",
+ type=str,
+ default="pending",
+ )
+ parser.add_option(
+ "-R",
+ "--reset-all",
+ dest="reset_all",
+ help="Reset all matching contexts",
+ action="store_true",
+ default=False,
+ )
+ parser.add_option(
+ "-e",
+ "--if-exists",
+ dest="if_exists",
+ help="Only set the status if context already exists",
+ action="store_true",
+ default=False,
+ )
+ opts, args = parser.parse_args()
+ if opts.pr:
+ opts.commit = get_pr_latest_commit(opts.pr, opts.repository)
+ if opts.if_exists:
+ statues = get_combined_statuses(opts.commit, opts.repository)
+ if "statuses" in statues:
+ found = False
+ for s in statues["statuses"]:
+ if s["context"] != opts.context:
+ continue
+ found = True
+ break
+ if not found:
+ exit(0)
+ mark_commit_status(
+ opts.commit,
+ opts.repository,
+ opts.context,
+ opts.state,
+ opts.url,
+ opts.description,
+ reset=opts.reset_all,
+ )
diff --git a/material_budget_ref.py b/material_budget_ref.py
index cc61e1510c0c..02247cf6ce3d 100644
--- a/material_budget_ref.py
+++ b/material_budget_ref.py
@@ -1,23 +1,26 @@
from __future__ import print_function
+
MATERIAL_BUDGET_REF = {
- "CMSSW_8_1_X" : "CMSSW_8_1_X_2017-03-12-0000",
- "CMSSW_9_0_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_9_1_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_9_2_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_9_3_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_9_4_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_0_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_1_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_2_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_3_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_4_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_5_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_10_6_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_11_0_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_11_1_X" : "CMSSW_9_0_X_2017-03-14-1100",
- "CMSSW_11_2_X" : "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_8_1_X": "CMSSW_8_1_X_2017-03-12-0000",
+ "CMSSW_9_0_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_9_1_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_9_2_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_9_3_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_9_4_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_0_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_1_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_2_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_3_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_4_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_5_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_10_6_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_11_0_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_11_1_X": "CMSSW_9_0_X_2017-03-14-1100",
+ "CMSSW_11_2_X": "CMSSW_9_0_X_2017-03-14-1100",
}
+
def get_ref():
- from os import environ
- print(MATERIAL_BUDGET_REF["_".join(environ['CMSSW_VERSION'].split("_")[0:3])+"_X"])
+ from os import environ
+
+ print(MATERIAL_BUDGET_REF["_".join(environ["CMSSW_VERSION"].split("_")[0:3]) + "_X"])
diff --git a/milestones.py b/milestones.py
index be39edf22a7b..3bfbba15034a 100644
--- a/milestones.py
+++ b/milestones.py
@@ -1,65 +1,65 @@
-#Map of cmssw branch to milestone
+# Map of cmssw branch to milestone
RELEASE_BRANCH_MILESTONE = {
- "CMSSW_9_2_6_patchX": 70,
- "CMSSW_9_2_3_patchX": 68,
- "CMSSW_9_2_0_patchX": 67,
- "CMSSW_8_0_10_patchX": 63,
- "CMSSW_8_0_8_patchX": 62,
- "CMSSW_7_5_5_patchX": 58,
- "CMSSW_8_0_X": 57,
- "CMSSW_7_6_X": 55,
- "CMSSW_7_5_X": 51,
- "CMSSW_7_4_X": 50,
- "CMSSW_7_3_X": 49,
- "CMSSW_7_0_X": 38,
- "CMSSW_7_1_X": 47,
- "CMSSW_7_2_X": 48,
- "CMSSW_6_2_X": 21,
- "CMSSW_6_2_X_SLHC": 9,
- "CMSSW_5_3_X": 20,
- "CMSSW_4_4_X": 8,
- "CMSSW_4_2_X": 35,
- "CMSSW_4_1_X": 7,
- "CMSSW_6_2_SLHCDEV_X": 52,
- "CMSSW_7_1_4_patchX": 53,
- "CMSSW_7_4_1_patchX": 54,
- "CMSSW_7_4_12_patchX": 56,
+ "CMSSW_9_2_6_patchX": 70,
+ "CMSSW_9_2_3_patchX": 68,
+ "CMSSW_9_2_0_patchX": 67,
+ "CMSSW_8_0_10_patchX": 63,
+ "CMSSW_8_0_8_patchX": 62,
+ "CMSSW_7_5_5_patchX": 58,
+ "CMSSW_8_0_X": 57,
+ "CMSSW_7_6_X": 55,
+ "CMSSW_7_5_X": 51,
+ "CMSSW_7_4_X": 50,
+ "CMSSW_7_3_X": 49,
+ "CMSSW_7_0_X": 38,
+ "CMSSW_7_1_X": 47,
+ "CMSSW_7_2_X": 48,
+ "CMSSW_6_2_X": 21,
+ "CMSSW_6_2_X_SLHC": 9,
+ "CMSSW_5_3_X": 20,
+ "CMSSW_4_4_X": 8,
+ "CMSSW_4_2_X": 35,
+ "CMSSW_4_1_X": 7,
+ "CMSSW_6_2_SLHCDEV_X": 52,
+ "CMSSW_7_1_4_patchX": 53,
+ "CMSSW_7_4_1_patchX": 54,
+ "CMSSW_7_4_12_patchX": 56,
}
-#PR created for these BRANCHES will be closed by cms-bot
+# PR created for these BRANCHES will be closed by cms-bot
RELEASE_BRANCH_CLOSED = [
- "CMSSW_4_1_X",
- "CMSSW_4_2_X",
- "CMSSW_4_4_X",
- "CMSSW_6_1_X",
- "CMSSW_6_1_X_SLHC",
- "CMSSW_6_2_X",
- "CMSSW_7_0_X",
- "CMSSW_.+_Patatrack_X",
+ "CMSSW_4_1_X",
+ "CMSSW_4_2_X",
+ "CMSSW_4_4_X",
+ "CMSSW_6_1_X",
+ "CMSSW_6_1_X_SLHC",
+ "CMSSW_6_2_X",
+ "CMSSW_7_0_X",
+ "CMSSW_.+_Patatrack_X",
]
-#All these releases require ORP signicatures
+# All these releases require ORP signatures
RELEASE_BRANCH_PRODUCTION = [
- "CMSSW_8_0_X",
- "CMSSW_7_6_X",
- "CMSSW_7_5_X",
- "CMSSW_7_4_X",
- "CMSSW_7_3_X",
- "CMSSW_7_2_X",
- "CMSSW_7_1_X",
- "CMSSW_7_0_X",
- "CMSSW_6_2_X_SLHC",
- "CMSSW_5_3_X",
+ "CMSSW_8_0_X",
+ "CMSSW_7_6_X",
+ "CMSSW_7_5_X",
+ "CMSSW_7_4_X",
+ "CMSSW_7_3_X",
+ "CMSSW_7_2_X",
+ "CMSSW_7_1_X",
+ "CMSSW_7_0_X",
+ "CMSSW_6_2_X_SLHC",
+ "CMSSW_5_3_X",
]
SPECIAL_RELEASE_MANAGERS = []
-RELEASE_MANAGERS={}
-RELEASE_MANAGERS["CMSSW_.+_Patatrack_X"]=["fwyzard"]
+RELEASE_MANAGERS = {}
+RELEASE_MANAGERS["CMSSW_.+_Patatrack_X"] = ["fwyzard"]
######################################################################
# Automatically added by cms-bot for CMSSW_8_1_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_8_1_X"]=59
+RELEASE_BRANCH_MILESTONE["CMSSW_8_1_X"] = 59
RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_8_1_ROOT6_X")
@@ -67,33 +67,33 @@
######################################################################
# Automatically added by cms-bot for CMSSW_8_0_0_patchX release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_8_0_0_patchX"]=60
+RELEASE_BRANCH_MILESTONE["CMSSW_8_0_0_patchX"] = 60
-#CMSSW_9_0_X release cycle
-RELEASE_BRANCH_MILESTONE["CMSSW_9_0_X"]=64
+# CMSSW_9_0_X release cycle
+RELEASE_BRANCH_MILESTONE["CMSSW_9_0_X"] = 64
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_0_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_0_ROOT6_X")
-#CMSSW_9_1_X release cycle
-RELEASE_BRANCH_MILESTONE["CMSSW_9_1_X"]=65
+# CMSSW_9_1_X release cycle
+RELEASE_BRANCH_MILESTONE["CMSSW_9_1_X"] = 65
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_1_DEVEL_X")
-#CMSSW_9_2_X release cycle
-RELEASE_BRANCH_MILESTONE["CMSSW_9_2_X"]=66
+# CMSSW_9_2_X release cycle
+RELEASE_BRANCH_MILESTONE["CMSSW_9_2_X"] = 66
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_2_DEVEL_X")
-#CMSSW_9_3_X release cycle
-RELEASE_BRANCH_MILESTONE["CMSSW_9_3_X"]=69
+# CMSSW_9_3_X release cycle
+RELEASE_BRANCH_MILESTONE["CMSSW_9_3_X"] = 69
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_3_DEVEL_X")
-#CMSSW_9_4_X release cycle
-RELEASE_BRANCH_MILESTONE["CMSSW_9_4_X"]=71
+# CMSSW_9_4_X release cycle
+RELEASE_BRANCH_MILESTONE["CMSSW_9_4_X"] = 71
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_DEVEL_X")
@@ -101,7 +101,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_10_0_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_0_X"]=72
+RELEASE_BRANCH_MILESTONE["CMSSW_10_0_X"] = 72
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_0_DEVEL_X")
@@ -109,7 +109,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_10_1_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_1_X"]=73
+RELEASE_BRANCH_MILESTONE["CMSSW_10_1_X"] = 73
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_1_DEVEL_X")
@@ -117,20 +117,20 @@
######################################################################
# Manually added by Shahzad MUZAFFAR for CMSSW_9_4_MAOD_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_9_4_MAOD_X"]=74
+RELEASE_BRANCH_MILESTONE["CMSSW_9_4_MAOD_X"] = 74
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_MAOD_X")
######################################################################
# Manually added by Shahzad MUZAFFAR for CMSSW_9_4_AN_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_9_4_AN_X"]=75
+RELEASE_BRANCH_MILESTONE["CMSSW_9_4_AN_X"] = 75
RELEASE_BRANCH_PRODUCTION.append("CMSSW_9_4_AN_X")
######################################################################
# Automatically added by cms-bot for CMSSW_10_2_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_2_X"]=76
+RELEASE_BRANCH_MILESTONE["CMSSW_10_2_X"] = 76
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_2_DEVEL_X")
@@ -138,7 +138,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_10_3_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_3_X"]=77
+RELEASE_BRANCH_MILESTONE["CMSSW_10_3_X"] = 77
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_3_DEVEL_X")
@@ -146,7 +146,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_10_4_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_4_X"]=78
+RELEASE_BRANCH_MILESTONE["CMSSW_10_4_X"] = 78
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_4_DEVEL_X")
@@ -155,19 +155,19 @@
######################################################################
# Automatically added by cms-bot for CMSSW_10_5_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_5_X"]=79
+RELEASE_BRANCH_MILESTONE["CMSSW_10_5_X"] = 79
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_5_X")
######################################################################
# Automatically added by cms-bot for CMSSW_10_6_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_10_6_X"]=80
+RELEASE_BRANCH_MILESTONE["CMSSW_10_6_X"] = 80
RELEASE_BRANCH_PRODUCTION.append("CMSSW_10_6_X")
######################################################################
# Automatically added by cms-bot for CMSSW_11_0_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_11_0_X"]=81
+RELEASE_BRANCH_MILESTONE["CMSSW_11_0_X"] = 81
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_0_CXXMODULE_X")
@@ -176,14 +176,14 @@
######################################################################
# Automatically added by cms-bot for CMSSW_11_1_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_11_1_X"]=82
+RELEASE_BRANCH_MILESTONE["CMSSW_11_1_X"] = 82
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_1_DEVEL_X")
######################################################################
# Automatically added by cms-bot for CMSSW_11_2_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_11_2_X"]=83
+RELEASE_BRANCH_MILESTONE["CMSSW_11_2_X"] = 83
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_CLANG_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_2_Patatrack_X")
@@ -192,7 +192,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_11_3_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_11_3_X"]=84
+RELEASE_BRANCH_MILESTONE["CMSSW_11_3_X"] = 84
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_CLANG_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_11_3_DEVEL_X")
@@ -201,7 +201,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_0_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_0_X"]=85
+RELEASE_BRANCH_MILESTONE["CMSSW_12_0_X"] = 85
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_Patatrack_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_0_GEANT4_X")
@@ -210,7 +210,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_1_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_1_X"]=86
+RELEASE_BRANCH_MILESTONE["CMSSW_12_1_X"] = 86
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_GEANT4_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_1_DEVEL_X")
@@ -219,7 +219,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_2_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_2_X"]=87
+RELEASE_BRANCH_MILESTONE["CMSSW_12_2_X"] = 87
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_2_ROOT6_X")
@@ -227,7 +227,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_3_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_3_X"]=88
+RELEASE_BRANCH_MILESTONE["CMSSW_12_3_X"] = 88
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_3_ROOT6_X")
@@ -235,7 +235,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_4_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_4_X"]=89
+RELEASE_BRANCH_MILESTONE["CMSSW_12_4_X"] = 89
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_4_ROOT6_X")
@@ -243,7 +243,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_5_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_5_X"]=90
+RELEASE_BRANCH_MILESTONE["CMSSW_12_5_X"] = 90
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_5_DEVEL_X")
@@ -251,7 +251,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_12_6_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_12_6_X"]=91
+RELEASE_BRANCH_MILESTONE["CMSSW_12_6_X"] = 91
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_12_6_DEVEL_X")
@@ -259,7 +259,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_13_0_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_13_0_X"]=92
+RELEASE_BRANCH_MILESTONE["CMSSW_13_0_X"] = 92
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_ROOT6_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_0_DEVEL_X")
@@ -267,7 +267,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_13_1_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_13_1_X"]=93
+RELEASE_BRANCH_MILESTONE["CMSSW_13_1_X"] = 93
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_1_ROOT6_X")
@@ -275,7 +275,7 @@
######################################################################
# Automatically added by cms-bot for CMSSW_13_2_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_13_2_X"]=94
+RELEASE_BRANCH_MILESTONE["CMSSW_13_2_X"] = 94
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_DEVEL_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_2_ROOT6_X")
@@ -283,6 +283,6 @@
######################################################################
# Automatically added by cms-bot for CMSSW_13_3_X release cycle
######################################################################
-RELEASE_BRANCH_MILESTONE["CMSSW_13_3_X"]=95
+RELEASE_BRANCH_MILESTONE["CMSSW_13_3_X"] = 95
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_3_X")
RELEASE_BRANCH_PRODUCTION.append("CMSSW_13_3_DEVEL_X")
diff --git a/modify_comment.py b/modify_comment.py
index ee4882ddca35..754e375e01cc 100755
--- a/modify_comment.py
+++ b/modify_comment.py
@@ -7,38 +7,79 @@
from optparse import OptionParser
import sys
from socket import setdefaulttimeout
+
setdefaulttimeout(120)
SCRIPT_DIR = dirname(abspath(sys.argv[0]))
valid_types = {}
-valid_types['JENKINS_TEST_URL']=[ "", None ]
-valid_types['JENKINS_STYLE_URL']=[ "", None ]
+valid_types["JENKINS_TEST_URL"] = ["", None]
+valid_types["JENKINS_STYLE_URL"] = ["", None]
all_types = "|".join(valid_types)
if __name__ == "__main__":
- parser = OptionParser(usage="%prog [-n|--dry-run] [-r|--repository ] -t|--type "+all_types+" -m|--message ")
- parser.add_option("-n", "--dry-run", dest="dryRun", action="store_true", help="Do not modify Github", default=False)
- parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw")
- parser.add_option("-t", "--type", dest="msgtype", help="Message type e.g. JENKINS_TEST_URL", type=str, default=None)
- parser.add_option("-m", "--message", dest="message", help="Message to be appened to the existing comment e.g. url of jenkins test job.", type=str, default=None)
+ parser = OptionParser(
+ usage="%prog [-n|--dry-run] [-r|--repository ] -t|--type "
+ + all_types
+ + " -m|--message "
+ )
+ parser.add_option(
+ "-n",
+ "--dry-run",
+ dest="dryRun",
+ action="store_true",
+ help="Do not modify Github",
+ default=False,
+ )
+ parser.add_option(
+ "-r",
+ "--repository",
+ dest="repository",
+        help="Github Repository name e.g. cms-sw/cmssw.",
+ type=str,
+ default="cms-sw/cmssw",
+ )
+ parser.add_option(
+ "-t",
+ "--type",
+ dest="msgtype",
+ help="Message type e.g. JENKINS_TEST_URL",
+ type=str,
+ default=None,
+ )
+ parser.add_option(
+ "-m",
+ "--message",
+ dest="message",
+        help="Message to be appended to the existing comment e.g. url of jenkins test job.",
+ type=str,
+ default=None,
+ )
+
+ opts, args = parser.parse_args()
+ if len(args) != 1:
+ parser.error("Too many/few arguments")
+ if not opts.message:
+ parser.error("Missing message to append")
+ if not opts.msgtype:
+ parser.error("Missing message type")
+ if not opts.msgtype in valid_types:
+ parser.error("Invalid message type " + opts.msgtype)
+
+ repo_dir = join(SCRIPT_DIR, "repos", opts.repository.replace("-", "_"))
+ if exists(join(repo_dir, "repo_config.py")):
+ sys.path.insert(0, repo_dir)
+ import repo_config
+ from process_pr import modify_comment, find_last_comment
+ from process_pr import TRIGERING_TESTS_MSG, TRIGERING_STYLE_TEST_MSG
- opts, args = parser.parse_args()
- if len(args) != 1: parser.error("Too many/few arguments")
- if not opts.message: parser.error("Missing message to append")
- if not opts.msgtype: parser.error("Missing message type")
- if not opts.msgtype in valid_types: parser.error("Invalid message type "+opts.msgtype)
-
- repo_dir = join(SCRIPT_DIR,'repos',opts.repository.replace("-","_"))
- if exists(join(repo_dir,"repo_config.py")): sys.path.insert(0,repo_dir)
- import repo_config
- from process_pr import modify_comment, find_last_comment
- from process_pr import TRIGERING_TESTS_MSG, TRIGERING_STYLE_TEST_MSG
- valid_types['JENKINS_TEST_URL']=[ "^\s*"+TRIGERING_TESTS_MSG+".*$", None ]
- valid_types['JENKINS_STYLE_URL']=[ "^\s*"+TRIGERING_STYLE_TEST_MSG+".*$", None ]
- gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip())
- issue = gh.get_repo(opts.repository).get_issue(int(args[0]))
- last_comment = find_last_comment(issue, repo_config.CMSBUILD_USER ,valid_types[opts.msgtype][0])
- if not last_comment:
- print("Warning: Not comment matched")
- sys.exit(1)
- print(last_comment.body)
- sys.exit(modify_comment(last_comment,valid_types[opts.msgtype][1],opts.message,opts.dryRun))
+ valid_types["JENKINS_TEST_URL"] = ["^\s*" + TRIGERING_TESTS_MSG + ".*$", None]
+ valid_types["JENKINS_STYLE_URL"] = ["^\s*" + TRIGERING_STYLE_TEST_MSG + ".*$", None]
+ gh = Github(login_or_token=open(expanduser(repo_config.GH_TOKEN)).read().strip())
+ issue = gh.get_repo(opts.repository).get_issue(int(args[0]))
+ last_comment = find_last_comment(
+ issue, repo_config.CMSBUILD_USER, valid_types[opts.msgtype][0]
+ )
+ if not last_comment:
+        print("Warning: No comment matched")
+ sys.exit(1)
+ print(last_comment.body)
+ sys.exit(modify_comment(last_comment, valid_types[opts.msgtype][1], opts.message, opts.dryRun))
diff --git a/monitor_workflow.py b/monitor_workflow.py
index b03c7eab9196..27a482346b49 100755
--- a/monitor_workflow.py
+++ b/monitor_workflow.py
@@ -6,65 +6,100 @@
from threading import Thread
import subprocess
-job = {'exit_code':0, 'command':'true'}
-def run_job(job): job['exit_code']=subprocess.call(job['command'])
+job = {"exit_code": 0, "command": "true"}
+
+
+def run_job(job):
+ job["exit_code"] = subprocess.call(job["command"])
+
def update_stats(proc):
- stats = {"rss":0, "vms":0, "shared":0, "data":0, "uss":0, "pss":0,"num_fds":0,"num_threads":0, "processes":0, "cpu": 0}
- children = proc.children(recursive=True)
- clds = len(children)
- if clds==0: return stats
- stats['processes'] = clds
- for cld in children:
- try:
- cld.cpu_percent(interval=None)
- sleep(0.1)
- stats['cpu'] += int(cld.cpu_percent(interval=None))
- stats['num_fds'] += cld.num_fds()
- stats['num_threads'] += cld.num_threads()
- mem = None
- try:
- mem = cld.memory_full_info()
- for a in ["uss", "pss"]: stats[a]+=getattr(mem,a)
- except:
- try: mem = cld.memory_info()
- except: mem = cld.memory_info_ex()
- for a in ["rss", "vms", "shared", "data"]: stats[a]+=getattr(mem,a)
- except: pass
- return stats
+ stats = {
+ "rss": 0,
+ "vms": 0,
+ "shared": 0,
+ "data": 0,
+ "uss": 0,
+ "pss": 0,
+ "num_fds": 0,
+ "num_threads": 0,
+ "processes": 0,
+ "cpu": 0,
+ }
+ children = proc.children(recursive=True)
+ clds = len(children)
+ if clds == 0:
+ return stats
+ stats["processes"] = clds
+ for cld in children:
+ try:
+ cld.cpu_percent(interval=None)
+ sleep(0.1)
+ stats["cpu"] += int(cld.cpu_percent(interval=None))
+ stats["num_fds"] += cld.num_fds()
+ stats["num_threads"] += cld.num_threads()
+ mem = None
+ try:
+ mem = cld.memory_full_info()
+ for a in ["uss", "pss"]:
+ stats[a] += getattr(mem, a)
+ except:
+ try:
+ mem = cld.memory_info()
+ except:
+ mem = cld.memory_info_ex()
+ for a in ["rss", "vms", "shared", "data"]:
+ stats[a] += getattr(mem, a)
+ except:
+ pass
+ return stats
+
def monitor(stop):
- stime = int(time())
- p = psutil.Process(getpid())
- cmdline = " ".join(p.parent().cmdline())
- if "cmsDriver.py " in cmdline:
- cmdargs=cmdline.split("cmsDriver.py ",1)[1].strip()
- step=None
- if cmdargs.startswith("step"):
- step=cmdargs.split(" ")[0]
- elif ' --fileout ' in cmdargs:
- step =cmdargs.split(' --fileout ',1)[1].strip().split(" ")[0].replace("file:","").replace(".root","")
- if not "step" in step: step="step1"
- else: step=stime
- data = []
- sleep_time = 1
- while not stop():
- try:
- stats = update_stats(p)
- if stats['processes']==0: break
- sleep_time = 1.0-stats['processes']*0.1
- stats['time'] = int(time()-stime)
- data.append(stats)
- except: pass
- if sleep_time>0.1: sleep(sleep_time)
- from json import dump
- stat_file =open("wf_stats-%s.json" % step,"w")
- dump(data, stat_file)
- stat_file.close()
- return
+ stime = int(time())
+ p = psutil.Process(getpid())
+ cmdline = " ".join(p.parent().cmdline())
+ if "cmsDriver.py " in cmdline:
+ cmdargs = cmdline.split("cmsDriver.py ", 1)[1].strip()
+ step = None
+ if cmdargs.startswith("step"):
+ step = cmdargs.split(" ")[0]
+ elif " --fileout " in cmdargs:
+ step = (
+ cmdargs.split(" --fileout ", 1)[1]
+ .strip()
+ .split(" ")[0]
+ .replace("file:", "")
+ .replace(".root", "")
+ )
+ if not "step" in step:
+ step = "step1"
+ else:
+ step = stime
+ data = []
+ sleep_time = 1
+ while not stop():
+ try:
+ stats = update_stats(p)
+ if stats["processes"] == 0:
+ break
+ sleep_time = 1.0 - stats["processes"] * 0.1
+ stats["time"] = int(time() - stime)
+ data.append(stats)
+ except:
+ pass
+ if sleep_time > 0.1:
+ sleep(sleep_time)
+ from json import dump
+
+ stat_file = open("wf_stats-%s.json" % step, "w")
+ dump(data, stat_file)
+ stat_file.close()
+ return
+
stop_monitoring = False
-job['command']=argv[1:]
+job["command"] = argv[1:]
job_thd = Thread(target=run_job, args=(job,))
mon_thd = Thread(target=monitor, args=(lambda: stop_monitoring,))
job_thd.start()
@@ -73,5 +108,4 @@ def monitor(stop):
job_thd.join()
stop_monitoring = True
mon_thd.join()
-exit(job['exit_code'])
-
+exit(job["exit_code"])
diff --git a/package2category.py b/package2category.py
index f1702d153c9e..1200f503a02d 100755
--- a/package2category.py
+++ b/package2category.py
@@ -3,14 +3,19 @@
from categories_map import CMSSW_CATEGORIES
import sys
+
def package2category(filename):
- if not filename: return
- file_pack = '/'.join(filename.split('/')[:2])
- cat = 'unknown'
- if file_pack in pack2cat: cat = '-'.join(sorted(pack2cat[file_pack]))
- if not cat in cats: cats[cat] = {}
+ if not filename:
+ return
+ file_pack = "/".join(filename.split("/")[:2])
+ cat = "unknown"
+ if file_pack in pack2cat:
+ cat = "-".join(sorted(pack2cat[file_pack]))
+ if not cat in cats:
+ cats[cat] = {}
cats[cat][file_pack] = 1
+
pack2cat = {}
for cat in CMSSW_CATEGORIES:
for pack in CMSSW_CATEGORIES[cat]:
@@ -26,4 +31,4 @@ def package2category(filename):
package2category(line.strip())
for cat in cats:
- print ("%s %s" % (cat, " ".join(cats[cat].keys())))
+ print("%s %s" % (cat, " ".join(cats[cat].keys())))
diff --git a/parse_iwyu_logs.py b/parse_iwyu_logs.py
index dc09eb457999..7d3103938a38 100755
--- a/parse_iwyu_logs.py
+++ b/parse_iwyu_logs.py
@@ -1,14 +1,16 @@
#!/bin/env python
from __future__ import print_function
-import sys , json
-fd=open(sys.argv[1],'r')
+import sys, json
+
+fd = open(sys.argv[1], "r")
info = {}
-includes=0
-excludes=0
-pkg_name = '/'.join(sys.argv[1].split('/')[-3:-1])
-files=0
-splitline = sys.argv[2] + '/src/'
-print("""
+includes = 0
+excludes = 0
+pkg_name = "/".join(sys.argv[1].split("/")[-3:-1])
+files = 0
+splitline = sys.argv[2] + "/src/"
+print(
+ """
-""")
-print('' + 'Access BuildLog' + ' ')
+"""
+)
+print("" + "Access BuildLog" + " ")
print('
')
lines_seen = set()
for l in fd:
- if 'remove these lines' in l and l not in lines_seen:
- lines_seen.add(l)
- sec=iter(fd)
- line=next(sec)
- line=line.rstrip()
- if len(line):
- files += 1
- items = l.split(splitline)[-1].split(" ",1)
- print('
')
-
- elif 'add these lines' in l and l not in lines_seen:
- lines_seen.add(l)
- sec=iter(fd)
- line=next(sec)
- line=line.rstrip()
- if len(line):
- files += 1
- items = l.split(splitline)[-1].split(" ",1)
- print('
')
-stat = [ files , includes , excludes ]
+ if "remove these lines" in l and l not in lines_seen:
+ lines_seen.add(l)
+ sec = iter(fd)
+ line = next(sec)
+ line = line.rstrip()
+ if len(line):
+ files += 1
+ items = l.split(splitline)[-1].split(" ", 1)
+ print(
+ '
"
+ + items[0]
+ + " "
+ + items[1]
+ + ""
+ )
+ while len(line):
+ excludes += 1
+ line = line.replace("<", "<")
+ line = line.replace(">", ">")
+ print(" " + line)
+ line = next(sec)
+ line = line.rstrip()
+ print("
")
+
+ elif "add these lines" in l and l not in lines_seen:
+ lines_seen.add(l)
+ sec = iter(fd)
+ line = next(sec)
+ line = line.rstrip()
+ if len(line):
+ files += 1
+ items = l.split(splitline)[-1].split(" ", 1)
+ print(
+ '
"
+ + items[0]
+ + " "
+ + items[1]
+ + ""
+ )
+ while len(line):
+ includes += 1
+ line = line.replace("<", "<")
+ line = line.replace(">", ">")
+ print(" " + line)
+ line = next(sec)
+ line = line.rstrip()
+ print("
")
+print("
")
+stat = [files, includes, excludes]
info[pkg_name] = stat
-output_file = open('stats.json', 'a')
+output_file = open("stats.json", "a")
output_file.write(json.dumps(info))
output_file.close()
diff --git a/parse_jenkins_builds.py b/parse_jenkins_builds.py
index 31eed14bc51d..d8c0a49afab7 100755
--- a/parse_jenkins_builds.py
+++ b/parse_jenkins_builds.py
@@ -1,69 +1,90 @@
#!/usr/bin/env python3
from __future__ import print_function
from hashlib import sha1
-import os , re , sys , json, datetime, time, functools
+import os, re, sys, json, datetime, time, functools
import xml.etree.ElementTree as ET
import subprocess
-from es_utils import send_payload,get_payload,resend_payload,get_payload_wscroll
+from es_utils import send_payload, get_payload, resend_payload, get_payload_wscroll
+
+JENKINS_PREFIX = "jenkins"
+try:
+ JENKINS_PREFIX = os.environ["JENKINS_URL"].strip("/").split("/")[-1]
+except:
+ JENKINS_PREFIX = "jenkins"
+LOCAL_JENKINS_URL = os.environ["LOCAL_JENKINS_URL"]
-JENKINS_PREFIX="jenkins"
-try: JENKINS_PREFIX=os.environ['JENKINS_URL'].strip("/").split("/")[-1]
-except: JENKINS_PREFIX="jenkins"
-LOCAL_JENKINS_URL = os.environ['LOCAL_JENKINS_URL']
def findParametersAction(root):
- if root.tag=='parameters': return root
- for x in root:
- p=findParametersAction(x)
- if p is not None: return p
- return None
+ if root.tag == "parameters":
+ return root
+ for x in root:
+ p = findParametersAction(x)
+ if p is not None:
+ return p
+ return None
+
def getParameters(root, payload):
- n=root.find('name')
- if n is not None:
- if n.text is None: return
- v=root.find('value')
- vv = "None"
- if v is not None: vv = str(v.text)
- payload['parameter_'+n.text]=vv
- else:
- for x in root: getParameters(x, payload)
+ n = root.find("name")
+ if n is not None:
+ if n.text is None:
+ return
+ v = root.find("value")
+ vv = "None"
+ if v is not None:
+ vv = str(v.text)
+ payload["parameter_" + n.text] = vv
+ else:
+ for x in root:
+ getParameters(x, payload)
+
def get_current_time():
- """Returns current time in milliseconds. """
+ """Returns current time in milliseconds."""
current_time = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
- current_time = round(current_time.total_seconds()*1000)
+ current_time = round(current_time.total_seconds() * 1000)
return current_time
+
def display_build_info(build_id, build_payload):
- """Display id, job name, build number and waiting time for a conrete build in queue. """
- print("==> ", str(build_id) + " " + str(build_payload["job_name"]) + " #" + str(build_payload["queue_id"]))
- wait_time = build_payload["wait_time"]/1000
- print("Time in queue (minutes): ", str(wait_time/60))
+    """Display id, job name, build number and waiting time for a concrete build in queue."""
+ print(
+ "==> ",
+ str(build_id)
+ + " "
+ + str(build_payload["job_name"])
+ + " #"
+ + str(build_payload["queue_id"]),
+ )
+ wait_time = build_payload["wait_time"] / 1000
+ print("Time in queue (minutes): ", str(wait_time / 60))
+
def update_payload_timestamp(build_id, queue):
- """Updates timestamp for a given payload. """
+ """Updates timestamp for a given payload."""
id = build_id
payload = queue[id]
current_time = get_current_time()
- payload['@timestamp'] = current_time
+ payload["@timestamp"] = current_time
return id, payload
+
def process_queue_reason(labels):
if "already in progress" in labels:
reason = "concurrent builds not allowed"
elif "Waiting for next available executor on" in labels:
- node = labels.split(" on ")[1].encode('ascii', errors='ignore').decode("ascii", "ignore")
+ node = labels.split(" on ")[1].encode("ascii", errors="ignore").decode("ascii", "ignore")
reason = node + "-busy"
elif "is offline;" in labels:
reason = "multiple-offline"
elif "is offline" in labels:
- node = labels.split(" is ")[0].encode('ascii', errors='ignore').decode("ascii", "ignore")
+ node = labels.split(" is ")[0].encode("ascii", errors="ignore").decode("ascii", "ignore")
reason = node + "-offline"
else:
reason = "other"
return reason
+
def grep(filename, pattern, verbose=False):
"""Bash-like grep function. Set verbose=True to print the line match."""
if not os.path.exists(filename):
@@ -76,18 +97,25 @@ def grep(filename, pattern, verbose=False):
else:
return True
-query_running_builds = """{
+
+query_running_builds = (
+ """{
"query": {"bool": {"must": {"query_string": {"query": "job_status:Running AND jenkins_server:%s", "default_operator": "AND"}}}},
"from": 0,
"size": 10000
-}""" % JENKINS_PREFIX
+}"""
+ % JENKINS_PREFIX
+)
# Query job with in_queue=1
-query_inqueue1 = """{
+query_inqueue1 = (
+ """{
"query": {"bool": {"must": {"query_string": {"query": "in_queue: 1 AND start_time: 0 AND jenkins_server: %s", "default_operator": "AND"}}}},
"from": 0,
"size": 10000
-}""" % JENKINS_PREFIX
+}"""
+ % JENKINS_PREFIX
+)
# Query jobs with in_queue=0
query_inqueue0 = """{
@@ -97,7 +125,7 @@ def grep(filename, pattern, verbose=False):
}"""
# Get jobs in queue from elastic search
-queue_index = 'cmssdt-jenkins-queue*'
+queue_index = "cmssdt-jenkins-queue*"
try:
elements_inqueue = get_payload_wscroll(queue_index, query_inqueue1)
except ValueError:
@@ -106,15 +134,19 @@ def grep(filename, pattern, verbose=False):
es_queue = dict()
es_indexes = dict()
if elements_inqueue:
- if (not 'hits' in elements_inqueue) or (not 'hits' in elements_inqueue['hits']):
- print("ERROR: ", elements_inqueue)
- for entry in elements_inqueue['hits']['hits']:
- es_indexes[entry['_id']] = entry['_index']
- es_queue[entry['_id']] = entry['_source']
+ if (not "hits" in elements_inqueue) or (not "hits" in elements_inqueue["hits"]):
+ print("ERROR: ", elements_inqueue)
+ for entry in elements_inqueue["hits"]["hits"]:
+ es_indexes[entry["_id"]] = entry["_index"]
+ es_queue[entry["_id"]] = entry["_source"]
# Get jenkins queue and construct payload to be send to elastic search
-que_cmd='curl -s -H "OIDC_CLAIM_CERN_UPN: cmssdt; charset=UTF-8" "' + LOCAL_JENKINS_URL + '/queue/api/json?pretty=true"'
-jque_res = subprocess.run(que_cmd,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+que_cmd = (
+ 'curl -s -H "OIDC_CLAIM_CERN_UPN: cmssdt; charset=UTF-8" "'
+ + LOCAL_JENKINS_URL
+ + '/queue/api/json?pretty=true"'
+)
+jque_res = subprocess.run(que_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
queue_json = json.loads(jque_res.stdout)
jenkins_queue = dict()
@@ -128,7 +160,7 @@ def grep(filename, pattern, verbose=False):
labels = element["why"].encode("ascii", "ignore").decode("ascii", "ignore")
reason = process_queue_reason(labels)
- payload['jenkins_server'] = JENKINS_PREFIX
+ payload["jenkins_server"] = JENKINS_PREFIX
payload["in_queue_since"] = queue_time
payload["queue_id"] = queue_id
payload["job_name"] = job_name
@@ -137,11 +169,13 @@ def grep(filename, pattern, verbose=False):
payload["wait_time"] = current_time - queue_time
payload["start_time"] = 0
- unique_id = JENKINS_PREFIX + ":/build/builds/" + job_name + "/" + str(queue_id) # Not a real path
+ unique_id = (
+ JENKINS_PREFIX + ":/build/builds/" + job_name + "/" + str(queue_id)
+ ) # Not a real path
id = sha1(unique_id.encode()).hexdigest()
jenkins_queue[id] = payload
-queue_index="cmssdt-jenkins-queue-"+str(int(((current_time/86400000)+4)/7))
+queue_index = "cmssdt-jenkins-queue-" + str(int(((current_time / 86400000) + 4) / 7))
queue_document = "queue-data"
# Update information in elastic search
@@ -150,7 +184,7 @@ def grep(filename, pattern, verbose=False):
for build_id in new_inqueue:
id, payload = update_payload_timestamp(build_id, jenkins_queue)
display_build_info(id, payload)
- send_payload(queue_index,queue_document,id,json.dumps(payload))
+ send_payload(queue_index, queue_document, id, json.dumps(payload))
still_inqueue = [x for x in jenkins_queue.keys() if x in es_queue.keys()]
print("[INFO] Updating waiting time for build that are still in queue ...")
@@ -158,15 +192,20 @@ def grep(filename, pattern, verbose=False):
id, payload = update_payload_timestamp(build_id, jenkins_queue)
payload["wait_time"] = current_time - payload["in_queue_since"]
display_build_info(id, payload)
- send_payload(es_indexes[id],queue_document,id,json.dumps(payload))
+ send_payload(es_indexes[id], queue_document, id, json.dumps(payload))
no_inqueue = [str(y) for y in es_queue.keys() if y not in jenkins_queue.keys()]
print("[INFO] Updating builds that are no longer in queue ...")
for build_id in no_inqueue:
id, payload = update_payload_timestamp(build_id, es_queue)
- payload['in_queue'] = 0
- print("==> Cleaning up ",es_indexes[id],"/", str(id) + " " + str(payload["job_name"]) + " #" + str(payload["queue_id"]))
- send_payload(es_indexes[id],queue_document,id,json.dumps(payload))
+ payload["in_queue"] = 0
+ print(
+ "==> Cleaning up ",
+ es_indexes[id],
+ "/",
+ str(id) + " " + str(payload["job_name"]) + " #" + str(payload["queue_id"]),
+ )
+ send_payload(es_indexes[id], queue_document, id, json.dumps(payload))
time.sleep(10)
@@ -174,71 +213,88 @@ def grep(filename, pattern, verbose=False):
queue_content_hash = get_payload_wscroll("cmssdt-jenkins-queue*", query_inqueue0)
es_queue = dict()
es_indexes = dict()
-for entry in queue_content_hash['hits']['hits']:
- if not 'queue_id' in entry['_source']: continue
- queue_id = entry['_source']['queue_id']
- entry['_source']['queue_hash'] = entry['_id']
- es_indexes[queue_id] = entry['_index']
- es_queue[queue_id] = entry['_source']
+for entry in queue_content_hash["hits"]["hits"]:
+ if not "queue_id" in entry["_source"]:
+ continue
+ queue_id = entry["_source"]["queue_id"]
+ entry["_source"]["queue_hash"] = entry["_id"]
+ es_indexes[queue_id] = entry["_index"]
+ es_queue[queue_id] = entry["_source"]
print("[INFO] Checking status of running/finished builds ...")
all_local = []
-path = '/build/builds'
+path = "/build/builds"
document = "builds-data"
rematch = re.compile(".*/\d+$")
for root, dirs, files in os.walk(path):
- if rematch.match(root):
- logFile = root + '/build.xml'
- flagFile = root + '/check.done'
- if os.path.exists(logFile) and not os.path.exists(flagFile):
- payload = {}
- job_info = root.split('/')
- payload['job_name'] = '/'.join(job_info[3:-1])
- payload['build_number'] = job_info[-1]
- payload['url'] = "https://cmssdt.cern.ch/"+JENKINS_PREFIX+"/job/" + '/job/'.join(job_info[3:-1]) + "/" + job_info[-1] + "/"
- id = sha1((JENKINS_PREFIX+":"+root).encode()).hexdigest()
- try:
- tree = ET.parse(logFile)
- root = tree.getroot()
- pa=findParametersAction(root)
- if pa is not None: getParameters(pa, payload)
- jstime = root.find('startTime').text
- payload['@timestamp'] = int(jstime)
- try:
- payload['slave_node'] = root.find('builtOn').text
- except:
- payload['slave_node'] = 'unknown'
- try:
- payload['queue_id'] = root.find('queueId').text
- except:
- payload['queue_id'] = 'unknown'
- payload['jenkins_server'] = JENKINS_PREFIX
- build_result = root.find('result')
- if build_result is not None:
- payload['build_result'] = build_result.text
- payload['build_duration'] = int(int(root.find('duration').text)/1000)
- payload['job_status'] = 'Finished'
- os.system('touch "' + flagFile + '"')
- else:
- payload['job_status'] = 'Running'
-
- # Check if job has been in queue, and update queue waiting time
- queue_id = int(payload['queue_id'])
- if queue_id in es_queue.keys():
- queue_payload = es_queue[queue_id]
- queue_payload['start_time'] = int(jstime) # start time in millisec
- queue_payload['wait_time'] = int(jstime) - queue_payload["in_queue_since"]
- queue_payload['build_number'] = payload['build_number']
-
- print("==> Sending payload for ", queue_payload['queue_hash'])
- send_payload(es_indexes[queue_id], queue_document, queue_payload['queue_hash'], json.dumps(queue_payload))
-
- all_local.append(id)
- weekindex="jenkins-jobs-"+str(int((((int(jstime)/1000)/86400)+4)/7))
- print("==>",id,payload['job_name'],payload['build_number'],payload['job_status'])
- send_payload(weekindex,document,id,json.dumps(payload))
- except Exception as e:
- print("Xml parsing error",logFile , e)
+ if rematch.match(root):
+ logFile = root + "/build.xml"
+ flagFile = root + "/check.done"
+ if os.path.exists(logFile) and not os.path.exists(flagFile):
+ payload = {}
+ job_info = root.split("/")
+ payload["job_name"] = "/".join(job_info[3:-1])
+ payload["build_number"] = job_info[-1]
+ payload["url"] = (
+ "https://cmssdt.cern.ch/"
+ + JENKINS_PREFIX
+ + "/job/"
+ + "/job/".join(job_info[3:-1])
+ + "/"
+ + job_info[-1]
+ + "/"
+ )
+ id = sha1((JENKINS_PREFIX + ":" + root).encode()).hexdigest()
+ try:
+ tree = ET.parse(logFile)
+ root = tree.getroot()
+ pa = findParametersAction(root)
+ if pa is not None:
+ getParameters(pa, payload)
+ jstime = root.find("startTime").text
+ payload["@timestamp"] = int(jstime)
+ try:
+ payload["slave_node"] = root.find("builtOn").text
+ except:
+ payload["slave_node"] = "unknown"
+ try:
+ payload["queue_id"] = root.find("queueId").text
+ except:
+ payload["queue_id"] = "unknown"
+ payload["jenkins_server"] = JENKINS_PREFIX
+ build_result = root.find("result")
+ if build_result is not None:
+ payload["build_result"] = build_result.text
+ payload["build_duration"] = int(int(root.find("duration").text) / 1000)
+ payload["job_status"] = "Finished"
+ os.system('touch "' + flagFile + '"')
+ else:
+ payload["job_status"] = "Running"
+
+ # Check if job has been in queue, and update queue waiting time
+ queue_id = int(payload["queue_id"])
+ if queue_id in es_queue.keys():
+ queue_payload = es_queue[queue_id]
+ queue_payload["start_time"] = int(jstime) # start time in millisec
+ queue_payload["wait_time"] = int(jstime) - queue_payload["in_queue_since"]
+ queue_payload["build_number"] = payload["build_number"]
+
+ print("==> Sending payload for ", queue_payload["queue_hash"])
+ send_payload(
+ es_indexes[queue_id],
+ queue_document,
+ queue_payload["queue_hash"],
+ json.dumps(queue_payload),
+ )
+
+ all_local.append(id)
+ weekindex = "jenkins-jobs-" + str(int((((int(jstime) / 1000) / 86400) + 4) / 7))
+ print(
+ "==>", id, payload["job_name"], payload["build_number"], payload["job_status"]
+ )
+ send_payload(weekindex, document, id, json.dumps(payload))
+ except Exception as e:
+ print("Xml parsing error", logFile, e)
# Check remaining elements in the queue (to catch jobs that enter the queue and finish on the same iter)
print("[INFO] Checking remaining elements in queue ...")
@@ -252,32 +308,53 @@ def grep(filename, pattern, verbose=False):
queue_id = grep(file_path, str(es_queue[entry]["queue_id"]), True)
if queue_id != None:
queue_id.replace("", "").replace("", "").replace("\n", "")
- jstime = grep(file_path, str(""), True).replace("", "").replace("", "").replace("\n", "")
+ jstime = (
+ grep(file_path, str(""), True)
+ .replace("", "")
+ .replace("", "")
+ .replace("\n", "")
+ )
es_queue[entry]["start_time"] = int(jstime)
es_queue[entry]["wait_time"] = int(jstime) - es_queue[entry]["in_queue_since"]
- print("==> Sending payload for ", es_queue[entry]['queue_hash'])
- send_payload(es_indexes[entry], queue_document, es_queue[entry]['queue_hash'], json.dumps(es_queue[entry]))
+ print("==> Sending payload for ", es_queue[entry]["queue_hash"])
+ send_payload(
+ es_indexes[entry],
+ queue_document,
+ es_queue[entry]["queue_hash"],
+ json.dumps(es_queue[entry]),
+ )
-running_builds_elastic={}
-content_hash = get_payload_wscroll('jenkins-*',query_running_builds)
+running_builds_elastic = {}
+content_hash = get_payload_wscroll("jenkins-*", query_running_builds)
if not content_hash:
- running_builds_elastic = {}
+ running_builds_elastic = {}
else:
- if (not 'hits' in content_hash) or (not 'hits' in content_hash['hits']):
- print("ERROR: ",content_hash)
- sys.exit(1)
- print("Found:", len(content_hash['hits']['hits']))
- for hit in content_hash['hits']['hits']:
- if hit["_index"].startswith("cmssdt-jenkins-jobs-"):
- if not "jenkins_server" in hit["_source"]: hit["_source"]["jenkins_server"] = JENKINS_PREFIX
- if hit["_source"]["jenkins_server"]!=JENKINS_PREFIX: continue
- try:print("Running:",hit["_source"]["jenkins_server"],":",hit["_source"]['job_name'],hit["_source"]['build_number'],hit["_index"],hit['_id'])
- except Exception as e: print("Error:", e)
- running_builds_elastic[hit['_id']]=hit
+ if (not "hits" in content_hash) or (not "hits" in content_hash["hits"]):
+ print("ERROR: ", content_hash)
+ sys.exit(1)
+ print("Found:", len(content_hash["hits"]["hits"]))
+ for hit in content_hash["hits"]["hits"]:
+ if hit["_index"].startswith("cmssdt-jenkins-jobs-"):
+ if not "jenkins_server" in hit["_source"]:
+ hit["_source"]["jenkins_server"] = JENKINS_PREFIX
+ if hit["_source"]["jenkins_server"] != JENKINS_PREFIX:
+ continue
+ try:
+ print(
+ "Running:",
+ hit["_source"]["jenkins_server"],
+ ":",
+ hit["_source"]["job_name"],
+ hit["_source"]["build_number"],
+ hit["_index"],
+ hit["_id"],
+ )
+ except Exception as e:
+ print("Error:", e)
+ running_builds_elastic[hit["_id"]] = hit
for build in running_builds_elastic:
- if build not in all_local:
- hit = running_builds_elastic[build]
- hit["_source"]["job_status"]="Failed"
- resend_payload(hit)
- print("job status marked as Failed")
-
+ if build not in all_local:
+ hit = running_builds_elastic[build]
+ hit["_source"]["job_status"] = "Failed"
+ resend_payload(hit)
+ print("job status marked as Failed")
diff --git a/parse_workflow_time.py b/parse_workflow_time.py
index 37febe88e126..ce446b284528 100755
--- a/parse_workflow_time.py
+++ b/parse_workflow_time.py
@@ -1,27 +1,28 @@
#!/bin/env python
from datetime import datetime
-import re , json
+import re, json
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i", "--input")
parser.add_argument("-o", "--output")
args = parser.parse_args()
-fd_read = open(args.input,'r')
+fd_read = open(args.input, "r")
dict_store = {}
for line in fd_read:
- workflow = line.split('_')[0]
- match_date = re.findall(r'[A-Z]{3}\s+[\d]{2}\s+[\d]{2}:[\d]{2}:[\d]{2}\s+[\d]{4}', line, re.IGNORECASE)
- if len(match_date)!=2:
- continue
-
- t1 = datetime.strptime(match_date[1],'%b %d %H:%M:%S %Y')
- t2 = datetime.strptime(match_date[0],'%b %d %H:%M:%S %Y')
- delta = t2-t1
- dict_store[workflow] = delta.seconds
+ workflow = line.split("_")[0]
+ match_date = re.findall(
+ r"[A-Z]{3}\s+[\d]{2}\s+[\d]{2}:[\d]{2}:[\d]{2}\s+[\d]{4}", line, re.IGNORECASE
+ )
+ if len(match_date) != 2:
+ continue
-fd_read.close
-with open(args.output, 'w') as outfile:
- json.dump(dict_store, outfile)
+ t1 = datetime.strptime(match_date[1], "%b %d %H:%M:%S %Y")
+ t2 = datetime.strptime(match_date[0], "%b %d %H:%M:%S %Y")
+ delta = t2 - t1
+ dict_store[workflow] = delta.seconds
+fd_read.close
+with open(args.output, "w") as outfile:
+ json.dump(dict_store, outfile)
diff --git a/port-pull-request.py b/port-pull-request.py
index 2e6e115a4ff7..9aeff997cabb 100755
--- a/port-pull-request.py
+++ b/port-pull-request.py
@@ -6,24 +6,52 @@
from github import Github
from github_utils import port_pr
from socket import setdefaulttimeout
+
setdefaulttimeout(120)
if __name__ == "__main__":
- parser = OptionParser( usage="%prog " )
- parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False )
- parser.add_option( "-p", "--pull_request", dest="pull_request" , action="store" , help="Pull request number to be ported", type=int )
- parser.add_option( "-b", "--branch", dest="branch" , action="store" , help="Git branch where this PR should be ported to e.g. CMSSW_7_6_X")
- parser.add_option( "-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default=gh_user+"/"+gh_cmssw)
+ parser = OptionParser(usage="%prog ")
+ parser.add_option(
+ "-n",
+ "--dry-run",
+ dest="dryRun",
+ action="store_true",
+ help="Do not post on Github",
+ default=False,
+ )
+ parser.add_option(
+ "-p",
+ "--pull_request",
+ dest="pull_request",
+ action="store",
+ help="Pull request number to be ported",
+ type=int,
+ )
+ parser.add_option(
+ "-b",
+ "--branch",
+ dest="branch",
+ action="store",
+ help="Git branch where this PR should be ported to e.g. CMSSW_7_6_X",
+ )
+ parser.add_option(
+ "-r",
+ "--repository",
+ dest="repository",
+    help="Github Repository name e.g. cms-sw/cmssw.",
+ type=str,
+ default=gh_user + "/" + gh_cmssw,
+ )
- opts, args = parser.parse_args( )
+ opts, args = parser.parse_args()
- if len( args ) != 0:
- parser.print_help()
- parser.error( "Too many arguments" )
+ if len(args) != 0:
+ parser.print_help()
+ parser.error("Too many arguments")
- if not opts.pull_request or not opts.branch:
- parser.print_help()
- parser.error("Too few arguments")
+ if not opts.pull_request or not opts.branch:
+ parser.print_help()
+ parser.error("Too few arguments")
- gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip())
- port_pr(gh.get_repo(opts.repository), opts.pull_request , opts.branch, opts.dryRun)
+ gh = Github(login_or_token=open(expanduser("~/.github-token")).read().strip())
+ port_pr(gh.get_repo(opts.repository), opts.pull_request, opts.branch, opts.dryRun)
diff --git a/pr-checks/find-changed-workflows.py b/pr-checks/find-changed-workflows.py
index 5390f1284c79..5bdb12ab95f8 100755
--- a/pr-checks/find-changed-workflows.py
+++ b/pr-checks/find-changed-workflows.py
@@ -1,47 +1,53 @@
#!/usr/bin/env python
from __future__ import print_function
-import sys,re
+import sys, re
+
def read_workflows(wfile):
- fin = open(wfile)
- data = {}
- wf = ""
- for line in fin.readlines():
- m = re.match("^([^[]+)\[(\d+)]:\s+(.+)",line)
- if m:
- cmd = re.sub("\s\s+"," ",m.group(3).strip())
- if m.group(1).strip():
- wf = m.group(1).strip().split(" ",1)[0]
- data [wf] = []
- data[wf].append(cmd)
- return data
+ fin = open(wfile)
+ data = {}
+ wf = ""
+ for line in fin.readlines():
+ m = re.match("^([^[]+)\[(\d+)]:\s+(.+)", line)
+ if m:
+ cmd = re.sub("\s\s+", " ", m.group(3).strip())
+ if m.group(1).strip():
+ wf = m.group(1).strip().split(" ", 1)[0]
+ data[wf] = []
+ data[wf].append(cmd)
+ return data
+
orig = sys.argv[1]
new = sys.argv[2]
-odata= read_workflows(orig)
+odata = read_workflows(orig)
ndata = read_workflows(new)
cdata = {}
for wf in ndata:
- cdata[wf] = []
- if not wf in odata:
- cdata[wf] = ["New workflow"]
- continue
- nlen = len(ndata[wf])
- olen = len(odata[wf])
- if nlen!=olen:
- cdata[wf] = ["Number of Steps changed: %s vs %s" % (olen, nlen)]
- else:
- for i in range(nlen):
- if ndata[wf][i]!=odata[wf][i]:
- cdata[wf].append("\n - **step%s**\n```\n- %s\n+ %s\n```" % (i+1, ndata[wf][i],odata[wf][i]))
+ cdata[wf] = []
+ if not wf in odata:
+ cdata[wf] = ["New workflow"]
+ continue
+ nlen = len(ndata[wf])
+ olen = len(odata[wf])
+ if nlen != olen:
+ cdata[wf] = ["Number of Steps changed: %s vs %s" % (olen, nlen)]
+ else:
+ for i in range(nlen):
+ if ndata[wf][i] != odata[wf][i]:
+ cdata[wf].append(
+ "\n - **step%s**\n```\n- %s\n+ %s\n```"
+ % (i + 1, ndata[wf][i], odata[wf][i])
+ )
wfs = sorted(cdata, key=float)
for wf in wfs:
- if not cdata[wf]: continue
- if len(cdata[wf])==1:
- print (" - **%s**: %s" % (wf, cdata[wf][0]))
- else:
- print (" - **%s**:" % wf)
- for c in cdata[wf]:
- print (c)
+ if not cdata[wf]:
+ continue
+ if len(cdata[wf]) == 1:
+ print(" - **%s**: %s" % (wf, cdata[wf][0]))
+ else:
+ print(" - **%s**:" % wf)
+ for c in cdata[wf]:
+ print(c)
diff --git a/pr_testing/get-merged-prs.py b/pr_testing/get-merged-prs.py
index cc599b2811da..a0b94b88543b 100755
--- a/pr_testing/get-merged-prs.py
+++ b/pr_testing/get-merged-prs.py
@@ -8,45 +8,91 @@
from __future__ import print_function
from os import environ
-from os.path import dirname,basename,abspath,join
+from os.path import dirname, basename, abspath, join
from json import dumps, dump, load
from optparse import OptionParser
import sys
-sys.path.append(dirname(dirname(abspath(__file__))))
+
+sys.path.append(dirname(dirname(abspath(__file__))))
from github_utils import get_merge_prs
-parser = OptionParser( usage="%prog " )
-parser.add_option( "-s", "--start-tag", dest="start_tag" , action="store" , help="Starting tag, default is CMSSW_VERSION environment.", default=None)
-parser.add_option( "-e", "--end-tag", dest="end_tag" , action="store" , help="Ending tag, default is HEAD.", default='HEAD')
-parser.add_option( "-g", "--git-directory", dest="git_dir" , action="store" , help=".git directory, default is CMSSW_BASE/src/.git", default=None)
-parser.add_option( "-c", "--cache-directory", dest="cache_dir" , action="store" , help="Path to cms-prs cache directory", default=None)
-parser.add_option( "-o", "--out-file", dest="out_file" , action="store" , help="Outpu json file name", default=None)
-parser.add_option( "-r", "--repository", dest="repository" , action="store" , help="Repository e.g. cms-sw/cmssw or cms-sw/cmsdist", default="cms-sw/cmssw")
-parser.add_option( "-i", "--ignore-prs", dest="ignore" , action="store" , help="Comma separated list of PRs to ignore", default="")
-opts, args = parser.parse_args( )
-if len( args ) != 0:
- parser.print_help()
- parser.error( "Too many arguments" )
+parser = OptionParser(usage="%prog ")
+parser.add_option(
+ "-s",
+ "--start-tag",
+ dest="start_tag",
+ action="store",
+ help="Starting tag, default is CMSSW_VERSION environment.",
+ default=None,
+)
+parser.add_option(
+ "-e",
+ "--end-tag",
+ dest="end_tag",
+ action="store",
+ help="Ending tag, default is HEAD.",
+ default="HEAD",
+)
+parser.add_option(
+ "-g",
+ "--git-directory",
+ dest="git_dir",
+ action="store",
+ help=".git directory, default is CMSSW_BASE/src/.git",
+ default=None,
+)
+parser.add_option(
+ "-c",
+ "--cache-directory",
+ dest="cache_dir",
+ action="store",
+ help="Path to cms-prs cache directory",
+ default=None,
+)
+parser.add_option(
+    "-o", "--out-file", dest="out_file", action="store", help="Output json file name", default=None
+)
+parser.add_option(
+ "-r",
+ "--repository",
+ dest="repository",
+ action="store",
+ help="Repository e.g. cms-sw/cmssw or cms-sw/cmsdist",
+ default="cms-sw/cmssw",
+)
+parser.add_option(
+ "-i",
+ "--ignore-prs",
+ dest="ignore",
+ action="store",
+ help="Comma separated list of PRs to ignore",
+ default="",
+)
+opts, args = parser.parse_args()
+if len(args) != 0:
+ parser.print_help()
+ parser.error("Too many arguments")
if not opts.start_tag:
- opts.start_tag = environ['CMSSW_VERSION']
+ opts.start_tag = environ["CMSSW_VERSION"]
if not opts.git_dir:
- opts.git_dir = environ['CMSSW_BASE']+"/src/.git"
+ opts.git_dir = environ["CMSSW_BASE"] + "/src/.git"
if not opts.cache_dir:
- parser.error( "Please pass -c|--cache-directory /path/to/cms-prs" )
+ parser.error("Please pass -c|--cache-directory /path/to/cms-prs")
prs = {}
if opts.out_file:
- with open(opts.out_file) as ref:
- prs = load(ref)
-prs[opts.repository] = get_merge_prs(opts.start_tag, opts.end_tag, opts.git_dir,opts.cache_dir,{},basename(opts.repository))
+ with open(opts.out_file) as ref:
+ prs = load(ref)
+prs[opts.repository] = get_merge_prs(
+ opts.start_tag, opts.end_tag, opts.git_dir, opts.cache_dir, {}, basename(opts.repository)
+)
for ignore in [int(i) for i in opts.ignore.split(",") if i]:
- if ignore in prs[opts.repository]:
- del prs[opts.repository][ignore]
+ if ignore in prs[opts.repository]:
+ del prs[opts.repository][ignore]
if not prs[opts.repository]:
- del prs[opts.repository]
+ del prs[opts.repository]
if opts.out_file:
- with open(opts.out_file,"w") as ref:
- dump(prs, ref,sort_keys=True, indent=4, separators=(',', ': '))
+ with open(opts.out_file, "w") as ref:
+ dump(prs, ref, sort_keys=True, indent=4, separators=(",", ": "))
else:
- print(dumps(prs,sort_keys=True, indent=4, separators=(',', ': ')))
-
+ print(dumps(prs, sort_keys=True, indent=4, separators=(",", ": ")))
diff --git a/pr_testing/run-das-query.py b/pr_testing/run-das-query.py
index 01bbbe855ffd..a567abde5d52 100755
--- a/pr_testing/run-das-query.py
+++ b/pr_testing/run-das-query.py
@@ -8,31 +8,44 @@
from __future__ import print_function
import os, sys
-BOT_DIR=os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
-sys.path.insert(0,BOT_DIR)
+
+BOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
+sys.path.insert(0, BOT_DIR)
from _py2with3compatibility import run_cmd
from cmsutils import MachineCPUCount
from RelValArgs import GetMatrixOptions
os.environ["PATH"] = "%s/das-utils:%s" % (BOT_DIR, os.environ["PATH"])
-cmd = "runTheMatrix.py -j %s --maxSteps=0 %s" % (MachineCPUCount, GetMatrixOptions(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"]))
-print("Running ",cmd)
-e, o = run_cmd("touch runall-report-step123-.log ; rm -rf rel; mkdir rel; cd rel; %s; [ -f runall-report-step123-.log ] && cp runall-report-step123-.log ../" % cmd)
+cmd = "runTheMatrix.py -j %s --maxSteps=0 %s" % (
+ MachineCPUCount,
+ GetMatrixOptions(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"]),
+)
+print("Running ", cmd)
+e, o = run_cmd(
+ "touch runall-report-step123-.log ; rm -rf rel; mkdir rel; cd rel; %s; [ -f runall-report-step123-.log ] && cp runall-report-step123-.log ../"
+ % cmd
+)
print(o)
-err=0
-if e: err=1
-if os.getenv("MATRIX_EXTRAS",""):
- e, o = run_cmd("grep -E '^[1-9][0-9]*(\.[0-9]*|)_' runall-report-step123-.log | sed 's|_.*||'")
- all_wfs = [wf for wf in o.split('\n') if wf]
- print("All WFS:",all_wfs)
- new_wfs = []
- for wf in os.getenv("MATRIX_EXTRAS","").split(","):
- if wf and (not wf in all_wfs) and (not wf in new_wfs): new_wfs.append(wf)
- print("New WFs:",new_wfs)
- if new_wfs:
- cmd = "%s -l %s %s" % (cmd, ','.join(new_wfs), os.getenv("EXTRA_MATRIX_ARGS",""))
- print("Running ",cmd)
- e, o = run_cmd("rm -rf rel; mkdir rel; cd rel; %s ; [ -f runall-report-step123-.log ] && cat runall-report-step123-.log >> ../runall-report-step123-.log" % cmd)
- print(o)
- if e: err=1
+err = 0
+if e:
+ err = 1
+if os.getenv("MATRIX_EXTRAS", ""):
+ e, o = run_cmd("grep -E '^[1-9][0-9]*(\.[0-9]*|)_' runall-report-step123-.log | sed 's|_.*||'")
+ all_wfs = [wf for wf in o.split("\n") if wf]
+ print("All WFS:", all_wfs)
+ new_wfs = []
+ for wf in os.getenv("MATRIX_EXTRAS", "").split(","):
+ if wf and (not wf in all_wfs) and (not wf in new_wfs):
+ new_wfs.append(wf)
+ print("New WFs:", new_wfs)
+ if new_wfs:
+ cmd = "%s -l %s %s" % (cmd, ",".join(new_wfs), os.getenv("EXTRA_MATRIX_ARGS", ""))
+ print("Running ", cmd)
+ e, o = run_cmd(
+ "rm -rf rel; mkdir rel; cd rel; %s ; [ -f runall-report-step123-.log ] && cat runall-report-step123-.log >> ../runall-report-step123-.log"
+ % cmd
+ )
+ print(o)
+ if e:
+ err = 1
sys.exit(err)
diff --git a/process-partial-logs-relval.py b/process-partial-logs-relval.py
index 5a5a47ed7faa..09c3f07ed304 100755
--- a/process-partial-logs-relval.py
+++ b/process-partial-logs-relval.py
@@ -3,10 +3,10 @@
import os, sys
from runPyRelValThread import PyRelValsThread
-path=sys.argv[1]
-newloc = os.path.dirname(path) + '/pyRelValMatrixLogs/run'
-os.system('mkdir -p ' + newloc)
-ProcessLogs = PyRelValsThread(1,path,"1of1",newloc)
+path = sys.argv[1]
+newloc = os.path.dirname(path) + "/pyRelValMatrixLogs/run"
+os.system("mkdir -p " + newloc)
+ProcessLogs = PyRelValsThread(1, path, "1of1", newloc)
print("Generating runall log file: %s" % path)
ProcessLogs.update_runall()
print("Generating relval time info")
diff --git a/process_pr.py b/process_pr.py
index b08cf16179af..96821a717994 100644
--- a/process_pr.py
+++ b/process_pr.py
@@ -1,9 +1,27 @@
-from categories import CMSSW_L2, CMSSW_L1, TRIGGER_PR_TESTS, CMSSW_ISSUES_TRACKERS, PR_HOLD_MANAGERS, EXTERNAL_REPOS,CMSDIST_REPOS
+from categories import (
+ CMSSW_L2,
+ CMSSW_L1,
+ TRIGGER_PR_TESTS,
+ CMSSW_ISSUES_TRACKERS,
+ PR_HOLD_MANAGERS,
+ EXTERNAL_REPOS,
+ CMSDIST_REPOS,
+)
from categories import CMSSW_CATEGORIES
from releases import RELEASE_BRANCH_MILESTONE, RELEASE_BRANCH_PRODUCTION, CMSSW_DEVEL_BRANCH
-from cms_static import VALID_CMSDIST_BRANCHES, NEW_ISSUE_PREFIX, NEW_PR_PREFIX, ISSUE_SEEN_MSG, BUILD_REL, \
- GH_CMSSW_REPO, GH_CMSDIST_REPO, CMSBOT_IGNORE_MSG, VALID_CMS_SW_REPOS_FOR_TESTS, CREATE_REPO
-from cms_static import BACKPORT_STR,GH_CMSSW_ORGANIZATION, CMSBOT_NO_NOTIFY_MSG
+from cms_static import (
+ VALID_CMSDIST_BRANCHES,
+ NEW_ISSUE_PREFIX,
+ NEW_PR_PREFIX,
+ ISSUE_SEEN_MSG,
+ BUILD_REL,
+ GH_CMSSW_REPO,
+ GH_CMSDIST_REPO,
+ CMSBOT_IGNORE_MSG,
+ VALID_CMS_SW_REPOS_FOR_TESTS,
+ CREATE_REPO,
+)
+from cms_static import BACKPORT_STR, GH_CMSSW_ORGANIZATION, CMSBOT_NO_NOTIFY_MSG
from githublabels import TYPE_COMMANDS
from repo_config import GH_REPO_ORGANIZATION
import re, time
@@ -17,1520 +35,1982 @@
from json import dumps, load
try:
- from categories import CMSSW_LABELS
+ from categories import CMSSW_LABELS
except:
- CMSSW_LABELS = {}
+ CMSSW_LABELS = {}
try:
- from categories import get_dpg_pog
+ from categories import get_dpg_pog
except:
- def get_dpg_pog(*args): return {}
+
+ def get_dpg_pog(*args):
+ return {}
+
+
try:
- from categories import external_to_package
+ from categories import external_to_package
except:
- def external_to_package(*args):
- return ''
+
+ def external_to_package(*args):
+ return ""
+
+
try:
- from releases import get_release_managers, is_closed_branch
+ from releases import get_release_managers, is_closed_branch
except:
- def get_release_managers(*args):
- return []
- def is_closed_branch(*args):
- return False
+
+ def get_release_managers(*args):
+ return []
+
+ def is_closed_branch(*args):
+ return False
dpg_pog = get_dpg_pog()
for l in CMSSW_LABELS.keys():
- if not l in dpg_pog:
- del CMSSW_LABELS[l]
- else:
- CMSSW_LABELS[l] = [re.compile('^('+p+').*$') for p in CMSSW_LABELS[l]]
+ if not l in dpg_pog:
+ del CMSSW_LABELS[l]
+ else:
+ CMSSW_LABELS[l] = [re.compile("^(" + p + ").*$") for p in CMSSW_LABELS[l]]
setdefaulttimeout(300)
-CMSDIST_REPO_NAME=join(GH_REPO_ORGANIZATION, GH_CMSDIST_REPO)
-CMSSW_REPO_NAME=join(GH_REPO_ORGANIZATION, GH_CMSSW_REPO)
+CMSDIST_REPO_NAME = join(GH_REPO_ORGANIZATION, GH_CMSDIST_REPO)
+CMSSW_REPO_NAME = join(GH_REPO_ORGANIZATION, GH_CMSSW_REPO)
+
# Prepare various comments regardless of whether they will be made or not.
-def format(s, **kwds): return s % kwds
-
-TRIGERING_TESTS_MSG = 'The tests are being triggered in jenkins.'
-TRIGERING_TESTS_MSG1 = 'Jenkins tests started for '
-TRIGERING_STYLE_TEST_MSG = 'The project style tests are being triggered in jenkins.'
-IGNORING_TESTS_MSG = 'Ignoring test request.'
-TESTS_RESULTS_MSG = '^\s*([-|+]1|I had the issue.*)\s*$'
-FAILED_TESTS_MSG = 'The jenkins tests job failed, please try again.'
-PUSH_TEST_ISSUE_MSG='^\[Jenkins CI\] Testing commit: [0-9a-f]+$'
+def format(s, **kwds):
+ return s % kwds
+
+
+TRIGERING_TESTS_MSG = "The tests are being triggered in jenkins."
+TRIGERING_TESTS_MSG1 = "Jenkins tests started for "
+TRIGERING_STYLE_TEST_MSG = "The project style tests are being triggered in jenkins."
+IGNORING_TESTS_MSG = "Ignoring test request."
+TESTS_RESULTS_MSG = "^\s*([-|+]1|I had the issue.*)\s*$"
+FAILED_TESTS_MSG = "The jenkins tests job failed, please try again."
+PUSH_TEST_ISSUE_MSG = "^\[Jenkins CI\] Testing commit: [0-9a-f]+$"
HOLD_MSG = "Pull request has been put on hold by "
-#Regexp to match the test requests
-CODE_CHECKS_REGEXP=re.compile("code-checks(\s+with\s+cms.week[0-9].PR_[0-9a-f]{8}/[^\s]+|)(\s+and\s+apply\s+patch|)$")
-WF_PATTERN="[1-9][0-9]*(\.[0-9]+|)"
-CMSSW_QUEUE_PATTERN='CMSSW_[0-9]+_[0-9]+_(X|[A-Z][A-Z0-9]+_X|[0-9]+(_[a-zA-Z0-9_]+|))'
-CMSSW_PACKAGE_PATTERN='[A-Z][a-zA-Z0-9]+(/[a-zA-Z0-9]+|)'
-ARCH_PATTERN='[a-z0-9]+_[a-z0-9]+_[a-z0-9]+'
-CMSSW_RELEASE_QUEUE_PATTERN=format('(%(cmssw)s|%(arch)s|%(cmssw)s/%(arch)s)', cmssw=CMSSW_QUEUE_PATTERN, arch=ARCH_PATTERN)
-RELVAL_OPTS="[-][a-zA-Z0-9_.,\s/'-]+"
-CLOSE_REQUEST=re.compile('^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)close\s*$',re.I)
-REOPEN_REQUEST=re.compile('^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)(re|)open\s*$',re.I)
-CMS_PR_PATTERN=format('(#[1-9][0-9]*|(%(cmsorgs)s)/+[a-zA-Z0-9_-]+#[1-9][0-9]*|https://+github.com/+(%(cmsorgs)s)/+[a-zA-Z0-9_-]+/+pull/+[1-9][0-9]*)',
- cmsorgs='|'.join(EXTERNAL_REPOS))
-TEST_REGEXP = format("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)test(\s+workflow(s|)\s+(%(workflow)s(\s*,\s*%(workflow)s|)*)|)(\s+with\s+(%(cms_pr)s(\s*,\s*%(cms_pr)s)*)|)(\s+for\s+%(release_queue)s|)(\s+using\s+full\s+cmssw|\s+using\s+(cms-|)addpkg\s+(%(pkg)s(,%(pkg)s)*)|)\s*$",
- workflow=WF_PATTERN,
- cms_pr=CMS_PR_PATTERN,
- pkg=CMSSW_PACKAGE_PATTERN,
- release_queue=CMSSW_RELEASE_QUEUE_PATTERN)
+# Regexp to match the test requests
+CODE_CHECKS_REGEXP = re.compile(
+ "code-checks(\s+with\s+cms.week[0-9].PR_[0-9a-f]{8}/[^\s]+|)(\s+and\s+apply\s+patch|)$"
+)
+WF_PATTERN = "[1-9][0-9]*(\.[0-9]+|)"
+CMSSW_QUEUE_PATTERN = "CMSSW_[0-9]+_[0-9]+_(X|[A-Z][A-Z0-9]+_X|[0-9]+(_[a-zA-Z0-9_]+|))"
+CMSSW_PACKAGE_PATTERN = "[A-Z][a-zA-Z0-9]+(/[a-zA-Z0-9]+|)"
+ARCH_PATTERN = "[a-z0-9]+_[a-z0-9]+_[a-z0-9]+"
+CMSSW_RELEASE_QUEUE_PATTERN = format(
+ "(%(cmssw)s|%(arch)s|%(cmssw)s/%(arch)s)", cmssw=CMSSW_QUEUE_PATTERN, arch=ARCH_PATTERN
+)
+RELVAL_OPTS = "[-][a-zA-Z0-9_.,\s/'-]+"
+CLOSE_REQUEST = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)close\s*$", re.I)
+REOPEN_REQUEST = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)(re|)open\s*$", re.I)
+CMS_PR_PATTERN = format(
+ "(#[1-9][0-9]*|(%(cmsorgs)s)/+[a-zA-Z0-9_-]+#[1-9][0-9]*|https://+github.com/+(%(cmsorgs)s)/+[a-zA-Z0-9_-]+/+pull/+[1-9][0-9]*)",
+ cmsorgs="|".join(EXTERNAL_REPOS),
+)
+TEST_REGEXP = format(
+ "^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)test(\s+workflow(s|)\s+(%(workflow)s(\s*,\s*%(workflow)s|)*)|)(\s+with\s+(%(cms_pr)s(\s*,\s*%(cms_pr)s)*)|)(\s+for\s+%(release_queue)s|)(\s+using\s+full\s+cmssw|\s+using\s+(cms-|)addpkg\s+(%(pkg)s(,%(pkg)s)*)|)\s*$",
+ workflow=WF_PATTERN,
+ cms_pr=CMS_PR_PATTERN,
+ pkg=CMSSW_PACKAGE_PATTERN,
+ release_queue=CMSSW_RELEASE_QUEUE_PATTERN,
+)
AUTO_TEST_REPOS = ["cms-sw/cmssw"]
REGEX_TEST_REG = re.compile(TEST_REGEXP, re.I)
-REGEX_TEST_ABORT = re.compile("^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)abort(\s+test|)$", re.I)
-TEST_WAIT_GAP=720
+REGEX_TEST_ABORT = re.compile(
+ "^\s*((@|)cmsbuild\s*[,]*\s+|)(please\s*[,]*\s+|)abort(\s+test|)$", re.I
+)
+TEST_WAIT_GAP = 720
ALL_CHECK_FUNCTIONS = None
EXTRA_RELVALS_TESTS = ["threading", "gpu", "high-stats", "nano"]
-EXTRA_RELVALS_TESTS_OPTS ="_" + "|_".join(EXTRA_RELVALS_TESTS)
+EXTRA_RELVALS_TESTS_OPTS = "_" + "|_".join(EXTRA_RELVALS_TESTS)
EXTRA_TESTS = "|".join(EXTRA_RELVALS_TESTS) + "|profiling|none"
-SKIP_TESTS = "|".join(["static","header"])
+SKIP_TESTS = "|".join(["static", "header"])
ENABLE_TEST_PTRN = "enable(_test(s|)|)"
-JENKINS_NODES = '[a-zA-Z0-9_|&\s()-]+'
+JENKINS_NODES = "[a-zA-Z0-9_|&\s()-]+"
MULTILINE_COMMENTS_MAP = {
- "(workflow|relval)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|)": [format('%(workflow)s(\s*,\s*%(workflow)s|)*', workflow= WF_PATTERN), "MATRIX_EXTRAS"],
- "(workflow|relval)(s|)_profiling": [format('%(workflow)s(\s*,\s*%(workflow)s|)*', workflow= WF_PATTERN),"PROFILING_WORKFLOWS"],
- "pull_request(s|)": [format('%(cms_pr)s(,%(cms_pr)s)*', cms_pr=CMS_PR_PATTERN ), "PULL_REQUESTS"],
- "full_cmssw|full": ['true|false', "BUILD_FULL_CMSSW"],
- "disable_poison": ['true|false', "DISABLE_POISON"],
- "use_ib_tag": ['true|false', "USE_IB_TAG"],
- "baseline": ['self|default', "USE_BASELINE"],
- "skip_test(s|)": [format("(%(tests)s)(\s*,\s*(%(tests)s))*",tests=SKIP_TESTS), "SKIP_TESTS"],
- "dry_run": ['true|false', "DRY_RUN"],
- "jenkins_(slave|node)": [JENKINS_NODES , "RUN_ON_SLAVE"],
- "(arch(itecture(s|))|release|release/arch)" : [ CMSSW_RELEASE_QUEUE_PATTERN, "RELEASE_FORMAT"],
- ENABLE_TEST_PTRN: [format("(%(tests)s)(\s*,\s*(%(tests)s))*",tests=EXTRA_TESTS), "ENABLE_BOT_TESTS"],
- "ignore_test(s|)": ["build-warnings|clang-warnings", "IGNORE_BOT_TESTS"],
- "container": ["[a-zA-Z][a-zA-Z0-9_-]+/[a-zA-Z][a-zA-Z0-9_-]+(:[a-zA-Z0-9_-]+|)", "DOCKER_IMGAGE"],
- "cms-addpkg|addpkg":[format('%(pkg)s(,%(pkg)s)*', pkg=CMSSW_PACKAGE_PATTERN), "EXTRA_CMSSW_PACKAGES"],
- "build_verbose": ['true|false', "BUILD_VERBOSE"],
- "(workflow|relval)(s|)_opt(ion|)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|_input|)": [RELVAL_OPTS,"EXTRA_MATRIX_ARGS",True],
- "(workflow|relval)(s|)_command_opt(ion|)(s|)("+EXTRA_RELVALS_TESTS_OPTS+"|_input|)": [RELVAL_OPTS,"EXTRA_MATRIX_COMMAND_ARGS",True]
- }
+ "(workflow|relval)(s|)("
+ + EXTRA_RELVALS_TESTS_OPTS
+ + "|)": [format("%(workflow)s(\s*,\s*%(workflow)s|)*", workflow=WF_PATTERN), "MATRIX_EXTRAS"],
+ "(workflow|relval)(s|)_profiling": [
+ format("%(workflow)s(\s*,\s*%(workflow)s|)*", workflow=WF_PATTERN),
+ "PROFILING_WORKFLOWS",
+ ],
+ "pull_request(s|)": [
+ format("%(cms_pr)s(,%(cms_pr)s)*", cms_pr=CMS_PR_PATTERN),
+ "PULL_REQUESTS",
+ ],
+ "full_cmssw|full": ["true|false", "BUILD_FULL_CMSSW"],
+ "disable_poison": ["true|false", "DISABLE_POISON"],
+ "use_ib_tag": ["true|false", "USE_IB_TAG"],
+ "baseline": ["self|default", "USE_BASELINE"],
+ "skip_test(s|)": [format("(%(tests)s)(\s*,\s*(%(tests)s))*", tests=SKIP_TESTS), "SKIP_TESTS"],
+ "dry_run": ["true|false", "DRY_RUN"],
+ "jenkins_(slave|node)": [JENKINS_NODES, "RUN_ON_SLAVE"],
+ "(arch(itecture(s|))|release|release/arch)": [CMSSW_RELEASE_QUEUE_PATTERN, "RELEASE_FORMAT"],
+ ENABLE_TEST_PTRN: [
+ format("(%(tests)s)(\s*,\s*(%(tests)s))*", tests=EXTRA_TESTS),
+ "ENABLE_BOT_TESTS",
+ ],
+ "ignore_test(s|)": ["build-warnings|clang-warnings", "IGNORE_BOT_TESTS"],
+ "container": [
+ "[a-zA-Z][a-zA-Z0-9_-]+/[a-zA-Z][a-zA-Z0-9_-]+(:[a-zA-Z0-9_-]+|)",
+ "DOCKER_IMGAGE",
+ ],
+ "cms-addpkg|addpkg": [
+ format("%(pkg)s(,%(pkg)s)*", pkg=CMSSW_PACKAGE_PATTERN),
+ "EXTRA_CMSSW_PACKAGES",
+ ],
+ "build_verbose": ["true|false", "BUILD_VERBOSE"],
+ "(workflow|relval)(s|)_opt(ion|)(s|)("
+ + EXTRA_RELVALS_TESTS_OPTS
+ + "|_input|)": [RELVAL_OPTS, "EXTRA_MATRIX_ARGS", True],
+ "(workflow|relval)(s|)_command_opt(ion|)(s|)("
+ + EXTRA_RELVALS_TESTS_OPTS
+ + "|_input|)": [RELVAL_OPTS, "EXTRA_MATRIX_COMMAND_ARGS", True],
+}
L2_DATA = {}
+
def init_l2_data(cms_repo):
- l2_data = {}
- if cms_repo:
- with open(join(dirname(__file__),"cmssw_l2","l2.json")) as ref:
- l2_data = load(ref)
- for user in CMSSW_L2:
- if (user in l2_data) and ('end_date' in l2_data[user][-1]):
- del l2_data[user][-1]['end_date']
- else:
- for user in CMSSW_L2:
- l2_data[user] = [{'start_date': 0, 'category': CMSSW_L2[user]}]
- return l2_data
+ l2_data = {}
+ if cms_repo:
+ with open(join(dirname(__file__), "cmssw_l2", "l2.json")) as ref:
+ l2_data = load(ref)
+ for user in CMSSW_L2:
+ if (user in l2_data) and ("end_date" in l2_data[user][-1]):
+ del l2_data[user][-1]["end_date"]
+ else:
+ for user in CMSSW_L2:
+ l2_data[user] = [{"start_date": 0, "category": CMSSW_L2[user]}]
+ return l2_data
+
def get_commenter_categories(commenter, comment_date):
- if commenter not in L2_DATA: return []
- for item in L2_DATA[commenter]:
- if (comment_date0)
- create_test_property = False
- repo_cache = {repository: repo}
- packages = set([])
- chg_files = []
- package_categories = {}
- extra_labels = {'mtype':[]}
- add_external_category = False
- signing_categories = set([])
- new_package_message = ""
- mustClose = False
- reOpen = False
- releaseManagers = []
- signatures = {}
- watchers = []
- #Process Pull Request
- pkg_categories = set([])
- REGEX_TYPE_CMDS="^type\s+(([-+]|)[a-z][a-z0-9-]+)(\s*,\s*([-+]|)[a-z][a-z0-9-]+)*$"
- REGEX_EX_CMDS="^urgent$|^backport\s+(of\s+|)(#|http(s|):/+github\.com/+%s/+pull/+)\d+$" % (repo.full_name)
- known_ignore_tests="%s" % MULTILINE_COMMENTS_MAP["ignore_test(s|)"][0]
- REGEX_EX_IGNORE_CHKS='^ignore\s+((%s)(\s*,\s*(%s))*|none)$' % (known_ignore_tests, known_ignore_tests)
- REGEX_EX_ENABLE_TESTS='^enable\s+(%s)$' % MULTILINE_COMMENTS_MAP[ENABLE_TEST_PTRN][0]
- L2_DATA = init_l2_data (cms_repo)
- last_commit_date = None
- last_commit_obj = None
- push_test_issue = False
- requestor = issue.user.login.encode("ascii", "ignore").decode()
- ignore_tests = ''
- enable_tests = ''
- commit_statuses = None
- bot_status_name = "bot/jenkins"
- bot_ack_name = "bot/ack"
- bot_test_param_name = "bot/test_parameters"
- cms_status_prefix = "cms"
- bot_status = None
- code_checks_status = []
- pre_checks_state = {}
- default_pre_checks = ["code-checks"]
- #For future pre_checks
- #if prId>=somePRNumber: default_pre_checks+=["some","new","checks"]
- pre_checks_url = {}
- if issue.pull_request:
- pr = repo.get_pull(prId)
- if pr.changed_files==0:
- print("Ignoring: PR with no files changed")
- return
- if cmssw_repo and cms_repo and (pr.base.ref == CMSSW_DEVEL_BRANCH):
- if pr.state != "closed":
- print("This pull request must go in to master branch")
- if not dryRun:
- edit_pr(repo.full_name, prId, base="master")
- msg = format("%(gh_user_char)s%(user)s, %(dev_branch)s branch is closed for direct updates. cms-bot is going to move this PR to master branch.\n"
- "In future, please use cmssw master branch to submit your changes.\n",
- user=requestor,
- gh_user_char=gh_user_char,
- dev_branch=CMSSW_DEVEL_BRANCH)
- issue.create_comment(msg)
- return
- # A pull request is by default closed if the branch is a closed one.
- if is_closed_branch(pr.base.ref): mustClose = True
- # Process the changes for the given pull request so that we can determine the
- # signatures it requires.
- if cmssw_repo or not external_repo:
- if cmssw_repo:
- if (pr.base.ref=="master"): signing_categories.add("code-checks")
- updateMilestone(repo, issue, pr, dryRun)
- chg_files = get_changed_files(repo, pr)
- packages = sorted([x for x in set([cmssw_file2Package(repo_config, f)
- for f in chg_files])])
- for pkg_file in chg_files:
- for ex_lab, pkgs_regexp in list(CMSSW_LABELS.items()):
- for regex in pkgs_regexp:
- if regex.match(pkg_file):
- extra_labels['mtype'].append(ex_lab)
- print("Non-Blocking label:%s:%s:%s" % (ex_lab,regex.pattern,pkg_file))
- break
- if not extra_labels['mtype']: del extra_labels['mtype']
- print("Extra non-blocking labels:",extra_labels)
- print("First Package: ",packages[0])
- create_test_property = True
- else:
- add_external_category = True
- packages = set (["externals/"+repository])
- ex_pkg = external_to_package(repository)
- if ex_pkg: packages.add(ex_pkg)
- if (repo_org!=GH_CMSSW_ORGANIZATION) or (repo_name in VALID_CMS_SW_REPOS_FOR_TESTS):
- create_test_property = True
- if (repo_name == GH_CMSDIST_REPO) and (not re.match(VALID_CMSDIST_BRANCHES,pr.base.ref)):
- print("Skipping PR as it does not belong to valid CMSDIST branch")
- return
-
- print("Following packages affected:")
- print("\n".join(packages))
- for package in packages:
- package_categories[package] = set([])
- for category in get_package_categories(package):
- package_categories[package].add(category)
- pkg_categories.add(category)
- signing_categories.update(pkg_categories)
-
- # For PR, we always require tests.
- signing_categories.add("tests")
- if add_external_category: signing_categories.add("externals")
- if cms_repo:
- print("This pull request requires ORP approval")
- signing_categories.add("orp")
-
- print("Following categories affected:")
- print("\n".join(signing_categories))
-
- if cmssw_repo:
- # If there is a new package, add also a dummy "new" category.
- all_packages = [package for category_packages in list(CMSSW_CATEGORIES.values())
- for package in category_packages]
- has_category = all([package in all_packages for package in packages])
- if not has_category:
- new_package_message = "\nThe following packages do not have a category, yet:\n\n"
- new_package_message += "\n".join([package for package in packages if not package in all_packages]) + "\n"
- new_package_message += "Please create a PR for https://github.com/cms-sw/cms-bot/blob/master/categories_map.py to assign category\n"
- print(new_package_message)
- signing_categories.add("new-package")
-
- # Add watchers.yaml information to the WATCHERS dict.
- WATCHERS = read_repo_file(repo_config, "watchers.yaml", {})
- # Given the files modified by the PR, check if there are additional developers watching one or more.
- author = pr.user.login
- watchers = set([user for chg_file in chg_files
- for user, watched_regexp in list(WATCHERS.items())
- for regexp in watched_regexp
- if re.match("^" + regexp + ".*", chg_file) and user != author])
- #Handle category watchers
- catWatchers = read_repo_file(repo_config, "category-watchers.yaml", {})
- non_block_cats = [] if not 'mtype' in extra_labels else extra_labels['mtype']
- for user, cats in list(catWatchers.items()):
- for cat in cats:
- if (cat in signing_categories) or (cat in non_block_cats):
- print("Added ",user, " to watch due to cat",cat)
- watchers.add(user)
-
- # Handle watchers
- watchingGroups = read_repo_file(repo_config, "groups.yaml", {})
- for watcher in [x for x in watchers]:
- if not watcher in watchingGroups: continue
- watchers.remove(watcher)
- watchers.update(set(watchingGroups[watcher]))
- watchers = set([gh_user_char + u for u in watchers])
- print("Watchers " + ", ".join(watchers))
- last_commit_obj = get_last_commit(pr)
- if last_commit_obj is None: return
- last_commit = last_commit_obj.commit
- commit_statuses = last_commit_obj.get_combined_status().statuses
- bot_status = get_status(bot_status_name, commit_statuses)
- if not bot_status:
- bot_status_name = "bot/%s/jenkins" % prId
- bot_ack_name = "bot/%s/ack" % prId
- bot_test_param_name = "bot/%s/test_parameters" % prId
- cms_status_prefix = "cms/%s" % prId
- bot_status = get_status(bot_status_name, commit_statuses)
- code_checks_status = [s for s in commit_statuses if s.context == "%s/code-checks" % cms_status_prefix]
- print("PR Statuses:",commit_statuses)
- print(len(commit_statuses))
- last_commit_date = last_commit.committer.date
- print("Latest commit by ",last_commit.committer.name.encode("ascii", "ignore").decode()," at ",last_commit_date)
- print("Latest commit message: ",last_commit.message.encode("ascii", "ignore").decode())
- print("Latest commit sha: ",last_commit.sha)
- print("PR update time",pr.updated_at)
- print("Time UTC:",datetime.utcnow())
- if last_commit_date>datetime.utcnow():
- print("==== Future commit found ====")
- add_labels = True
- try: add_labels = repo_config.ADD_LABELS
- except: pass
- if (not dryRun) and add_labels:
- labels = [x.name.encode("ascii", "ignore").decode() for x in issue.labels]
- if not 'future-commit' in labels:
- labels.append('future-commit')
- issue.edit(labels=labels)
- return
- extra_rm = get_release_managers (pr.base.ref)
- if repository==CMSDIST_REPO_NAME:
- br = "_".join(pr.base.ref.split("/")[:2][-1].split("_")[:3])+"_X"
- if br: extra_rm=extra_rm+get_release_managers (br)
- releaseManagers=list(set(extra_rm+CMSSW_L1))
- else:
+ global L2_DATA
+ if (not force) and ignore_issue(repo_config, repo, issue):
+ return
+ gh_user_char = "@"
+ if not notify_user(issue):
+ gh_user_char = ""
+ api_rate_limits(gh)
+ prId = issue.number
+ repository = repo.full_name
+ repo_org, repo_name = repository.split("/", 1)
+ auto_test_repo = AUTO_TEST_REPOS
try:
- if (repo_config.OPEN_ISSUE_FOR_PUSH_TESTS) and (requestor == cmsbuild_user) and re.match(PUSH_TEST_ISSUE_MSG,issue.title):
+ if repo_config.AUTO_TEST_REPOS:
+ auto_test_repo = [repository]
+ else:
+ auto_test_repo = []
+ except:
+ pass
+ if not cmsbuild_user:
+ cmsbuild_user = repo_config.CMSBUILD_USER
+ print("Working on ", repo.full_name, " for PR/Issue ", prId, "with admin user", cmsbuild_user)
+ print("Notify User: ", gh_user_char)
+ set_gh_user(cmsbuild_user)
+ cmssw_repo = repo_name == GH_CMSSW_REPO
+ cms_repo = repo_org in EXTERNAL_REPOS
+ external_repo = (repository != CMSSW_REPO_NAME) and (
+ len([e for e in EXTERNAL_REPOS if repo_org == e]) > 0
+ )
+ create_test_property = False
+ repo_cache = {repository: repo}
+ packages = set([])
+ chg_files = []
+ package_categories = {}
+ extra_labels = {"mtype": []}
+ add_external_category = False
+ signing_categories = set([])
+ new_package_message = ""
+ mustClose = False
+ reOpen = False
+ releaseManagers = []
+ signatures = {}
+ watchers = []
+ # Process Pull Request
+ pkg_categories = set([])
+ REGEX_TYPE_CMDS = "^type\s+(([-+]|)[a-z][a-z0-9-]+)(\s*,\s*([-+]|)[a-z][a-z0-9-]+)*$"
+ REGEX_EX_CMDS = "^urgent$|^backport\s+(of\s+|)(#|http(s|):/+github\.com/+%s/+pull/+)\d+$" % (
+ repo.full_name
+ )
+ known_ignore_tests = "%s" % MULTILINE_COMMENTS_MAP["ignore_test(s|)"][0]
+ REGEX_EX_IGNORE_CHKS = "^ignore\s+((%s)(\s*,\s*(%s))*|none)$" % (
+ known_ignore_tests,
+ known_ignore_tests,
+ )
+ REGEX_EX_ENABLE_TESTS = "^enable\s+(%s)$" % MULTILINE_COMMENTS_MAP[ENABLE_TEST_PTRN][0]
+ L2_DATA = init_l2_data(cms_repo)
+ last_commit_date = None
+ last_commit_obj = None
+ push_test_issue = False
+ requestor = issue.user.login.encode("ascii", "ignore").decode()
+ ignore_tests = ""
+ enable_tests = ""
+ commit_statuses = None
+ bot_status_name = "bot/jenkins"
+ bot_ack_name = "bot/ack"
+ bot_test_param_name = "bot/test_parameters"
+ cms_status_prefix = "cms"
+ bot_status = None
+ code_checks_status = []
+ pre_checks_state = {}
+ default_pre_checks = ["code-checks"]
+ # For future pre_checks
+ # if prId>=somePRNumber: default_pre_checks+=["some","new","checks"]
+ pre_checks_url = {}
+ if issue.pull_request:
+ pr = repo.get_pull(prId)
+ if pr.changed_files == 0:
+ print("Ignoring: PR with no files changed")
+ return
+ if cmssw_repo and cms_repo and (pr.base.ref == CMSSW_DEVEL_BRANCH):
+ if pr.state != "closed":
+ print("This pull request must go in to master branch")
+ if not dryRun:
+ edit_pr(repo.full_name, prId, base="master")
+ msg = format(
+ "%(gh_user_char)s%(user)s, %(dev_branch)s branch is closed for direct updates. cms-bot is going to move this PR to master branch.\n"
+ "In future, please use cmssw master branch to submit your changes.\n",
+ user=requestor,
+ gh_user_char=gh_user_char,
+ dev_branch=CMSSW_DEVEL_BRANCH,
+ )
+ issue.create_comment(msg)
+ return
+ # A pull request is by default closed if the branch is a closed one.
+ if is_closed_branch(pr.base.ref):
+ mustClose = True
+ # Process the changes for the given pull request so that we can determine the
+ # signatures it requires.
+ if cmssw_repo or not external_repo:
+ if cmssw_repo:
+ if pr.base.ref == "master":
+ signing_categories.add("code-checks")
+ updateMilestone(repo, issue, pr, dryRun)
+ chg_files = get_changed_files(repo, pr)
+ packages = sorted(
+ [x for x in set([cmssw_file2Package(repo_config, f) for f in chg_files])]
+ )
+ for pkg_file in chg_files:
+ for ex_lab, pkgs_regexp in list(CMSSW_LABELS.items()):
+ for regex in pkgs_regexp:
+ if regex.match(pkg_file):
+ extra_labels["mtype"].append(ex_lab)
+ print(
+ "Non-Blocking label:%s:%s:%s" % (ex_lab, regex.pattern, pkg_file)
+ )
+ break
+ if not extra_labels["mtype"]:
+ del extra_labels["mtype"]
+ print("Extra non-blocking labels:", extra_labels)
+ print("First Package: ", packages[0])
+ create_test_property = True
+ else:
+ add_external_category = True
+ packages = set(["externals/" + repository])
+ ex_pkg = external_to_package(repository)
+ if ex_pkg:
+ packages.add(ex_pkg)
+ if (repo_org != GH_CMSSW_ORGANIZATION) or (repo_name in VALID_CMS_SW_REPOS_FOR_TESTS):
+ create_test_property = True
+ if (repo_name == GH_CMSDIST_REPO) and (
+ not re.match(VALID_CMSDIST_BRANCHES, pr.base.ref)
+ ):
+ print("Skipping PR as it does not belong to valid CMSDIST branch")
+ return
+
+ print("Following packages affected:")
+ print("\n".join(packages))
+ for package in packages:
+ package_categories[package] = set([])
+ for category in get_package_categories(package):
+ package_categories[package].add(category)
+ pkg_categories.add(category)
+ signing_categories.update(pkg_categories)
+
+ # For PR, we always require tests.
signing_categories.add("tests")
- push_test_issue = True
- except: pass
- if repository==CMSSW_REPO_NAME and re.match(CREATE_REPO, issue.title):
- with open("query-new-data-repo-issues-" +str(issue.number) + ".properties", "w") as f:
- f.write("ISSUE_NUMBER="+str(issue.number)+"\n")
-
- # Process the issue comments
- signatures = dict([(x, "pending") for x in signing_categories])
- extra_pre_checks = []
- pre_checks = []
- if issue.pull_request:
- pre_checks = [c for c in signing_categories if c in default_pre_checks]
- for pre_check in pre_checks+["code-checks"]:
- pre_checks_state[pre_check] = get_status_state("%s/%s" % (cms_status_prefix, pre_check), commit_statuses)
- print("Pre check status:",pre_checks_state)
- already_seen = None
- pull_request_updated = False
- comparison_done = False
- comparison_notrun = False
- mustMerge = False
- release_queue = ''
- release_arch = ''
- cmssw_prs = ''
- extra_wfs = ''
- global_test_params = {}
- assign_cats = {}
- hold = {}
- last_test_start_time = None
- abort_test = None
- need_external = False
- backport_pr_num = ""
- comp_warnings = False
- extra_testers = []
- all_comments = [issue]
- code_checks_tools = ""
- new_bot_tests = True
- test_comment = None
- trigger_test = False
- ack_comment = None
- test_params_msg = ""
- test_params_comment = None
- code_check_apply_patch = False
-
- #start of parsing comments section
- for c in issue.get_comments(): all_comments.append(c)
- for comment in all_comments:
- ack_comment = comment
- commenter = comment.user.login.encode("ascii", "ignore").decode()
- commenter_categories = get_commenter_categories(commenter, int(comment.created_at.strftime('%s')))
- valid_commenter = (commenter in TRIGGER_PR_TESTS + releaseManagers + [repo_org]) or (len(commenter_categories)>0)
- if (not valid_commenter) and (requestor!=commenter): continue
- comment_msg = comment.body.encode("ascii", "ignore").decode() if comment.body else ""
- # The first line is an invariant.
- comment_lines = [ l.strip() for l in comment_msg.split("\n") if l.strip() ]
- first_line = comment_lines[0:1]
- if not first_line: continue
- first_line = first_line[0]
- if (commenter == cmsbuild_user) and re.match(ISSUE_SEEN_MSG, first_line):
- already_seen = comment
- backport_pr_num = get_backported_pr(comment_msg)
- if issue.pull_request and last_commit_date:
- if (comment.created_at >= last_commit_date): pull_request_updated = False
- else: pull_request_updated = True
- continue
-
- assign_type, new_cats = get_assign_categories(first_line)
- if new_cats:
- if (assign_type == "new categories assigned:") and (commenter == cmsbuild_user):
- for ex_cat in new_cats:
- if ex_cat in assign_cats: assign_cats[ex_cat] = 1
- if commenter_categories or (commenter in CMSSW_ISSUES_TRACKERS):
- if assign_type == "assign":
- for ex_cat in new_cats:
- if not ex_cat in signing_categories:
- assign_cats[ex_cat] = 0
- signing_categories.add(ex_cat)
- signatures[ex_cat]="pending"
- elif assign_type == "unassign":
- for ex_cat in new_cats:
- if ex_cat in assign_cats:
- assign_cats.pop(ex_cat)
- signing_categories.remove(ex_cat)
- signatures.pop(ex_cat)
- continue
-
- # Some of the special users can say "hold" prevent automatic merging of
- # fully signed PRs.
- if re.match("^hold$", first_line, re.I):
- if commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS): hold[commenter]=1
- continue
- if re.match(REGEX_EX_CMDS, first_line, re.I):
- if commenter_categories or (commenter in releaseManagers + [requestor]):
- check_extra_labels(first_line.lower(), extra_labels)
- continue
- if re.match(REGEX_TYPE_CMDS, first_line, re.I):
- if commenter_categories or (commenter in releaseManagers + [requestor]):
- valid_labs = check_type_labels(first_line.lower(), extra_labels)
- if not dryRun:
- if valid_labs: set_comment_emoji(comment.id, repository, emoji="+1")
- else: set_comment_emoji(comment.id, repository, emoji="-1")
- if re.match(REGEX_EX_IGNORE_CHKS, first_line, re.I):
- if valid_commenter:
- ignore_tests = check_ignore_bot_tests (first_line.split(" ",1)[-1])
- continue
- if re.match(REGEX_EX_ENABLE_TESTS, first_line, re.I):
- if valid_commenter:
- enable_tests, ignore = check_enable_bot_tests (first_line.split(" ",1)[-1])
- if not dryRun:
- set_comment_emoji(comment.id, repository, emoji="+1")
- continue
- if re.match('^allow\s+@([^ ]+)\s+test\s+rights$',first_line, re.I):
- if commenter_categories or (commenter in releaseManagers):
- tester = first_line.split("@",1)[-1].split(" ",1)[0]
- if not tester in TRIGGER_PR_TESTS:
- TRIGGER_PR_TESTS.append(tester)
- extra_testers.append(tester)
- print("Added user in test category:",tester)
- continue
- if re.match("^unhold$", first_line, re.I):
- if 'orp' in commenter_categories:
- hold = {}
- elif commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS):
- if commenter in hold: del hold[commenter]
- continue
- if (commenter == cmsbuild_user) and (re.match("^"+HOLD_MSG+".+", first_line)):
- for u in first_line.split(HOLD_MSG,2)[1].split(","):
- u = u.strip().lstrip("@")
- if u in hold: hold[u]=0
- if CLOSE_REQUEST.match(first_line):
- if (commenter_categories or (commenter in releaseManagers)) or \
- ((not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)):
- reOpen = False
- if issue.state == "open":
- mustClose = True
- print("==>Closing request received from %s" % commenter)
- continue
- if REOPEN_REQUEST.match(first_line):
- if (commenter_categories or (commenter in releaseManagers)) or \
- ((not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)):
- mustClose = False
- if (issue.state == "closed") and (comment.created_at >= issue.closed_at):
- reOpen = True
- print("==>Reopen request received from %s" % commenter)
- continue
- if valid_commenter:
- valid_multiline_comment , test_params, test_params_m = multiline_check_function(first_line, comment_lines, repository)
- if test_params_m:
- test_params_msg = str(comment.id) + ":" + test_params_m
- test_params_comment = comment
- elif valid_multiline_comment:
- test_params_comment = comment
- global_test_params = dict(test_params)
- if 'ENABLE_BOT_TESTS' in global_test_params:
- enable_tests = global_test_params['ENABLE_BOT_TESTS']
- test_params_msg = str(comment.id) + ":" + dumps(global_test_params, sort_keys=True)
- continue
+ if add_external_category:
+ signing_categories.add("externals")
+ if cms_repo:
+ print("This pull request requires ORP approval")
+ signing_categories.add("orp")
+
+ print("Following categories affected:")
+ print("\n".join(signing_categories))
+
+ if cmssw_repo:
+ # If there is a new package, add also a dummy "new" category.
+ all_packages = [
+ package
+ for category_packages in list(CMSSW_CATEGORIES.values())
+ for package in category_packages
+ ]
+ has_category = all([package in all_packages for package in packages])
+ if not has_category:
+ new_package_message = "\nThe following packages do not have a category, yet:\n\n"
+ new_package_message += (
+ "\n".join([package for package in packages if not package in all_packages])
+ + "\n"
+ )
+ new_package_message += "Please create a PR for https://github.com/cms-sw/cms-bot/blob/master/categories_map.py to assign category\n"
+ print(new_package_message)
+ signing_categories.add("new-package")
+
+ # Add watchers.yaml information to the WATCHERS dict.
+ WATCHERS = read_repo_file(repo_config, "watchers.yaml", {})
+ # Given the files modified by the PR, check if there are additional developers watching one or more.
+ author = pr.user.login
+ watchers = set(
+ [
+ user
+ for chg_file in chg_files
+ for user, watched_regexp in list(WATCHERS.items())
+ for regexp in watched_regexp
+ if re.match("^" + regexp + ".*", chg_file) and user != author
+ ]
+ )
+ # Handle category watchers
+ catWatchers = read_repo_file(repo_config, "category-watchers.yaml", {})
+ non_block_cats = [] if not "mtype" in extra_labels else extra_labels["mtype"]
+ for user, cats in list(catWatchers.items()):
+ for cat in cats:
+ if (cat in signing_categories) or (cat in non_block_cats):
+ print("Added ", user, " to watch due to cat", cat)
+ watchers.add(user)
+
+ # Handle watchers
+ watchingGroups = read_repo_file(repo_config, "groups.yaml", {})
+ for watcher in [x for x in watchers]:
+ if not watcher in watchingGroups:
+ continue
+ watchers.remove(watcher)
+ watchers.update(set(watchingGroups[watcher]))
+ watchers = set([gh_user_char + u for u in watchers])
+ print("Watchers " + ", ".join(watchers))
+ last_commit_obj = get_last_commit(pr)
+ if last_commit_obj is None:
+ return
+ last_commit = last_commit_obj.commit
+ commit_statuses = last_commit_obj.get_combined_status().statuses
+ bot_status = get_status(bot_status_name, commit_statuses)
+ if not bot_status:
+ bot_status_name = "bot/%s/jenkins" % prId
+ bot_ack_name = "bot/%s/ack" % prId
+ bot_test_param_name = "bot/%s/test_parameters" % prId
+ cms_status_prefix = "cms/%s" % prId
+ bot_status = get_status(bot_status_name, commit_statuses)
+ code_checks_status = [
+ s for s in commit_statuses if s.context == "%s/code-checks" % cms_status_prefix
+ ]
+ print("PR Statuses:", commit_statuses)
+ print(len(commit_statuses))
+ last_commit_date = last_commit.committer.date
+ print(
+ "Latest commit by ",
+ last_commit.committer.name.encode("ascii", "ignore").decode(),
+ " at ",
+ last_commit_date,
+ )
+ print("Latest commit message: ", last_commit.message.encode("ascii", "ignore").decode())
+ print("Latest commit sha: ", last_commit.sha)
+ print("PR update time", pr.updated_at)
+ print("Time UTC:", datetime.utcnow())
+ if last_commit_date > datetime.utcnow():
+ print("==== Future commit found ====")
+ add_labels = True
+ try:
+ add_labels = repo_config.ADD_LABELS
+ except:
+ pass
+ if (not dryRun) and add_labels:
+ labels = [x.name.encode("ascii", "ignore").decode() for x in issue.labels]
+ if not "future-commit" in labels:
+ labels.append("future-commit")
+ issue.edit(labels=labels)
+ return
+ extra_rm = get_release_managers(pr.base.ref)
+ if repository == CMSDIST_REPO_NAME:
+ br = "_".join(pr.base.ref.split("/")[:2][-1].split("_")[:3]) + "_X"
+ if br:
+ extra_rm = extra_rm + get_release_managers(br)
+ releaseManagers = list(set(extra_rm + CMSSW_L1))
+ else:
+ try:
+ if (
+ (repo_config.OPEN_ISSUE_FOR_PUSH_TESTS)
+ and (requestor == cmsbuild_user)
+ and re.match(PUSH_TEST_ISSUE_MSG, issue.title)
+ ):
+ signing_categories.add("tests")
+ push_test_issue = True
+ except:
+ pass
+ if repository == CMSSW_REPO_NAME and re.match(CREATE_REPO, issue.title):
+ with open("query-new-data-repo-issues-" + str(issue.number) + ".properties", "w") as f:
+ f.write("ISSUE_NUMBER=" + str(issue.number) + "\n")
+
+ # Process the issue comments
+ signatures = dict([(x, "pending") for x in signing_categories])
+ extra_pre_checks = []
+ pre_checks = []
+ if issue.pull_request:
+ pre_checks = [c for c in signing_categories if c in default_pre_checks]
+ for pre_check in pre_checks + ["code-checks"]:
+ pre_checks_state[pre_check] = get_status_state(
+ "%s/%s" % (cms_status_prefix, pre_check), commit_statuses
+ )
+ print("Pre check status:", pre_checks_state)
+ already_seen = None
+ pull_request_updated = False
+ comparison_done = False
+ comparison_notrun = False
+ mustMerge = False
+ release_queue = ""
+ release_arch = ""
+ cmssw_prs = ""
+ extra_wfs = ""
+ global_test_params = {}
+ assign_cats = {}
+ hold = {}
+ last_test_start_time = None
+ abort_test = None
+ need_external = False
+ backport_pr_num = ""
+ comp_warnings = False
+ extra_testers = []
+ all_comments = [issue]
+ code_checks_tools = ""
+ new_bot_tests = True
+ test_comment = None
+ trigger_test = False
+ ack_comment = None
+ test_params_msg = ""
+ test_params_comment = None
+ code_check_apply_patch = False
+
+ # start of parsing comments section
+ for c in issue.get_comments():
+ all_comments.append(c)
+ for comment in all_comments:
+ ack_comment = comment
+ commenter = comment.user.login.encode("ascii", "ignore").decode()
+ commenter_categories = get_commenter_categories(
+ commenter, int(comment.created_at.strftime("%s"))
+ )
+ valid_commenter = (commenter in TRIGGER_PR_TESTS + releaseManagers + [repo_org]) or (
+ len(commenter_categories) > 0
+ )
+ if (not valid_commenter) and (requestor != commenter):
+ continue
+ comment_msg = comment.body.encode("ascii", "ignore").decode() if comment.body else ""
+ # The first line is an invariant.
+ comment_lines = [l.strip() for l in comment_msg.split("\n") if l.strip()]
+ first_line = comment_lines[0:1]
+ if not first_line:
+ continue
+ first_line = first_line[0]
+ if (commenter == cmsbuild_user) and re.match(ISSUE_SEEN_MSG, first_line):
+ already_seen = comment
+ backport_pr_num = get_backported_pr(comment_msg)
+ if issue.pull_request and last_commit_date:
+ if comment.created_at >= last_commit_date:
+ pull_request_updated = False
+ else:
+ pull_request_updated = True
+ continue
- if cmssw_repo:
- m = CODE_CHECKS_REGEXP.match(first_line)
- if m:
- first_line = "code-checks"
- code_check_apply_patch = False
- if m.group(1):
- code_checks_tools = m.group(1).strip().split(" ")[-1]
- if m.group(2):
- code_check_apply_patch = True
-
- # Ignore all other messages which are before last commit.
- if issue.pull_request and (comment.created_at < last_commit_date):
- continue
-
- if (cmssw_repo and first_line=="code-checks"):
- signatures[first_line] = "pending"
- if first_line not in pre_checks+extra_pre_checks:
- extra_pre_checks.append(first_line)
- if code_checks_status and (code_checks_status[0].updated_at>=comment.created_at):
- continue
- if first_line in pre_checks:
- if pre_checks_state["code-checks"] in ["pending", ""]:
- continue
- elif pre_checks_state["code-checks"] in ["pending"]:
- continue
- pre_checks_state["code-checks"] = ""
- print("Found:Code Checks request", code_checks_tools)
- continue
-
- # Check for cmsbuild_user comments and tests requests only for pull requests
- if commenter == cmsbuild_user:
- if not issue.pull_request and not push_test_issue: continue
- sec_line = comment_lines[1:2]
- if not sec_line: sec_line = ""
- else: sec_line = sec_line[0]
- if re.match("Comparison is ready", first_line):
- if ('tests' in signatures) and signatures["tests"]!='pending': comparison_done = True
- elif "-code-checks" == first_line:
- signatures["code-checks"] = "rejected"
- pre_checks_url["code-checks"] = comment.html_url
- elif "+code-checks" == first_line:
- signatures["code-checks"] = "approved"
- pre_checks_url["code-checks"] = comment.html_url
- elif re.match("^Comparison not run.+",first_line):
- if ('tests' in signatures) and signatures["tests"]!='pending': comparison_notrun = True
- elif re.match( FAILED_TESTS_MSG, first_line) or re.match(IGNORING_TESTS_MSG, first_line):
- signatures["tests"] = "pending"
- elif re.match("Pull request ([^ #]+|)[#][0-9]+ was updated[.].*", first_line):
- pull_request_updated = False
- elif re.match( TRIGERING_TESTS_MSG, first_line) or re.match( TRIGERING_TESTS_MSG1, first_line):
- signatures["tests"] = "started"
- last_test_start_time = comment.created_at
- abort_test = None
- need_external = False
- if sec_line.startswith("Using externals from cms-sw/cmsdist#"): need_external = True
- elif sec_line.startswith('Tested with other pull request'): need_external = True
- elif sec_line.startswith('Using extra pull request'): need_external = True
- elif re.match( TESTS_RESULTS_MSG, first_line):
- test_sha = sec_line.replace("Tested at: ","").strip()
- if (not push_test_issue) and (test_sha != last_commit.sha) and (test_sha != 'UNKNOWN') and (not "I had the issue " in first_line):
- print("Ignoring test results for sha:",test_sha)
- continue
- comparison_done = False
- comparison_notrun = False
- comp_warnings = False
- if "+1" in first_line:
- signatures["tests"] = "approved"
- comp_warnings = len([1 for l in comment_lines if 'Compilation Warnings: Yes' in l ])>0
- pre_checks_url["tests"] = comment.html_url
- elif "-1" in first_line:
- signatures["tests"] = "rejected"
- pre_checks_url["tests"] = comment.html_url
- else:
- signatures["tests"] = "pending"
- print('Previous tests already finished, resetting test request state to ',signatures["tests"])
+ assign_type, new_cats = get_assign_categories(first_line)
+ if new_cats:
+ if (assign_type == "new categories assigned:") and (commenter == cmsbuild_user):
+ for ex_cat in new_cats:
+ if ex_cat in assign_cats:
+ assign_cats[ex_cat] = 1
+ if commenter_categories or (commenter in CMSSW_ISSUES_TRACKERS):
+ if assign_type == "assign":
+ for ex_cat in new_cats:
+ if not ex_cat in signing_categories:
+ assign_cats[ex_cat] = 0
+ signing_categories.add(ex_cat)
+ signatures[ex_cat] = "pending"
+ elif assign_type == "unassign":
+ for ex_cat in new_cats:
+ if ex_cat in assign_cats:
+ assign_cats.pop(ex_cat)
+ signing_categories.remove(ex_cat)
+ signatures.pop(ex_cat)
+ continue
- if (issue.pull_request or push_test_issue):
- # Check if the release manager asked for merging this.
- if ((commenter in releaseManagers) or ('orp' in commenter_categories)) and re.match("^\s*(merge)\s*$", first_line, re.I):
- mustMerge = True
- mustClose = False
- if ('orp' in commenter_categories) and ('orp' in signatures): signatures["orp"] = "approved"
- continue
-
- # Check if the someone asked to trigger the tests
- if valid_commenter:
- ok, v2, v3, v4 = check_test_cmd(first_line, repository, global_test_params)
- if ok:
- test_comment = comment
- abort_test = None
- cmssw_prs = v2
- extra_wfs = v3
- release_queue = v4
- release_arch = ''
- if '/' in release_queue:
- release_queue, release_arch = release_queue.split('/',1)
- elif re.match('^'+ARCH_PATTERN+'$', release_queue):
- release_arch = release_queue
- release_queue = ''
- print('Tests requested:', commenter, 'asked to test this PR with cmssw_prs=%s, release_queue=%s, arch=%s and workflows=%s' % (cmssw_prs, release_queue, release_arch, extra_wfs))
- print("Comment message:",first_line)
- signatures["tests"] = "pending"
- continue
- elif REGEX_TEST_ABORT.match(first_line) and (signatures["tests"] == "pending"):
- abort_test = comment
- test_comment = None
- signatures["tests"] = "pending"
-
- # Check L2 signoff for users in this PR signing categories
- if [ x for x in commenter_categories if x in signing_categories]:
- ctype = ""
- selected_cats = []
- if re.match("^([+]1|approve[d]?|sign|signed)$", first_line, re.I):
- ctype = "+1"
- selected_cats = commenter_categories
- elif re.match("^([-]1|reject|rejected)$", first_line, re.I):
- ctype = "-1"
- selected_cats = commenter_categories
- elif re.match("^[+-][a-z][a-z0-9-]+$", first_line, re.I):
- category_name = first_line[1:].lower()
- if category_name in commenter_categories:
- ctype = first_line[0]+"1"
- selected_cats = [ category_name ]
- if ctype == "+1":
- for sign in selected_cats:
- signatures[sign] = "approved"
- if (test_comment is None) and ((repository in auto_test_repo) or ('*' in auto_test_repo)):
- test_comment = comment
- if sign == "orp": mustClose = False
- elif ctype == "-1":
- for sign in selected_cats:
- signatures[sign] = "rejected"
- if sign == "orp": mustClose = False
- continue
-
- # end of parsing comments section
-
- if push_test_issue:
- auto_close_push_test_issue = True
- try: auto_close_push_test_issue=repo_config.AUTO_CLOSE_PUSH_TESTS_ISSUE
- except: pass
- if auto_close_push_test_issue and (issue.state == "open") and ('tests' in signatures) and ((signatures["tests"] in ["approved","rejected"]) or abort_test):
- print("Closing the issue as it has been tested/aborted")
- if not dryRun: issue.edit(state="closed")
- if abort_test:
- job, bnum = get_jenkins_job(issue)
- if job and bnum:
- params = {}
- params["JENKINS_PROJECT_TO_KILL"]=job
- params["JENKINS_BUILD_NUMBER"]=bnum
- create_property_file("trigger-abort-%s" % job, params, dryRun)
- return
-
- is_hold = len(hold)>0
- new_blocker = False
- blockers = ""
- for u in hold:
- blockers += " "+gh_user_char+u+","
- if hold[u]: new_blocker = True
- blockers = blockers.rstrip(",")
-
- new_assign_cats = []
- for ex_cat in assign_cats:
- if assign_cats[ex_cat]==1: continue
- new_assign_cats.append(ex_cat)
-
- print("All assigned cats:",",".join(list(assign_cats.keys())))
- print("Newly assigned cats:",",".join(new_assign_cats))
- print("Ignore tests:",ignore_tests)
- print("Enable tests:",enable_tests)
- print("Tests: %s" % (cmssw_prs))
- print("Abort:",abort_test)
- print("Test:",test_comment, bot_status)
-
- dryRunOrig = dryRun
- for cat in pre_checks:
- if (cat in signatures) and (signatures[cat]!="approved"):
- dryRun=True
- break
-
- old_labels = set([x.name.encode("ascii", "ignore").decode() for x in issue.labels])
- print("Stats:",backport_pr_num,extra_labels)
- print("Old Labels:",sorted(old_labels))
- print("Compilation Warnings: ",comp_warnings)
- print("Singnatures: ",signatures)
- if "mtype" in extra_labels:
- extra_labels["mtype"] = list(set(extra_labels["mtype"]))
- if "type" in extra_labels:
- extra_labels["type"] = [extra_labels["type"][-1]]
-
- #Always set test pending label
- if "tests" in signatures:
- if test_comment is not None:
- turl = test_comment.html_url
- if bot_status:
- print("BOT STATUS:\n %s\n %s\n %s\n %s" % (bot_status,bot_status.description,bot_status.target_url,test_comment.html_url))
- if bot_status and bot_status.description.startswith("Old style tests"):
- new_bot_tests = False
- elif (not bot_status) and (signatures["tests"]!="pending"):
- new_bot_tests = False
- if (not bot_status) or (bot_status.target_url != turl):
- if bot_status or (signatures["tests"]=="pending"):
- new_bot_tests = True
- trigger_test = True
- signatures["tests"]="started"
- desc = "requested by %s at %s UTC." % (test_comment.user.login.encode("ascii", "ignore").decode(), test_comment.created_at)
- if not new_bot_tests:
- desc = "Old style tests %s" % desc
- else:
- desc = "Tests %s" % desc
- print(desc)
- if not dryRun:
- last_commit_obj.create_status("success", description=desc, target_url=turl, context=bot_status_name)
- set_comment_emoji(test_comment.id, repository)
- if bot_status:
- print(bot_status.target_url,turl,signatures["tests"],bot_status.description)
- if bot_status and bot_status.target_url == turl and signatures["tests"]=="pending" and (" requested by " in bot_status.description):
- signatures["tests"]="started"
- if get_status_state("%s/unknown/release" % cms_status_prefix, commit_statuses) == "error":
- signatures["tests"]="pending"
- if signatures["tests"]=="started" and new_bot_tests:
- lab_stats = {}
- for status in commit_statuses:
- if not status.context.startswith(cms_status_prefix+"/"): continue
- cdata = status.context.split("/")
- if cdata[-1] not in ["optional", "required"]:
+                # Some of the special users can say "hold" to prevent automatic merging of
+ # fully signed PRs.
+ if re.match("^hold$", first_line, re.I):
+ if commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS):
+ hold[commenter] = 1
+ continue
+ if re.match(REGEX_EX_CMDS, first_line, re.I):
+ if commenter_categories or (commenter in releaseManagers + [requestor]):
+ check_extra_labels(first_line.lower(), extra_labels)
continue
- if (cdata[-1] not in lab_stats) or (cdata[-1] == 'required'): lab_stats[cdata[-1]] = []
- lab_stats[cdata[-1]].append("pending")
- if status.state == "pending":
+ if re.match(REGEX_TYPE_CMDS, first_line, re.I):
+ if commenter_categories or (commenter in releaseManagers + [requestor]):
+ valid_labs = check_type_labels(first_line.lower(), extra_labels)
+ if not dryRun:
+ if valid_labs:
+ set_comment_emoji(comment.id, repository, emoji="+1")
+ else:
+ set_comment_emoji(comment.id, repository, emoji="-1")
+ if re.match(REGEX_EX_IGNORE_CHKS, first_line, re.I):
+ if valid_commenter:
+ ignore_tests = check_ignore_bot_tests(first_line.split(" ", 1)[-1])
continue
- scontext = "/".join(cdata[:-1])
- all_states = {}
- result_url = ""
- for s in [i for i in commit_statuses if ((i.context==scontext) or (i.context.startswith(scontext+"/")))]:
- if (not result_url) and ('/jenkins-artifacts/' in s.target_url):
- xdata = s.target_url.split("/")
- while xdata and (not xdata[-2].startswith('PR-')):
- xdata.pop()
- if xdata: result_url = "/".join(xdata)
- if s.context == status.context: continue
- if s.state not in all_states: all_states[s.state] = []
- all_states[s.state].append(s.context)
- print("Test status for %s: %s" % (status.context, all_states))
- if "pending" in all_states:
- if status.description.startswith("Finished"):
- print("Some test might have been restarted for %s. Resetting the status" % status.context)
- if not dryRun:
- last_commit_obj.create_status("success", description="OK", target_url=status.target_url, context=status.context)
+ if re.match(REGEX_EX_ENABLE_TESTS, first_line, re.I):
+ if valid_commenter:
+ enable_tests, ignore = check_enable_bot_tests(first_line.split(" ", 1)[-1])
+ if not dryRun:
+ set_comment_emoji(comment.id, repository, emoji="+1")
continue
- if "success" in all_states:
- lab_stats[cdata[-1]][-1] = "success"
- if "error" in all_states:
- if [c for c in all_states['error'] if ('/opt/' not in c)]:
- lab_stats[cdata[-1]][-1] = "error"
- print("Final Status:",status.context,cdata[-1],lab_stats[cdata[-1]][-1],status.description)
- if (lab_stats[cdata[-1]][-1] != "pending") and (not status.description.startswith("Finished")):
- if result_url:
- url = result_url.replace("/SDT/jenkins-artifacts/", "/SDT/cgi-bin/get_pr_results/jenkins-artifacts/")+"/pr-result"
- print("PR Result:", url)
- e, o = run_cmd("curl -k -s -L --max-time 60 %s" % url)
- if e:
- print(o)
- raise Exception("System-error: unable to get PR result")
- if o and (not dryRun):
- res="+1"
- if lab_stats[cdata[-1]][-1]=="error": res="-1"
- res = "%s\n\n%s" % (res, o)
- issue.create_comment(res)
+ if re.match("^allow\s+@([^ ]+)\s+test\s+rights$", first_line, re.I):
+ if commenter_categories or (commenter in releaseManagers):
+ tester = first_line.split("@", 1)[-1].split(" ", 1)[0]
+ if not tester in TRIGGER_PR_TESTS:
+ TRIGGER_PR_TESTS.append(tester)
+ extra_testers.append(tester)
+ print("Added user in test category:", tester)
+ continue
+ if re.match("^unhold$", first_line, re.I):
+ if "orp" in commenter_categories:
+ hold = {}
+ elif commenter_categories or (commenter in releaseManagers + PR_HOLD_MANAGERS):
+ if commenter in hold:
+ del hold[commenter]
+ continue
+ if (commenter == cmsbuild_user) and (re.match("^" + HOLD_MSG + ".+", first_line)):
+ for u in first_line.split(HOLD_MSG, 2)[1].split(","):
+ u = u.strip().lstrip("@")
+ if u in hold:
+ hold[u] = 0
+ if CLOSE_REQUEST.match(first_line):
+ if (commenter_categories or (commenter in releaseManagers)) or (
+ (not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)
+ ):
+ reOpen = False
+ if issue.state == "open":
+ mustClose = True
+ print("==>Closing request received from %s" % commenter)
+ continue
+ if REOPEN_REQUEST.match(first_line):
+ if (commenter_categories or (commenter in releaseManagers)) or (
+ (not issue.pull_request) and (commenter in CMSSW_ISSUES_TRACKERS)
+ ):
+ mustClose = False
+ if (issue.state == "closed") and (comment.created_at >= issue.closed_at):
+ reOpen = True
+ print("==>Reopen request received from %s" % commenter)
+ continue
+ if valid_commenter:
+ valid_multiline_comment, test_params, test_params_m = multiline_check_function(
+ first_line, comment_lines, repository
+ )
+ if test_params_m:
+ test_params_msg = str(comment.id) + ":" + test_params_m
+ test_params_comment = comment
+ elif valid_multiline_comment:
+ test_params_comment = comment
+ global_test_params = dict(test_params)
+ if "ENABLE_BOT_TESTS" in global_test_params:
+ enable_tests = global_test_params["ENABLE_BOT_TESTS"]
+ test_params_msg = str(comment.id) + ":" + dumps(global_test_params, sort_keys=True)
+ continue
+
+ if cmssw_repo:
+ m = CODE_CHECKS_REGEXP.match(first_line)
+ if m:
+ first_line = "code-checks"
+ code_check_apply_patch = False
+ if m.group(1):
+ code_checks_tools = m.group(1).strip().split(" ")[-1]
+ if m.group(2):
+ code_check_apply_patch = True
+
+ # Ignore all other messages which are before last commit.
+ if issue.pull_request and (comment.created_at < last_commit_date):
+ continue
+
+ if cmssw_repo and first_line == "code-checks":
+ signatures[first_line] = "pending"
+ if first_line not in pre_checks + extra_pre_checks:
+ extra_pre_checks.append(first_line)
+ if code_checks_status and (code_checks_status[0].updated_at >= comment.created_at):
+ continue
+ if first_line in pre_checks:
+ if pre_checks_state["code-checks"] in ["pending", ""]:
+ continue
+ elif pre_checks_state["code-checks"] in ["pending"]:
+ continue
+ pre_checks_state["code-checks"] = ""
+ print("Found:Code Checks request", code_checks_tools)
+ continue
+
+ # Check for cmsbuild_user comments and tests requests only for pull requests
+ if commenter == cmsbuild_user:
+ if not issue.pull_request and not push_test_issue:
+ continue
+ sec_line = comment_lines[1:2]
+ if not sec_line:
+ sec_line = ""
+ else:
+ sec_line = sec_line[0]
+ if re.match("Comparison is ready", first_line):
+ if ("tests" in signatures) and signatures["tests"] != "pending":
+ comparison_done = True
+ elif "-code-checks" == first_line:
+ signatures["code-checks"] = "rejected"
+ pre_checks_url["code-checks"] = comment.html_url
+ elif "+code-checks" == first_line:
+ signatures["code-checks"] = "approved"
+ pre_checks_url["code-checks"] = comment.html_url
+ elif re.match("^Comparison not run.+", first_line):
+ if ("tests" in signatures) and signatures["tests"] != "pending":
+ comparison_notrun = True
+ elif re.match(FAILED_TESTS_MSG, first_line) or re.match(
+ IGNORING_TESTS_MSG, first_line
+ ):
+ signatures["tests"] = "pending"
+ elif re.match("Pull request ([^ #]+|)[#][0-9]+ was updated[.].*", first_line):
+ pull_request_updated = False
+ elif re.match(TRIGERING_TESTS_MSG, first_line) or re.match(
+ TRIGERING_TESTS_MSG1, first_line
+ ):
+ signatures["tests"] = "started"
+ last_test_start_time = comment.created_at
+ abort_test = None
+ need_external = False
+ if sec_line.startswith("Using externals from cms-sw/cmsdist#"):
+ need_external = True
+ elif sec_line.startswith("Tested with other pull request"):
+ need_external = True
+ elif sec_line.startswith("Using extra pull request"):
+ need_external = True
+ elif re.match(TESTS_RESULTS_MSG, first_line):
+ test_sha = sec_line.replace("Tested at: ", "").strip()
+ if (
+ (not push_test_issue)
+ and (test_sha != last_commit.sha)
+ and (test_sha != "UNKNOWN")
+ and (not "I had the issue " in first_line)
+ ):
+ print("Ignoring test results for sha:", test_sha)
+ continue
+ comparison_done = False
+ comparison_notrun = False
+ comp_warnings = False
+ if "+1" in first_line:
+ signatures["tests"] = "approved"
+ comp_warnings = (
+ len([1 for l in comment_lines if "Compilation Warnings: Yes" in l]) > 0
+ )
+ pre_checks_url["tests"] = comment.html_url
+ elif "-1" in first_line:
+ signatures["tests"] = "rejected"
+ pre_checks_url["tests"] = comment.html_url
+ else:
+ signatures["tests"] = "pending"
+ print(
+ "Previous tests already finished, resetting test request state to ",
+ signatures["tests"],
+ )
+
+ if issue.pull_request or push_test_issue:
+ # Check if the release manager asked for merging this.
+ if ((commenter in releaseManagers) or ("orp" in commenter_categories)) and re.match(
+ "^\s*(merge)\s*$", first_line, re.I
+ ):
+ mustMerge = True
+ mustClose = False
+ if ("orp" in commenter_categories) and ("orp" in signatures):
+ signatures["orp"] = "approved"
+ continue
+
+            # Check if someone asked to trigger the tests
+ if valid_commenter:
+ ok, v2, v3, v4 = check_test_cmd(first_line, repository, global_test_params)
+ if ok:
+ test_comment = comment
+ abort_test = None
+ cmssw_prs = v2
+ extra_wfs = v3
+ release_queue = v4
+ release_arch = ""
+ if "/" in release_queue:
+ release_queue, release_arch = release_queue.split("/", 1)
+ elif re.match("^" + ARCH_PATTERN + "$", release_queue):
+ release_arch = release_queue
+ release_queue = ""
+ print(
+ "Tests requested:",
+ commenter,
+ "asked to test this PR with cmssw_prs=%s, release_queue=%s, arch=%s and workflows=%s"
+ % (cmssw_prs, release_queue, release_arch, extra_wfs),
+ )
+ print("Comment message:", first_line)
+ signatures["tests"] = "pending"
+ continue
+ elif REGEX_TEST_ABORT.match(first_line) and (signatures["tests"] == "pending"):
+ abort_test = comment
+ test_comment = None
+ signatures["tests"] = "pending"
+
+ # Check L2 signoff for users in this PR signing categories
+ if [x for x in commenter_categories if x in signing_categories]:
+ ctype = ""
+ selected_cats = []
+ if re.match("^([+]1|approve[d]?|sign|signed)$", first_line, re.I):
+ ctype = "+1"
+ selected_cats = commenter_categories
+ elif re.match("^([-]1|reject|rejected)$", first_line, re.I):
+ ctype = "-1"
+ selected_cats = commenter_categories
+ elif re.match("^[+-][a-z][a-z0-9-]+$", first_line, re.I):
+ category_name = first_line[1:].lower()
+ if category_name in commenter_categories:
+ ctype = first_line[0] + "1"
+ selected_cats = [category_name]
+ if ctype == "+1":
+ for sign in selected_cats:
+ signatures[sign] = "approved"
+ if (test_comment is None) and (
+ (repository in auto_test_repo) or ("*" in auto_test_repo)
+ ):
+ test_comment = comment
+ if sign == "orp":
+ mustClose = False
+ elif ctype == "-1":
+ for sign in selected_cats:
+ signatures[sign] = "rejected"
+ if sign == "orp":
+ mustClose = False
+ continue
+
+ # end of parsing comments section
+
+ if push_test_issue:
+ auto_close_push_test_issue = True
+ try:
+ auto_close_push_test_issue = repo_config.AUTO_CLOSE_PUSH_TESTS_ISSUE
+ except:
+ pass
+ if (
+ auto_close_push_test_issue
+ and (issue.state == "open")
+ and ("tests" in signatures)
+ and ((signatures["tests"] in ["approved", "rejected"]) or abort_test)
+ ):
+ print("Closing the issue as it has been tested/aborted")
+ if not dryRun:
+ issue.edit(state="closed")
+ if abort_test:
+ job, bnum = get_jenkins_job(issue)
+ if job and bnum:
+ params = {}
+ params["JENKINS_PROJECT_TO_KILL"] = job
+ params["JENKINS_BUILD_NUMBER"] = bnum
+ create_property_file("trigger-abort-%s" % job, params, dryRun)
+ return
+
+ is_hold = len(hold) > 0
+ new_blocker = False
+ blockers = ""
+ for u in hold:
+ blockers += " " + gh_user_char + u + ","
+ if hold[u]:
+ new_blocker = True
+ blockers = blockers.rstrip(",")
+
+ new_assign_cats = []
+ for ex_cat in assign_cats:
+ if assign_cats[ex_cat] == 1:
+ continue
+ new_assign_cats.append(ex_cat)
+
+ print("All assigned cats:", ",".join(list(assign_cats.keys())))
+ print("Newly assigned cats:", ",".join(new_assign_cats))
+ print("Ignore tests:", ignore_tests)
+ print("Enable tests:", enable_tests)
+ print("Tests: %s" % (cmssw_prs))
+ print("Abort:", abort_test)
+ print("Test:", test_comment, bot_status)
+
+ dryRunOrig = dryRun
+ for cat in pre_checks:
+ if (cat in signatures) and (signatures[cat] != "approved"):
+ dryRun = True
+ break
+
+ old_labels = set([x.name.encode("ascii", "ignore").decode() for x in issue.labels])
+ print("Stats:", backport_pr_num, extra_labels)
+ print("Old Labels:", sorted(old_labels))
+ print("Compilation Warnings: ", comp_warnings)
+ print("Singnatures: ", signatures)
+ if "mtype" in extra_labels:
+ extra_labels["mtype"] = list(set(extra_labels["mtype"]))
+ if "type" in extra_labels:
+ extra_labels["type"] = [extra_labels["type"][-1]]
+
+ # Always set test pending label
+ if "tests" in signatures:
+ if test_comment is not None:
+ turl = test_comment.html_url
+ if bot_status:
+ print(
+ "BOT STATUS:\n %s\n %s\n %s\n %s"
+ % (
+ bot_status,
+ bot_status.description,
+ bot_status.target_url,
+ test_comment.html_url,
+ )
+ )
+ if bot_status and bot_status.description.startswith("Old style tests"):
+ new_bot_tests = False
+ elif (not bot_status) and (signatures["tests"] != "pending"):
+ new_bot_tests = False
+ if (not bot_status) or (bot_status.target_url != turl):
+ if bot_status or (signatures["tests"] == "pending"):
+ new_bot_tests = True
+ trigger_test = True
+ signatures["tests"] = "started"
+ desc = "requested by %s at %s UTC." % (
+ test_comment.user.login.encode("ascii", "ignore").decode(),
+ test_comment.created_at,
+ )
+ if not new_bot_tests:
+ desc = "Old style tests %s" % desc
+ else:
+ desc = "Tests %s" % desc
+ print(desc)
+ if not dryRun:
+ last_commit_obj.create_status(
+ "success", description=desc, target_url=turl, context=bot_status_name
+ )
+ set_comment_emoji(test_comment.id, repository)
+ if bot_status:
+ print(bot_status.target_url, turl, signatures["tests"], bot_status.description)
+ if (
+ bot_status
+ and bot_status.target_url == turl
+ and signatures["tests"] == "pending"
+ and (" requested by " in bot_status.description)
+ ):
+ signatures["tests"] = "started"
+ if (
+ get_status_state("%s/unknown/release" % cms_status_prefix, commit_statuses)
+ == "error"
+ ):
+ signatures["tests"] = "pending"
+ if signatures["tests"] == "started" and new_bot_tests:
+ lab_stats = {}
+ for status in commit_statuses:
+ if not status.context.startswith(cms_status_prefix + "/"):
+ continue
+ cdata = status.context.split("/")
+ if cdata[-1] not in ["optional", "required"]:
+ continue
+ if (cdata[-1] not in lab_stats) or (cdata[-1] == "required"):
+ lab_stats[cdata[-1]] = []
+ lab_stats[cdata[-1]].append("pending")
+ if status.state == "pending":
+ continue
+ scontext = "/".join(cdata[:-1])
+ all_states = {}
+ result_url = ""
+ for s in [
+ i
+ for i in commit_statuses
+ if ((i.context == scontext) or (i.context.startswith(scontext + "/")))
+ ]:
+ if (not result_url) and ("/jenkins-artifacts/" in s.target_url):
+ xdata = s.target_url.split("/")
+ while xdata and (not xdata[-2].startswith("PR-")):
+ xdata.pop()
+ if xdata:
+ result_url = "/".join(xdata)
+ if s.context == status.context:
+ continue
+ if s.state not in all_states:
+ all_states[s.state] = []
+ all_states[s.state].append(s.context)
+ print("Test status for %s: %s" % (status.context, all_states))
+ if "pending" in all_states:
+ if status.description.startswith("Finished"):
+ print(
+ "Some test might have been restarted for %s. Resetting the status"
+ % status.context
+ )
+ if not dryRun:
+ last_commit_obj.create_status(
+ "success",
+ description="OK",
+ target_url=status.target_url,
+ context=status.context,
+ )
+ continue
+ if "success" in all_states:
+ lab_stats[cdata[-1]][-1] = "success"
+ if "error" in all_states:
+ if [c for c in all_states["error"] if ("/opt/" not in c)]:
+ lab_stats[cdata[-1]][-1] = "error"
+ print(
+ "Final Status:",
+ status.context,
+ cdata[-1],
+ lab_stats[cdata[-1]][-1],
+ status.description,
+ )
+ if (lab_stats[cdata[-1]][-1] != "pending") and (
+ not status.description.startswith("Finished")
+ ):
+ if result_url:
+ url = (
+ result_url.replace(
+ "/SDT/jenkins-artifacts/",
+ "/SDT/cgi-bin/get_pr_results/jenkins-artifacts/",
+ )
+ + "/pr-result"
+ )
+ print("PR Result:", url)
+ e, o = run_cmd("curl -k -s -L --max-time 60 %s" % url)
+ if e:
+ print(o)
+ raise Exception("System-error: unable to get PR result")
+ if o and (not dryRun):
+ res = "+1"
+ if lab_stats[cdata[-1]][-1] == "error":
+ res = "-1"
+ res = "%s\n\n%s" % (res, o)
+ issue.create_comment(res)
+ if not dryRun:
+ last_commit_obj.create_status(
+ "success",
+ description="Finished",
+ target_url=status.target_url,
+ context=status.context,
+ )
+ print("Lab Status", lab_stats)
+ lab_state = "required"
+ if lab_state not in lab_stats:
+ lab_state = "optional"
+ if (lab_state in lab_stats) and ("pending" not in lab_stats[lab_state]):
+ signatures["tests"] = "approved"
+ if "error" in lab_stats[lab_state]:
+ signatures["tests"] = "rejected"
+ elif not bot_status:
if not dryRun:
- last_commit_obj.create_status("success", description="Finished", target_url=status.target_url, context=status.context)
- print("Lab Status",lab_stats)
- lab_state = "required"
- if lab_state not in lab_stats: lab_state = "optional"
- if (lab_state in lab_stats) and ("pending" not in lab_stats[lab_state]):
- signatures["tests"]="approved"
- if "error" in lab_stats[lab_state]:
- signatures["tests"]="rejected"
- elif not bot_status:
- if not dryRun:
- last_commit_obj.create_status("pending", description="Waiting for authorized user to issue the test command.", context=bot_status_name)
- else:
- print("DryRun: Setting status Waiting for authorized user to issue the test command.")
-
- # Labels coming from signature.
- labels = []
- for cat in signing_categories:
- l = cat+"-pending"
- if cat in signatures: l = cat+"-"+signatures[cat]
- labels.append(l)
-
- if not issue.pull_request and len(signing_categories)==0:
- labels.append("pending-assignment")
- if is_hold: labels.append("hold")
-
- if "backport" in extra_labels:
- if backport_pr_num!=extra_labels["backport"][1]:
- try:
- bp_pr = repo.get_pull(int(extra_labels["backport"][1]))
- backport_pr_num=extra_labels["backport"][1]
- if bp_pr.merged: extra_labels["backport"][0]="backport-ok"
- except Exception as e :
- print("Error: Unknown PR", backport_pr_num,"\n",e)
- backport_pr_num=""
- extra_labels.pop("backport")
-
- if already_seen:
- if dryRun: print("Update PR seen message to include backport PR number",backport_pr_num)
+ last_commit_obj.create_status(
+ "pending",
+ description="Waiting for authorized user to issue the test command.",
+ context=bot_status_name,
+ )
+ else:
+ print(
+ "DryRun: Setting status Waiting for authorized user to issue the test command."
+ )
+
+ # Labels coming from signature.
+ labels = []
+ for cat in signing_categories:
+ l = cat + "-pending"
+ if cat in signatures:
+ l = cat + "-" + signatures[cat]
+ labels.append(l)
+
+ if not issue.pull_request and len(signing_categories) == 0:
+ labels.append("pending-assignment")
+ if is_hold:
+ labels.append("hold")
+
+ if "backport" in extra_labels:
+ if backport_pr_num != extra_labels["backport"][1]:
+ try:
+ bp_pr = repo.get_pull(int(extra_labels["backport"][1]))
+ backport_pr_num = extra_labels["backport"][1]
+ if bp_pr.merged:
+ extra_labels["backport"][0] = "backport-ok"
+ except Exception as e:
+ print("Error: Unknown PR", backport_pr_num, "\n", e)
+ backport_pr_num = ""
+ extra_labels.pop("backport")
+
+ if already_seen:
+ if dryRun:
+ print("Update PR seen message to include backport PR number", backport_pr_num)
+ else:
+ new_msg = ""
+ for l in already_seen.body.encode("ascii", "ignore").decode().split("\n"):
+ if BACKPORT_STR in l:
+ continue
+ new_msg += l + "\n"
+ if backport_pr_num:
+ new_msg = "%s%s%s\n" % (new_msg, BACKPORT_STR, backport_pr_num)
+ already_seen.edit(body=new_msg)
+ elif "backport-ok" in old_labels:
+ extra_labels["backport"][0] = "backport-ok"
+
+ # Add additional labels
+ for lab in extra_testers:
+ labels.append("allow-" + lab)
+ for lab in extra_labels:
+ if lab != "mtype":
+ labels.append(extra_labels[lab][0])
+ else:
+ for slab in extra_labels[lab]:
+ labels.append(slab)
+ if comp_warnings:
+ labels.append("compilation-warnings")
+
+ if cms_repo and issue.pull_request and (not new_bot_tests):
+ if comparison_done:
+ labels.append("comparison-available")
+ elif comparison_notrun:
+ labels.append("comparison-notrun")
else:
- new_msg = ""
- for l in already_seen.body.encode("ascii", "ignore").decode().split("\n"):
- if BACKPORT_STR in l: continue
- new_msg += l+"\n"
- if backport_pr_num: new_msg="%s%s%s\n" % (new_msg, BACKPORT_STR, backport_pr_num)
- already_seen.edit(body=new_msg)
- elif ("backport-ok" in old_labels):
- extra_labels["backport"][0]="backport-ok"
-
- # Add additional labels
- for lab in extra_testers: labels.append("allow-"+lab)
- for lab in extra_labels:
- if lab != "mtype":
- labels.append(extra_labels[lab][0])
+ labels.append("comparison-pending")
+
+ if ("PULL_REQUESTS" in global_test_params) or cmssw_prs:
+ need_external = True
+    # Now update the labels.
+ xlabs = ["backport", "urgent", "backport-ok", "compilation-warnings"]
+ for lab in TYPE_COMMANDS:
+ xlabs.append(lab)
+ missingApprovals = [
+ x
+ for x in labels
+ if not x.endswith("-approved")
+ and not x.startswith("orp")
+ and not x.startswith("tests")
+ and not x.startswith("pending-assignment")
+ and not x.startswith("comparison")
+ and not x.startswith("code-checks")
+ and not x.startswith("allow-")
+ and not x in xlabs
+ ]
+
+ if not missingApprovals:
+ print("The pull request is complete.")
+ if missingApprovals:
+ labels.append("pending-signatures")
+ elif not "pending-assignment" in labels:
+ labels.append("fully-signed")
+ if need_external:
+ labels.append("requires-external")
+ labels = set(labels)
+ print("New Labels:", sorted(labels))
+
+ new_categories = set([])
+ for nc_lab in pkg_categories:
+ ncat = [nc_lab for oc_lab in old_labels if oc_lab.startswith(nc_lab + "-")]
+ if ncat:
+ continue
+ new_categories.add(nc_lab)
+
+ if new_assign_cats:
+ new_l2s = [
+ gh_user_char + name
+ for name, l2_categories in list(CMSSW_L2.items())
+ for signature in new_assign_cats
+ if signature in l2_categories
+ ]
+ if not dryRun:
+ issue.create_comment(
+ "New categories assigned: "
+ + ",".join(new_assign_cats)
+ + "\n\n"
+ + ",".join(new_l2s)
+ + " you have been requested to review this Pull request/Issue and eventually sign? Thanks"
+ )
+
+    # update blocker message
+ if new_blocker:
+ if not dryRun:
+ issue.create_comment(
+ HOLD_MSG
+ + blockers
+ + "\nThey need to issue an `unhold` command to remove the `hold` state or L1 can `unhold` it for all"
+ )
+ print("Blockers:", blockers)
+
+ print("Changed Labels:", labels - old_labels, old_labels - labels)
+ if old_labels == labels:
+ print("Labels unchanged.")
+ elif not dryRunOrig:
+ add_labels = True
+ try:
+ add_labels = repo_config.ADD_LABELS
+ except:
+ pass
+ if add_labels:
+ issue.edit(labels=list(labels))
+
+ # Check if it needs to be automatically closed.
+ if mustClose:
+ if issue.state == "open":
+ print("This pull request must be closed.")
+ if not dryRunOrig:
+ issue.edit(state="closed")
+ elif reOpen:
+ if issue.state == "closed":
+ print("This pull request must be reopened.")
+ if not dryRunOrig:
+ issue.edit(state="open")
+
+ if not issue.pull_request:
+ issueMessage = None
+ if not already_seen:
+ backport_msg = ""
+ if backport_pr_num:
+ backport_msg = "%s%s\n" % (BACKPORT_STR, backport_pr_num)
+ uname = ""
+ if issue.user.name:
+ uname = issue.user.name.encode("ascii", "ignore").decode()
+ l2s = ", ".join([gh_user_char + name for name in CMSSW_ISSUES_TRACKERS])
+ issueMessage = format(
+ "%(msgPrefix)s %(gh_user_char)s%(user)s"
+ " %(name)s.\n\n"
+ "%(l2s)s can you please review it and eventually sign/assign?"
+ " Thanks.\n\n"
+ 'cms-bot commands are listed here\n%(backport_msg)s',
+ msgPrefix=NEW_ISSUE_PREFIX,
+ user=requestor,
+ gh_user_char=gh_user_char,
+ name=uname,
+ backport_msg=backport_msg,
+ l2s=l2s,
+ )
+ elif ("fully-signed" in labels) and (not "fully-signed" in old_labels):
+ issueMessage = "This issue is fully signed and ready to be closed."
+ print("Issue Message:", issueMessage)
+ if issueMessage and not dryRun:
+ issue.create_comment(issueMessage)
+ return
+
+ # get release managers
+ SUPER_USERS = read_repo_file(repo_config, "super-users.yaml", [])
+ releaseManagersList = ", ".join([gh_user_char + x for x in set(releaseManagers + SUPER_USERS)])
+
+ if cmssw_prs:
+ global_test_params["PULL_REQUESTS"] = cmssw_prs
+ if extra_wfs:
+ global_test_params["MATRIX_EXTRAS"] = extra_wfs
+ if release_queue:
+ global_test_params["RELEASE_FORMAT"] = release_queue
+ if not "PULL_REQUESTS" in global_test_params:
+ global_test_params["PULL_REQUESTS"] = "%s#%s" % (repository, prId)
else:
- for slab in extra_labels[lab]:
- labels.append(slab)
- if comp_warnings: labels.append("compilation-warnings")
-
- if cms_repo and issue.pull_request and (not new_bot_tests):
- if comparison_done:
- labels.append("comparison-available")
- elif comparison_notrun:
- labels.append("comparison-notrun")
+ global_test_params["PULL_REQUESTS"] = "%s#%s %s" % (
+ repository,
+ prId,
+ global_test_params["PULL_REQUESTS"],
+ )
+ if ignore_tests:
+ if ignore_tests == "NONE":
+ ignore_tests = ""
+ global_test_params["IGNORE_BOT_TESTS"] = ignore_tests
+ if enable_tests:
+ if enable_tests == "NONE":
+ enable_tests = ""
+ global_test_params["ENABLE_BOT_TESTS"] = enable_tests
+ if release_arch:
+ global_test_params["ARCHITECTURE_FILTER"] = release_arch
+ global_test_params["EXTRA_RELVALS_TESTS"] = " ".join(
+ [t.upper().replace("-", "_") for t in EXTRA_RELVALS_TESTS]
+ )
+
+ print("All Parameters:", global_test_params)
+ # For now, only trigger tests for cms-sw/cmssw and cms-sw/cmsdist
+ if create_test_property:
+ global_test_params["CONTEXT_PREFIX"] = cms_status_prefix
+ if trigger_test:
+ create_properties_file_tests(
+ repository, prId, global_test_params, dryRun, abort=False, repo_config=repo_config
+ )
+ if not dryRun:
+ set_comment_emoji(test_comment.id, repository)
+ elif abort_test and bot_status and (not bot_status.description.startswith("Aborted")):
+ if not has_user_emoji(abort_test, repository, "+1", cmsbuild_user):
+ create_properties_file_tests(
+ repository, prId, global_test_params, dryRun, abort=True
+ )
+ if not dryRun:
+ set_comment_emoji(abort_test.id, repository)
+ last_commit_obj.create_status(
+ "pending",
+ description="Aborted, waiting for authorized user to issue the test command.",
+ target_url=abort_test.html_url,
+ context=bot_status_name,
+ )
+
+ # Do not complain about tests
+ requiresTestMessage = " after it passes the integration tests"
+ if "tests-approved" in labels:
+ requiresTestMessage = " (tests are also fine)"
+ elif "tests-rejected" in labels:
+ requiresTestMessage = " (but tests are reportedly failing)"
+
+ autoMergeMsg = ""
+ if (
+ ("fully-signed" in labels)
+ and ("tests-approved" in labels)
+ and ((not "orp" in signatures) or (signatures["orp"] == "approved"))
+ ):
+ autoMergeMsg = "This pull request will be automatically merged."
else:
- labels.append("comparison-pending")
-
- if ('PULL_REQUESTS' in global_test_params) or cmssw_prs:
- need_external = True
- # Now updated the labels.
- xlabs = ["backport", "urgent", "backport-ok", "compilation-warnings"]
- for lab in TYPE_COMMANDS: xlabs.append(lab)
- missingApprovals = [x
- for x in labels
- if not x.endswith("-approved")
- and not x.startswith("orp")
- and not x.startswith("tests")
- and not x.startswith("pending-assignment")
- and not x.startswith("comparison")
- and not x.startswith("code-checks")
- and not x.startswith("allow-")
- and not x in xlabs]
-
- if not missingApprovals:
- print("The pull request is complete.")
- if missingApprovals:
- labels.append("pending-signatures")
- elif not "pending-assignment" in labels:
- labels.append("fully-signed")
- if need_external: labels.append("requires-external")
- labels = set(labels)
- print("New Labels:", sorted(labels))
-
- new_categories = set ([])
- for nc_lab in pkg_categories:
- ncat = [ nc_lab for oc_lab in old_labels if oc_lab.startswith(nc_lab+'-') ]
- if ncat: continue
- new_categories.add(nc_lab)
-
- if new_assign_cats:
- new_l2s = [gh_user_char + name
- for name, l2_categories in list(CMSSW_L2.items())
- for signature in new_assign_cats
- if signature in l2_categories]
- if not dryRun: issue.create_comment("New categories assigned: "+",".join(new_assign_cats)+"\n\n"+",".join(new_l2s)+" you have been requested to review this Pull request/Issue and eventually sign? Thanks")
-
- #update blocker massge
- if new_blocker:
- if not dryRun: issue.create_comment(HOLD_MSG+blockers+'\nThey need to issue an `unhold` command to remove the `hold` state or L1 can `unhold` it for all')
- print("Blockers:",blockers)
-
- print("Changed Labels:",labels-old_labels,old_labels-labels)
- if old_labels == labels:
- print("Labels unchanged.")
- elif not dryRunOrig:
- add_labels = True
- try: add_labels = repo_config.ADD_LABELS
- except: pass
- if add_labels: issue.edit(labels=list(labels))
-
- # Check if it needs to be automatically closed.
- if mustClose:
- if issue.state == "open":
- print("This pull request must be closed.")
- if not dryRunOrig: issue.edit(state="closed")
- elif reOpen:
- if issue.state == "closed":
- print("This pull request must be reopened.")
- if not dryRunOrig: issue.edit(state="open")
-
- if not issue.pull_request:
- issueMessage = None
- if not already_seen:
- backport_msg=""
- if backport_pr_num: backport_msg="%s%s\n" % (BACKPORT_STR,backport_pr_num)
- uname = ""
- if issue.user.name: uname = issue.user.name.encode("ascii", "ignore").decode()
- l2s = ", ".join([ gh_user_char + name for name in CMSSW_ISSUES_TRACKERS ])
- issueMessage = format("%(msgPrefix)s %(gh_user_char)s%(user)s"
- " %(name)s.\n\n"
- "%(l2s)s can you please review it and eventually sign/assign?"
- " Thanks.\n\n"
- "cms-bot commands are listed here\n%(backport_msg)s",
- msgPrefix=NEW_ISSUE_PREFIX,
- user=requestor,
- gh_user_char=gh_user_char,
- name=uname,
- backport_msg=backport_msg,
- l2s=l2s)
- elif ("fully-signed" in labels) and (not "fully-signed" in old_labels):
- issueMessage = "This issue is fully signed and ready to be closed."
- print("Issue Message:",issueMessage)
- if issueMessage and not dryRun: issue.create_comment(issueMessage)
- return
-
- # get release managers
- SUPER_USERS = read_repo_file(repo_config, "super-users.yaml", [])
- releaseManagersList = ", ".join([gh_user_char + x for x in set(releaseManagers + SUPER_USERS)])
-
- if cmssw_prs:
- global_test_params['PULL_REQUESTS'] = cmssw_prs
- if extra_wfs:
- global_test_params['MATRIX_EXTRAS'] = extra_wfs
- if release_queue:
- global_test_params['RELEASE_FORMAT'] = release_queue
- if not 'PULL_REQUESTS' in global_test_params:
- global_test_params['PULL_REQUESTS'] = '%s#%s' % (repository, prId)
- else:
- global_test_params['PULL_REQUESTS'] = '%s#%s %s' % (repository, prId, global_test_params['PULL_REQUESTS'])
- if ignore_tests:
- if ignore_tests == 'NONE': ignore_tests = ''
- global_test_params['IGNORE_BOT_TESTS'] = ignore_tests
- if enable_tests:
- if enable_tests == 'NONE': enable_tests = ''
- global_test_params['ENABLE_BOT_TESTS'] = enable_tests
- if release_arch:
- global_test_params['ARCHITECTURE_FILTER'] = release_arch
- global_test_params['EXTRA_RELVALS_TESTS'] = " ".join([ t.upper().replace("-", "_") for t in EXTRA_RELVALS_TESTS])
-
- print("All Parameters:",global_test_params)
- #For now, only trigger tests for cms-sw/cmssw and cms-sw/cmsdist
- if create_test_property:
- global_test_params["CONTEXT_PREFIX"] = cms_status_prefix
- if trigger_test:
- create_properties_file_tests(repository, prId, global_test_params, dryRun, abort=False, repo_config=repo_config)
- if not dryRun:
- set_comment_emoji(test_comment.id, repository)
- elif abort_test and bot_status and (not bot_status.description.startswith("Aborted")):
- if not has_user_emoji(abort_test, repository, "+1", cmsbuild_user):
- create_properties_file_tests(repository, prId, global_test_params, dryRun, abort=True)
+ if is_hold:
+ autoMergeMsg = format(
+ "This PR is put on hold by %(blockers)s. They have"
+ " to `unhold` to remove the `hold` state or"
+ " %(managers)s will have to `merge` it by"
+ " hand.",
+ blockers=blockers,
+ managers=releaseManagersList,
+ )
+ elif "new-package-pending" in labels:
+ autoMergeMsg = format(
+ "This pull request requires a new package and "
+ " will not be merged. %(managers)s",
+ managers=releaseManagersList,
+ )
+ elif ("orp" in signatures) and (signatures["orp"] != "approved"):
+ autoMergeMsg = format(
+ "This pull request will now be reviewed by the release team"
+ " before it's merged. %(managers)s (and backports should be raised in the release meeting by the corresponding L2)",
+ managers=releaseManagersList,
+ )
+
+ devReleaseRelVal = ""
+ if (pr.base.ref in RELEASE_BRANCH_PRODUCTION) and (pr.base.ref != "master"):
+ devReleaseRelVal = (
+ " and once validation in the development release cycle "
+ + CMSSW_DEVEL_BRANCH
+ + " is complete"
+ )
+
+ if ("fully-signed" in labels) and (not "fully-signed" in old_labels):
+ messageFullySigned = format(
+ "This pull request is fully signed and it will be"
+ " integrated in one of the next %(branch)s IBs"
+ "%(requiresTest)s"
+ "%(devReleaseRelVal)s."
+ " %(autoMerge)s",
+ requiresTest=requiresTestMessage,
+ autoMerge=autoMergeMsg,
+ devReleaseRelVal=devReleaseRelVal,
+ branch=pr.base.ref,
+ )
+ print("Fully signed message updated")
if not dryRun:
- set_comment_emoji(abort_test.id, repository)
- last_commit_obj.create_status("pending", description="Aborted, waiting for authorized user to issue the test command.", target_url=abort_test.html_url, context=bot_status_name)
-
- # Do not complain about tests
- requiresTestMessage = " after it passes the integration tests"
- if "tests-approved" in labels:
- requiresTestMessage = " (tests are also fine)"
- elif "tests-rejected" in labels:
- requiresTestMessage = " (but tests are reportedly failing)"
-
- autoMergeMsg = ""
- if (("fully-signed" in labels) and ("tests-approved" in labels) and
- ((not "orp" in signatures) or (signatures["orp"] == "approved"))):
- autoMergeMsg = "This pull request will be automatically merged."
- else:
- if is_hold:
- autoMergeMsg = format("This PR is put on hold by %(blockers)s. They have"
- " to `unhold` to remove the `hold` state or"
- " %(managers)s will have to `merge` it by"
- " hand.",
- blockers=blockers,
- managers=releaseManagersList)
- elif "new-package-pending" in labels:
- autoMergeMsg = format("This pull request requires a new package and "
- " will not be merged. %(managers)s",
- managers=releaseManagersList)
- elif ("orp" in signatures) and (signatures["orp"] != "approved"):
- autoMergeMsg = format("This pull request will now be reviewed by the release team"
- " before it's merged. %(managers)s (and backports should be raised in the release meeting by the corresponding L2)",
- managers=releaseManagersList)
-
- devReleaseRelVal = ""
- if (pr.base.ref in RELEASE_BRANCH_PRODUCTION) and (pr.base.ref != "master"):
- devReleaseRelVal = " and once validation in the development release cycle "+CMSSW_DEVEL_BRANCH+" is complete"
-
- if ("fully-signed" in labels) and (not "fully-signed" in old_labels):
- messageFullySigned = format("This pull request is fully signed and it will be"
- " integrated in one of the next %(branch)s IBs"
- "%(requiresTest)s"
- "%(devReleaseRelVal)s."
- " %(autoMerge)s",
- requiresTest=requiresTestMessage,
- autoMerge = autoMergeMsg,
- devReleaseRelVal=devReleaseRelVal,
- branch=pr.base.ref)
- print("Fully signed message updated")
- if not dryRun: issue.create_comment(messageFullySigned)
-
- unsigned = [k for (k, v) in list(signatures.items()) if v == "pending"]
- missing_notifications = [gh_user_char + name
- for name, l2_categories in list(CMSSW_L2.items())
- for signature in signing_categories
- if signature in l2_categories
- and signature in unsigned and signature not in ["orp"] ]
-
- missing_notifications = set(missing_notifications)
- # Construct message for the watchers
- watchersMsg = ""
- if watchers:
- watchersMsg = format("%(watchers)s this is something you requested to"
- " watch as well.\n",
- watchers=", ".join(watchers))
- # Construct message for the release managers.
- managers = ", ".join([gh_user_char + x for x in releaseManagers])
-
- releaseManagersMsg = ""
- if releaseManagers:
- releaseManagersMsg = format("%(managers)s you are the release manager for this.\n",
- managers = managers)
-
- # Add a Warning if the pull request was done against a patch branch
- if cmssw_repo:
- warning_msg = ''
- if 'patchX' in pr.base.ref:
- print('Must warn that this is a patch branch')
- base_release = pr.base.ref.replace( '_patchX', '' )
- base_release_branch = re.sub( '[0-9]+$', 'X', base_release )
- warning_msg = format("Note that this branch is designed for requested bug "
- "fixes specific to the %(base_rel)s release.\nIf you "
- "wish to make a pull request for the %(base_branch)s "
- "release cycle, please use the %(base_branch)s branch instead\n",
- base_rel=base_release,
- base_branch=base_release_branch)
-
- # We do not want to spam people for the old pull requests.
- pkg_msg = []
- for pkg in packages:
- if pkg in package_categories:
- pkg_msg.append("- %s (**%s**)" % (pkg, ", ".join(package_categories[pkg])))
+ issue.create_comment(messageFullySigned)
+
+ unsigned = [k for (k, v) in list(signatures.items()) if v == "pending"]
+ missing_notifications = [
+ gh_user_char + name
+ for name, l2_categories in list(CMSSW_L2.items())
+ for signature in signing_categories
+ if signature in l2_categories and signature in unsigned and signature not in ["orp"]
+ ]
+
+ missing_notifications = set(missing_notifications)
+ # Construct message for the watchers
+ watchersMsg = ""
+ if watchers:
+ watchersMsg = format(
+ "%(watchers)s this is something you requested to" " watch as well.\n",
+ watchers=", ".join(watchers),
+ )
+ # Construct message for the release managers.
+ managers = ", ".join([gh_user_char + x for x in releaseManagers])
+
+ releaseManagersMsg = ""
+ if releaseManagers:
+ releaseManagersMsg = format(
+ "%(managers)s you are the release manager for this.\n", managers=managers
+ )
+
+ # Add a Warning if the pull request was done against a patch branch
+ if cmssw_repo:
+ warning_msg = ""
+ if "patchX" in pr.base.ref:
+ print("Must warn that this is a patch branch")
+ base_release = pr.base.ref.replace("_patchX", "")
+ base_release_branch = re.sub("[0-9]+$", "X", base_release)
+ warning_msg = format(
+ "Note that this branch is designed for requested bug "
+ "fixes specific to the %(base_rel)s release.\nIf you "
+ "wish to make a pull request for the %(base_branch)s "
+ "release cycle, please use the %(base_branch)s branch instead\n",
+ base_rel=base_release,
+ base_branch=base_release_branch,
+ )
+
+ # We do not want to spam people for the old pull requests.
+ pkg_msg = []
+ for pkg in packages:
+ if pkg in package_categories:
+ pkg_msg.append("- %s (**%s**)" % (pkg, ", ".join(package_categories[pkg])))
+ else:
+ pkg_msg.append("- %s (**new**)" % pkg)
+ messageNewPR = format(
+ "%(msgPrefix)s %(gh_user_char)s%(user)s"
+ " %(name)s for %(branch)s.\n\n"
+ "It involves the following packages:\n\n"
+ "%(packages)s\n\n"
+ "%(new_package_message)s\n"
+ "%(l2s)s can you please review it and eventually sign?"
+ " Thanks.\n"
+ "%(watchers)s"
+ "%(releaseManagers)s"
+ "%(patch_branch_warning)s\n"
+ 'cms-bot commands are listed here\n',
+ msgPrefix=NEW_PR_PREFIX,
+ user=pr.user.login,
+ gh_user_char=gh_user_char,
+ name=pr.user.name and "(%s)" % pr.user.name or "",
+ branch=pr.base.ref,
+ l2s=", ".join(missing_notifications),
+ packages="\n".join(pkg_msg),
+ new_package_message=new_package_message,
+ watchers=watchersMsg,
+ releaseManagers=releaseManagersMsg,
+ patch_branch_warning=warning_msg,
+ )
+
+ messageUpdatedPR = format(
+ "Pull request #%(pr)s was updated."
+ " %(signers)s can you please check and sign again.\n",
+ pr=pr.number,
+ signers=", ".join(missing_notifications),
+ )
+ else:
+ messageNewPR = format(
+ "%(msgPrefix)s %(gh_user_char)s%(user)s"
+ " %(name)s for branch %(branch)s.\n\n"
+ "%(l2s)s can you please review it and eventually sign?"
+ " Thanks.\n"
+ "%(watchers)s"
+ "%(releaseManagers)s"
+ 'cms-bot commands are listed here\n',
+ msgPrefix=NEW_PR_PREFIX,
+ user=pr.user.login,
+ gh_user_char=gh_user_char,
+ name=pr.user.name and "(%s)" % pr.user.name or "",
+ branch=pr.base.ref,
+ l2s=", ".join(missing_notifications),
+ releaseManagers=releaseManagersMsg,
+ watchers=watchersMsg,
+ )
+
+ messageUpdatedPR = format("Pull request #%(pr)s was updated.", pr=pr.number)
+
+ # Finally decide whether or not we should close the pull request:
+ messageBranchClosed = format(
+ "This branch is closed for updates."
+ " Closing this pull request.\n"
+ " Please bring this up in the ORP"
+ " meeting if really needed.\n"
+ )
+
+ commentMsg = ""
+ print("Status: Not see= %s, Updated: %s" % (already_seen, pull_request_updated))
+ if is_closed_branch(pr.base.ref) and (pr.state != "closed"):
+ commentMsg = messageBranchClosed
+ elif (not already_seen) or pull_request_updated:
+ if not already_seen:
+ commentMsg = messageNewPR
else:
- pkg_msg.append("- %s (**new**)" % pkg)
- messageNewPR = format("%(msgPrefix)s %(gh_user_char)s%(user)s"
- " %(name)s for %(branch)s.\n\n"
- "It involves the following packages:\n\n"
- "%(packages)s\n\n"
- "%(new_package_message)s\n"
- "%(l2s)s can you please review it and eventually sign?"
- " Thanks.\n"
- "%(watchers)s"
- "%(releaseManagers)s"
- "%(patch_branch_warning)s\n"
- "cms-bot commands are listed here\n",
- msgPrefix=NEW_PR_PREFIX,
- user=pr.user.login,
- gh_user_char=gh_user_char,
- name=pr.user.name and "(%s)" % pr.user.name or "",
- branch=pr.base.ref,
- l2s=", ".join(missing_notifications),
- packages="\n".join(pkg_msg),
- new_package_message=new_package_message,
- watchers=watchersMsg,
- releaseManagers=releaseManagersMsg,
- patch_branch_warning=warning_msg)
-
- messageUpdatedPR = format("Pull request #%(pr)s was updated."
- " %(signers)s can you please check and sign again.\n",
- pr=pr.number,
- signers=", ".join(missing_notifications))
- else:
- messageNewPR = format("%(msgPrefix)s %(gh_user_char)s%(user)s"
- " %(name)s for branch %(branch)s.\n\n"
- "%(l2s)s can you please review it and eventually sign?"
- " Thanks.\n"
- "%(watchers)s"
- "%(releaseManagers)s"
- "cms-bot commands are listed here\n",
- msgPrefix=NEW_PR_PREFIX,
- user=pr.user.login,
- gh_user_char=gh_user_char,
- name=pr.user.name and "(%s)" % pr.user.name or "",
- branch=pr.base.ref,
- l2s=", ".join(missing_notifications),
- releaseManagers=releaseManagersMsg,
- watchers=watchersMsg)
-
- messageUpdatedPR = format("Pull request #%(pr)s was updated.",
- pr=pr.number)
-
- # Finally decide whether or not we should close the pull request:
- messageBranchClosed = format("This branch is closed for updates."
- " Closing this pull request.\n"
- " Please bring this up in the ORP"
- " meeting if really needed.\n")
-
- commentMsg = ""
- print("Status: Not see= %s, Updated: %s" % (already_seen, pull_request_updated))
- if is_closed_branch(pr.base.ref) and (pr.state != "closed"):
- commentMsg = messageBranchClosed
- elif (not already_seen) or pull_request_updated:
- if not already_seen: commentMsg = messageNewPR
- else: commentMsg = messageUpdatedPR
- elif new_categories:
- commentMsg = messageUpdatedPR
- elif not missingApprovals:
- print("Pull request is already fully signed. Not sending message.")
- else:
- print("Already notified L2 about " + str(pr.number))
- if commentMsg and not dryRun:
- print("The following comment will be made:")
- try:
- print(commentMsg.decode("ascii", "replace"))
- except:
- pass
- for pre_check in pre_checks+extra_pre_checks:
- if pre_check not in signatures: signatures[pre_check] = "pending"
- print("PRE CHECK: %s,%s,%s" % (pre_check, signatures[pre_check], pre_checks_state[pre_check]))
- if signatures[pre_check]!="pending":
- if pre_checks_state[pre_check] in ["pending", ""]:
- state = "success" if signatures[pre_check]=="approved" else "error"
- url = pre_checks_url[pre_check]
- print("Setting status: %s,%s,%s" % (pre_check, state, url))
- if not dryRunOrig:
- last_commit_obj.create_status(state, target_url=url, description="Check details", context="%s/%s" % (cms_status_prefix, pre_check))
- continue
- if (not dryRunOrig) and (pre_checks_state[pre_check]==""):
- params = {"PULL_REQUEST" : "%s" % (prId), "CONTEXT_PREFIX": cms_status_prefix}
- if pre_check=="code-checks":
- params["CMSSW_TOOL_CONF"] = code_checks_tools
- params["APPLY_PATCH"] = str(code_check_apply_patch).lower()
- create_properties_file_tests(repository, prId, params, dryRunOrig, abort=False, req_type=pre_check)
- last_commit_obj.create_status("pending", description="%s requested" % pre_check, context="%s/%s" % (cms_status_prefix, pre_check))
+ commentMsg = messageUpdatedPR
+ elif new_categories:
+ commentMsg = messageUpdatedPR
+ elif not missingApprovals:
+ print("Pull request is already fully signed. Not sending message.")
+ else:
+ print("Already notified L2 about " + str(pr.number))
+ if commentMsg and not dryRun:
+ print("The following comment will be made:")
+ try:
+ print(commentMsg.decode("ascii", "replace"))
+ except:
+ pass
+ for pre_check in pre_checks + extra_pre_checks:
+ if pre_check not in signatures:
+ signatures[pre_check] = "pending"
+ print(
+ "PRE CHECK: %s,%s,%s" % (pre_check, signatures[pre_check], pre_checks_state[pre_check])
+ )
+ if signatures[pre_check] != "pending":
+ if pre_checks_state[pre_check] in ["pending", ""]:
+ state = "success" if signatures[pre_check] == "approved" else "error"
+ url = pre_checks_url[pre_check]
+ print("Setting status: %s,%s,%s" % (pre_check, state, url))
+ if not dryRunOrig:
+ last_commit_obj.create_status(
+ state,
+ target_url=url,
+ description="Check details",
+ context="%s/%s" % (cms_status_prefix, pre_check),
+ )
+ continue
+ if (not dryRunOrig) and (pre_checks_state[pre_check] == ""):
+ params = {"PULL_REQUEST": "%s" % (prId), "CONTEXT_PREFIX": cms_status_prefix}
+ if pre_check == "code-checks":
+ params["CMSSW_TOOL_CONF"] = code_checks_tools
+ params["APPLY_PATCH"] = str(code_check_apply_patch).lower()
+ create_properties_file_tests(
+ repository, prId, params, dryRunOrig, abort=False, req_type=pre_check
+ )
+ last_commit_obj.create_status(
+ "pending",
+ description="%s requested" % pre_check,
+ context="%s/%s" % (cms_status_prefix, pre_check),
+ )
+ else:
+ print("Dryrun: Setting pending status for %s" % pre_check)
+
+ if commentMsg and not dryRun:
+ issue.create_comment(commentMsg)
+
+ # Check if it needs to be automatically merged.
+ if all(
+ [
+ "fully-signed" in labels,
+ "tests-approved" in labels,
+ "orp-approved" in labels,
+ not "hold" in labels,
+ not "new-package-pending" in labels,
+ ]
+ ):
+ print("This pull request can be automatically merged")
+ mustMerge = True
else:
- print("Dryrun: Setting pending status for %s" % pre_check)
-
- if commentMsg and not dryRun:
- issue.create_comment(commentMsg)
-
- # Check if it needs to be automatically merged.
- if all(["fully-signed" in labels,
- "tests-approved" in labels,
- "orp-approved" in labels,
- not "hold" in labels,
- not "new-package-pending" in labels]):
- print("This pull request can be automatically merged")
- mustMerge = True
- else:
- print("This pull request will not be automatically merged.")
- if mustMerge == True:
- print("This pull request must be merged.")
- if not dryRun and (pr.state == "open"): pr.merge()
-
- state = get_status(bot_test_param_name, commit_statuses)
- if len(test_params_msg)>140: test_params_msg=test_params_msg[:135]+"..."
- if ((not state) and (test_params_msg!="")) or (state and state.description != test_params_msg):
- if test_params_msg=="": test_params_msg="No special test parameter set."
- print("Test params:",test_params_msg)
- url = ""
- if test_params_comment:
- e = get_user_emoji(test_params_comment, repository, cmsbuild_user)
- print(e)
- if not dryRun:
- emoji = "-1" if 'ERRORS: ' in test_params_msg else "+1"
- if e and (e['content']!=emoji):
- delete_comment_emoji(str(e['id']), test_params_comment.id, repository)
- state = "success" if emoji=="+1" else "error"
- last_commit_obj.create_status(state, description=test_params_msg, target_url=test_params_comment.html_url, context=bot_test_param_name)
- if (not e) or (e['content']!=emoji):
- set_comment_emoji(test_params_comment.id, repository, emoji=emoji)
- if ack_comment:
- state = get_status(bot_ack_name, commit_statuses)
- if (not state) or (state.target_url != ack_comment.html_url):
- desc = "Comment by %s at %s UTC processed." % (ack_comment.user.login.encode("ascii", "ignore").decode(), ack_comment.created_at)
- print(desc)
- if not dryRun:
- last_commit_obj.create_status("success", description=desc, target_url=ack_comment.html_url, context=bot_ack_name)
+ print("This pull request will not be automatically merged.")
+ if mustMerge == True:
+ print("This pull request must be merged.")
+ if not dryRun and (pr.state == "open"):
+ pr.merge()
+
+ state = get_status(bot_test_param_name, commit_statuses)
+ if len(test_params_msg) > 140:
+ test_params_msg = test_params_msg[:135] + "..."
+ if ((not state) and (test_params_msg != "")) or (
+ state and state.description != test_params_msg
+ ):
+ if test_params_msg == "":
+ test_params_msg = "No special test parameter set."
+ print("Test params:", test_params_msg)
+ url = ""
+ if test_params_comment:
+ e = get_user_emoji(test_params_comment, repository, cmsbuild_user)
+ print(e)
+ if not dryRun:
+ emoji = "-1" if "ERRORS: " in test_params_msg else "+1"
+ if e and (e["content"] != emoji):
+ delete_comment_emoji(str(e["id"]), test_params_comment.id, repository)
+ state = "success" if emoji == "+1" else "error"
+ last_commit_obj.create_status(
+ state,
+ description=test_params_msg,
+ target_url=test_params_comment.html_url,
+ context=bot_test_param_name,
+ )
+ if (not e) or (e["content"] != emoji):
+ set_comment_emoji(test_params_comment.id, repository, emoji=emoji)
+ if ack_comment:
+ state = get_status(bot_ack_name, commit_statuses)
+ if (not state) or (state.target_url != ack_comment.html_url):
+ desc = "Comment by %s at %s UTC processed." % (
+ ack_comment.user.login.encode("ascii", "ignore").decode(),
+ ack_comment.created_at,
+ )
+ print(desc)
+ if not dryRun:
+ last_commit_obj.create_status(
+ "success",
+ description=desc,
+ target_url=ack_comment.html_url,
+ context=bot_ack_name,
+ )
diff --git a/python/archived_argparse.py b/python/archived_argparse.py
index bcea63c77a5a..10f057c0d611 100644
--- a/python/archived_argparse.py
+++ b/python/archived_argparse.py
@@ -64,29 +64,29 @@
still considered an implementation detail.)
"""
-__version__ = '1.4.0' # we use our own version number independant of the
- # one in stdlib and we release this on pypi.
+__version__ = "1.4.0" # we use our own version number independant of the
+# one in stdlib and we release this on pypi.
__external_lib__ = True # to make sure the tests really test THIS lib,
- # not the builtin one in Python stdlib
+# not the builtin one in Python stdlib
__all__ = [
- 'ArgumentParser',
- 'ArgumentError',
- 'ArgumentTypeError',
- 'FileType',
- 'HelpFormatter',
- 'ArgumentDefaultsHelpFormatter',
- 'RawDescriptionHelpFormatter',
- 'RawTextHelpFormatter',
- 'Namespace',
- 'Action',
- 'ONE_OR_MORE',
- 'OPTIONAL',
- 'PARSER',
- 'REMAINDER',
- 'SUPPRESS',
- 'ZERO_OR_MORE',
+ "ArgumentParser",
+ "ArgumentError",
+ "ArgumentTypeError",
+ "FileType",
+ "HelpFormatter",
+ "ArgumentDefaultsHelpFormatter",
+ "RawDescriptionHelpFormatter",
+ "RawTextHelpFormatter",
+ "Namespace",
+ "Action",
+ "ONE_OR_MORE",
+ "OPTIONAL",
+ "PARSER",
+ "REMAINDER",
+ "SUPPRESS",
+ "ZERO_OR_MORE",
]
@@ -122,22 +122,23 @@ def sorted(iterable, reverse=False):
def _callable(obj):
- return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
+ return hasattr(obj, "__call__") or hasattr(obj, "__bases__")
-SUPPRESS = '==SUPPRESS=='
+SUPPRESS = "==SUPPRESS=="
-OPTIONAL = '?'
-ZERO_OR_MORE = '*'
-ONE_OR_MORE = '+'
-PARSER = 'A...'
-REMAINDER = '...'
-_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
+OPTIONAL = "?"
+ZERO_OR_MORE = "*"
+ONE_OR_MORE = "+"
+PARSER = "A..."
+REMAINDER = "..."
+_UNRECOGNIZED_ARGS_ATTR = "_unrecognized_args"
# =============================
# Utility functions and classes
# =============================
+
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
@@ -153,8 +154,8 @@ def __repr__(self):
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
- arg_strings.append('%s=%r' % (name, value))
- return '%s(%s)' % (type_name, ', '.join(arg_strings))
+ arg_strings.append("%s=%r" % (name, value))
+ return "%s(%s)" % (type_name, ", ".join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
@@ -173,6 +174,7 @@ def _ensure_value(namespace, name, value):
# Formatting Help
# ===============
+
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
@@ -180,16 +182,11 @@ class HelpFormatter(object):
provided by the class are considered an implementation detail.
"""
- def __init__(self,
- prog,
- indent_increment=2,
- max_help_position=24,
- width=None):
-
+ def __init__(self, prog, indent_increment=2, max_help_position=24, width=None):
# default setting for width
if width is None:
try:
- width = int(_os.environ['COLUMNS'])
+ width = int(_os.environ["COLUMNS"])
except (KeyError, ValueError):
width = 80
width -= 2
@@ -206,8 +203,8 @@ def __init__(self,
self._root_section = self._Section(self, None)
self._current_section = self._root_section
- self._whitespace_matcher = _re.compile(r'\s+')
- self._long_break_matcher = _re.compile(r'\n\n\n+')
+ self._whitespace_matcher = _re.compile(r"\s+")
+ self._long_break_matcher = _re.compile(r"\n\n\n+")
# ===============================
# Section and indentation methods
@@ -218,11 +215,10 @@ def _indent(self):
def _dedent(self):
self._current_indent -= self._indent_increment
- assert self._current_indent >= 0, 'Indent decreased below 0.'
+ assert self._current_indent >= 0, "Indent decreased below 0."
self._level -= 1
class _Section(object):
-
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
@@ -242,17 +238,17 @@ def format_help(self):
# return nothing if the section was empty
if not item_help:
- return ''
+ return ""
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
- heading = '%*s%s:\n' % (current_indent, '', self.heading)
+ heading = "%*s%s:\n" % (current_indent, "", self.heading)
else:
- heading = ''
+ heading = ""
# join the section-initial newline, the heading and the help
- return join(['\n', heading, item_help, '\n'])
+ return join(["\n", heading, item_help, "\n"])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
@@ -281,7 +277,6 @@ def add_usage(self, usage, actions, groups, prefix=None):
def add_argument(self, action):
if action.help is not SUPPRESS:
-
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
@@ -291,8 +286,7 @@ def add_argument(self, action):
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
- self._action_max_length = max(self._action_max_length,
- action_length)
+ self._action_max_length = max(self._action_max_length, action_length)
# add the item to the list
self._add_item(self._format_action, [action])
@@ -307,18 +301,16 @@ def add_arguments(self, actions):
def format_help(self):
help = self._root_section.format_help()
if help:
- help = self._long_break_matcher.sub('\n\n', help)
- help = help.strip('\n') + '\n'
+ help = self._long_break_matcher.sub("\n\n", help)
+ help = help.strip("\n") + "\n"
return help
def _join_parts(self, part_strings):
- return ''.join([part
- for part in part_strings
- if part and part is not SUPPRESS])
+ return "".join([part for part in part_strings if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
- prefix = _('usage: ')
+ prefix = _("usage: ")
# if usage is specified, use that
if usage is not None:
@@ -326,11 +318,11 @@ def _format_usage(self, usage, actions, groups, prefix):
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
- usage = '%(prog)s' % dict(prog=self._prog)
+ usage = "%(prog)s" % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
- prog = '%(prog)s' % dict(prog=self._prog)
+ prog = "%(prog)s" % dict(prog=self._prog)
# split optionals from positionals
optionals = []
@@ -344,20 +336,19 @@ def _format_usage(self, usage, actions, groups, prefix):
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
- usage = ' '.join([s for s in [prog, action_usage] if s])
+ usage = " ".join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
-
# break usage into wrappable parts
- part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ part_regexp = r"\(.*?\)+|\[.*?\]+|\S+"
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
- assert ' '.join(opt_parts) == opt_usage
- assert ' '.join(pos_parts) == pos_usage
+ assert " ".join(opt_parts) == opt_usage
+ assert " ".join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
@@ -369,20 +360,20 @@ def get_lines(parts, indent, prefix=None):
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
- lines.append(indent + ' '.join(line))
+ lines.append(indent + " ".join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
- lines.append(indent + ' '.join(line))
+ lines.append(indent + " ".join(line))
if prefix is not None:
- lines[0] = lines[0][len(indent):]
+ lines[0] = lines[0][len(indent) :]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
- indent = ' ' * (len(prefix) + len(prog) + 1)
+ indent = " " * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
@@ -393,7 +384,7 @@ def get_lines(parts, indent, prefix=None):
# if prog is long, put it on its own line
else:
- indent = ' ' * len(prefix)
+ indent = " " * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
@@ -403,10 +394,10 @@ def get_lines(parts, indent, prefix=None):
lines = [prog] + lines
# join lines into usage
- usage = '\n'.join(lines)
+ usage = "\n".join(lines)
# prefix with 'usage:'
- return '%s%s\n\n' % (prefix, usage)
+ return "%s%s\n\n" % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
@@ -424,30 +415,29 @@ def _format_actions_usage(self, actions, groups):
group_actions.add(action)
if not group.required:
if start in inserts:
- inserts[start] += ' ['
+ inserts[start] += " ["
else:
- inserts[start] = '['
- inserts[end] = ']'
+ inserts[start] = "["
+ inserts[end] = "]"
else:
if start in inserts:
- inserts[start] += ' ('
+ inserts[start] += " ("
else:
- inserts[start] = '('
- inserts[end] = ')'
+ inserts[start] = "("
+ inserts[end] = ")"
for i in range(start + 1, end):
- inserts[i] = '|'
+ inserts[i] = "|"
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
-
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
- if inserts.get(i) == '|':
+ if inserts.get(i) == "|":
inserts.pop(i)
- elif inserts.get(i + 1) == '|':
+ elif inserts.get(i + 1) == "|":
inserts.pop(i + 1)
# produce all arg strings
@@ -456,7 +446,7 @@ def _format_actions_usage(self, actions, groups):
# if it's in a group, strip the outer []
if action in group_actions:
- if part[0] == '[' and part[-1] == ']':
+ if part[0] == "[" and part[-1] == "]":
part = part[1:-1]
# add the action string to the list
@@ -469,18 +459,18 @@ def _format_actions_usage(self, actions, groups):
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
- part = '%s' % option_string
+ part = "%s" % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
- part = '%s %s' % (option_string, args_string)
+ part = "%s %s" % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
- part = '[%s]' % part
+ part = "[%s]" % part
# add the action string to the list
parts.append(part)
@@ -490,50 +480,49 @@ def _format_actions_usage(self, actions, groups):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
- text = ' '.join([item for item in parts if item is not None])
+ text = " ".join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
- open = r'[\[(]'
- close = r'[\])]'
- text = _re.sub(r'(%s) ' % open, r'\1', text)
- text = _re.sub(r' (%s)' % close, r'\1', text)
- text = _re.sub(r'%s *%s' % (open, close), r'', text)
- text = _re.sub(r'\(([^|]*)\)', r'\1', text)
+ open = r"[\[(]"
+ close = r"[\])]"
+ text = _re.sub(r"(%s) " % open, r"\1", text)
+ text = _re.sub(r" (%s)" % close, r"\1", text)
+ text = _re.sub(r"%s *%s" % (open, close), r"", text)
+ text = _re.sub(r"\(([^|]*)\)", r"\1", text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
- if '%(prog)' in text:
+ if "%(prog)" in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
- indent = ' ' * self._current_indent
- return self._fill_text(text, text_width, indent) + '\n\n'
+ indent = " " * self._current_indent
+ return self._fill_text(text, text_width, indent) + "\n\n"
def _format_action(self, action):
# determine the required width and the entry label
- help_position = min(self._action_max_length + 2,
- self._max_help_position)
+ help_position = min(self._action_max_length + 2, self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# ho nelp; start on same line and add a final newline
if not action.help:
- tup = self._current_indent, '', action_header
- action_header = '%*s%s\n' % tup
+ tup = self._current_indent, "", action_header
+ action_header = "%*s%s\n" % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
- tup = self._current_indent, '', action_width, action_header
- action_header = '%*s%-*s ' % tup
+ tup = self._current_indent, "", action_width, action_header
+ action_header = "%*s%-*s " % tup
indent_first = 0
# long action name; start on the next line
else:
- tup = self._current_indent, '', action_header
- action_header = '%*s%s\n' % tup
+ tup = self._current_indent, "", action_header
+ action_header = "%*s%s\n" % tup
indent_first = help_position
# collect the pieces of the action help
@@ -543,13 +532,13 @@ def _format_action(self, action):
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
- parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+ parts.append("%*s%s\n" % (indent_first, "", help_lines[0]))
for line in help_lines[1:]:
- parts.append('%*s%s\n' % (help_position, '', line))
+ parts.append("%*s%s\n" % (help_position, "", line))
# or add a newline if the description doesn't end with one
- elif not action_header.endswith('\n'):
- parts.append('\n')
+ elif not action_header.endswith("\n"):
+ parts.append("\n")
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
@@ -560,7 +549,7 @@ def _format_action(self, action):
def _format_action_invocation(self, action):
if not action.option_strings:
- metavar, = self._metavar_formatter(action, action.dest)(1)
+ (metavar,) = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
@@ -577,16 +566,16 @@ def _format_action_invocation(self, action):
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
- parts.append('%s %s' % (option_string, args_string))
+ parts.append("%s %s" % (option_string, args_string))
- return ', '.join(parts)
+ return ", ".join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
- result = '{%s}' % ','.join(choice_strs)
+ result = "{%s}" % ",".join(choice_strs)
else:
result = default_metavar
@@ -594,26 +583,27 @@ def format(tuple_size):
if isinstance(result, tuple):
return result
else:
- return (result, ) * tuple_size
+ return (result,) * tuple_size
+
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
- result = '%s' % get_metavar(1)
+ result = "%s" % get_metavar(1)
elif action.nargs == OPTIONAL:
- result = '[%s]' % get_metavar(1)
+ result = "[%s]" % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
- result = '[%s [%s ...]]' % get_metavar(2)
+ result = "[%s [%s ...]]" % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
- result = '%s [%s ...]' % get_metavar(2)
+ result = "%s [%s ...]" % get_metavar(2)
elif action.nargs == REMAINDER:
- result = '...'
+ result = "..."
elif action.nargs == PARSER:
- result = '%s ...' % get_metavar(1)
+ result = "%s ..." % get_metavar(1)
else:
- formats = ['%s' for _ in range(action.nargs)]
- result = ' '.join(formats) % get_metavar(action.nargs)
+ formats = ["%s" for _ in range(action.nargs)]
+ result = " ".join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
@@ -622,11 +612,11 @@ def _expand_help(self, action):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
- if hasattr(params[name], '__name__'):
+ if hasattr(params[name], "__name__"):
params[name] = params[name].__name__
- if params.get('choices') is not None:
- choices_str = ', '.join([str(c) for c in params['choices']])
- params['choices'] = choices_str
+ if params.get("choices") is not None:
+ choices_str = ", ".join([str(c) for c in params["choices"]])
+ params["choices"] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
@@ -641,13 +631,12 @@ def _iter_indented_subactions(self, action):
self._dedent()
def _split_lines(self, text, width):
- text = self._whitespace_matcher.sub(' ', text).strip()
+ text = self._whitespace_matcher.sub(" ", text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
- text = self._whitespace_matcher.sub(' ', text).strip()
- return _textwrap.fill(text, width, initial_indent=indent,
- subsequent_indent=indent)
+ text = self._whitespace_matcher.sub(" ", text).strip()
+ return _textwrap.fill(text, width, initial_indent=indent, subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
@@ -661,7 +650,7 @@ class RawDescriptionHelpFormatter(HelpFormatter):
"""
def _fill_text(self, text, width, indent):
- return ''.join([indent + line for line in text.splitlines(True)])
+ return "".join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
@@ -684,11 +673,11 @@ class ArgumentDefaultsHelpFormatter(HelpFormatter):
def _get_help_string(self, action):
help = action.help
- if '%(default)' not in action.help:
+ if "%(default)" not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
- help += ' (default: %(default)s)'
+ help += " (default: %(default)s)"
return help
@@ -696,11 +685,12 @@ def _get_help_string(self, action):
# Options and Arguments
# =====================
+
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
- return '/'.join(argument.option_strings)
+ return "/".join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
@@ -722,15 +712,15 @@ def __init__(self, argument, message):
def __str__(self):
if self.argument_name is None:
- format = '%(message)s'
+ format = "%(message)s"
else:
- format = 'argument %(argument_name)s: %(message)s'
- return format % dict(message=self.message,
- argument_name=self.argument_name)
+ format = "argument %(argument_name)s: %(message)s"
+ return format % dict(message=self.message, argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
+
pass
@@ -738,6 +728,7 @@ class ArgumentTypeError(Exception):
# Action classes
# ==============
+
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
@@ -789,17 +780,19 @@ class Action(_AttributeHolder):
help string. If None, the 'dest' value will be used as the name.
"""
- def __init__(self,
- option_strings,
- dest,
- nargs=None,
- const=None,
- default=None,
- type=None,
- choices=None,
- required=False,
- help=None,
- metavar=None):
+ def __init__(
+ self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None,
+ ):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
@@ -813,41 +806,44 @@ def __init__(self,
def _get_kwargs(self):
names = [
- 'option_strings',
- 'dest',
- 'nargs',
- 'const',
- 'default',
- 'type',
- 'choices',
- 'help',
- 'metavar',
+ "option_strings",
+ "dest",
+ "nargs",
+ "const",
+ "default",
+ "type",
+ "choices",
+ "help",
+ "metavar",
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
- raise NotImplementedError(_('.__call__() not defined'))
+ raise NotImplementedError(_(".__call__() not defined"))
class _StoreAction(Action):
-
- def __init__(self,
- option_strings,
- dest,
- nargs=None,
- const=None,
- default=None,
- type=None,
- choices=None,
- required=False,
- help=None,
- metavar=None):
+ def __init__(
+ self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None,
+ ):
if nargs == 0:
- raise ValueError('nargs for store actions must be > 0; if you '
- 'have nothing to store, actions such as store '
- 'true or store const may be more appropriate')
+ raise ValueError(
+ "nargs for store actions must be > 0; if you "
+ "have nothing to store, actions such as store "
+ "true or store const may be more appropriate"
+ )
if const is not None and nargs != OPTIONAL:
- raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ raise ValueError("nargs must be %r to supply const" % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
@@ -858,22 +854,17 @@ def __init__(self,
choices=choices,
required=required,
help=help,
- metavar=metavar)
+ metavar=metavar,
+ )
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
-
- def __init__(self,
- option_strings,
- dest,
- const,
- default=None,
- required=False,
- help=None,
- metavar=None):
+ def __init__(
+ self, option_strings, dest, const, default=None, required=False, help=None, metavar=None
+ ):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
@@ -881,65 +872,59 @@ def __init__(self,
const=const,
default=default,
required=required,
- help=help)
+ help=help,
+ )
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
-
- def __init__(self,
- option_strings,
- dest,
- default=False,
- required=False,
- help=None):
+ def __init__(self, option_strings, dest, default=False, required=False, help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
- help=help)
+ help=help,
+ )
class _StoreFalseAction(_StoreConstAction):
-
- def __init__(self,
- option_strings,
- dest,
- default=True,
- required=False,
- help=None):
+ def __init__(self, option_strings, dest, default=True, required=False, help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
- help=help)
+ help=help,
+ )
class _AppendAction(Action):
-
- def __init__(self,
- option_strings,
- dest,
- nargs=None,
- const=None,
- default=None,
- type=None,
- choices=None,
- required=False,
- help=None,
- metavar=None):
+ def __init__(
+ self,
+ option_strings,
+ dest,
+ nargs=None,
+ const=None,
+ default=None,
+ type=None,
+ choices=None,
+ required=False,
+ help=None,
+ metavar=None,
+ ):
if nargs == 0:
- raise ValueError('nargs for append actions must be > 0; if arg '
- 'strings are not supplying the value to append, '
- 'the append const action may be more appropriate')
+ raise ValueError(
+ "nargs for append actions must be > 0; if arg "
+ "strings are not supplying the value to append, "
+ "the append const action may be more appropriate"
+ )
if const is not None and nargs != OPTIONAL:
- raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+ raise ValueError("nargs must be %r to supply const" % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
@@ -950,7 +935,8 @@ def __init__(self,
choices=choices,
required=required,
help=help,
- metavar=metavar)
+ metavar=metavar,
+ )
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
@@ -959,15 +945,9 @@ def __call__(self, parser, namespace, values, option_string=None):
class _AppendConstAction(Action):
-
- def __init__(self,
- option_strings,
- dest,
- const,
- default=None,
- required=False,
- help=None,
- metavar=None):
+ def __init__(
+ self, option_strings, dest, const, default=None, required=False, help=None, metavar=None
+ ):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
@@ -976,7 +956,8 @@ def __init__(self,
default=default,
required=required,
help=help,
- metavar=metavar)
+ metavar=metavar,
+ )
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
@@ -985,20 +966,15 @@ def __call__(self, parser, namespace, values, option_string=None):
class _CountAction(Action):
-
- def __init__(self,
- option_strings,
- dest,
- default=None,
- required=False,
- help=None):
+ def __init__(self, option_strings, dest, default=None, required=False, help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
- help=help)
+ help=help,
+ )
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
@@ -1006,18 +982,10 @@ def __call__(self, parser, namespace, values, option_string=None):
class _HelpAction(Action):
-
- def __init__(self,
- option_strings,
- dest=SUPPRESS,
- default=SUPPRESS,
- help=None):
+ def __init__(self, option_strings, dest=SUPPRESS, default=SUPPRESS, help=None):
super(_HelpAction, self).__init__(
- option_strings=option_strings,
- dest=dest,
- default=default,
- nargs=0,
- help=help)
+ option_strings=option_strings, dest=dest, default=default, nargs=0, help=help
+ )
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
@@ -1025,19 +993,17 @@ def __call__(self, parser, namespace, values, option_string=None):
class _VersionAction(Action):
-
- def __init__(self,
- option_strings,
- version=None,
- dest=SUPPRESS,
- default=SUPPRESS,
- help="show program's version number and exit"):
+ def __init__(
+ self,
+ option_strings,
+ version=None,
+ dest=SUPPRESS,
+ default=SUPPRESS,
+ help="show program's version number and exit",
+ ):
super(_VersionAction, self).__init__(
- option_strings=option_strings,
- dest=dest,
- default=default,
- nargs=0,
- help=help)
+ option_strings=option_strings, dest=dest, default=default, nargs=0, help=help
+ )
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
@@ -1050,25 +1016,15 @@ def __call__(self, parser, namespace, values, option_string=None):
class _SubParsersAction(Action):
-
class _ChoicesPseudoAction(Action):
-
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
- metavar += ' (%s)' % ', '.join(aliases)
+ metavar += " (%s)" % ", ".join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
- sup.__init__(option_strings=[], dest=dest, help=help,
- metavar=metavar)
-
- def __init__(self,
- option_strings,
- prog,
- parser_class,
- dest=SUPPRESS,
- help=None,
- metavar=None):
+ sup.__init__(option_strings=[], dest=dest, help=help, metavar=metavar)
+ def __init__(self, option_strings, prog, parser_class, dest=SUPPRESS, help=None, metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
@@ -1080,18 +1036,19 @@ def __init__(self,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
- metavar=metavar)
+ metavar=metavar,
+ )
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
- if kwargs.get('prog') is None:
- kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
+ if kwargs.get("prog") is None:
+ kwargs["prog"] = "%s %s" % (self._prog_prefix, name)
- aliases = kwargs.pop('aliases', ())
+ aliases = kwargs.pop("aliases", ())
# create a pseudo-action to hold the choice help
- if 'help' in kwargs:
- help = kwargs.pop('help')
+ if "help" in kwargs:
+ help = kwargs.pop("help")
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
@@ -1120,8 +1077,8 @@ def __call__(self, parser, namespace, values, option_string=None):
try:
parser = self._name_parser_map[parser_name]
except KeyError:
- tup = parser_name, ', '.join(self._name_parser_map)
- msg = _('unknown parser %r (choices: %s)' % tup)
+ tup = parser_name, ", ".join(self._name_parser_map)
+ msg = _("unknown parser %r (choices: %s)" % tup)
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
@@ -1137,6 +1094,7 @@ def __call__(self, parser, namespace, values, option_string=None):
# Type classes
# ==============
+
class FileType(object):
"""Factory for creating file object types
@@ -1150,16 +1108,16 @@ class FileType(object):
the builtin open() function.
"""
- def __init__(self, mode='r', bufsize=None):
+ def __init__(self, mode="r", bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
- if string == '-':
- if 'r' in self._mode:
+ if string == "-":
+ if "r" in self._mode:
return _sys.stdin
- elif 'w' in self._mode:
+ elif "w" in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r' % self._mode)
@@ -1178,13 +1136,15 @@ def __call__(self, string):
def __repr__(self):
args = [self._mode, self._bufsize]
- args_str = ', '.join([repr(arg) for arg in args if arg is not None])
- return '%s(%s)' % (type(self).__name__, args_str)
+ args_str = ", ".join([repr(arg) for arg in args if arg is not None])
+ return "%s(%s)" % (type(self).__name__, args_str)
+
# ===========================
# Optional and Positional Parsing
# ===========================
+
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
@@ -1209,12 +1169,7 @@ def __contains__(self, key):
class _ActionsContainer(object):
-
- def __init__(self,
- description,
- prefix_chars,
- argument_default,
- conflict_handler):
+ def __init__(self, description, prefix_chars, argument_default, conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
@@ -1226,17 +1181,17 @@ def __init__(self,
self._registries = {}
# register actions
- self.register('action', None, _StoreAction)
- self.register('action', 'store', _StoreAction)
- self.register('action', 'store_const', _StoreConstAction)
- self.register('action', 'store_true', _StoreTrueAction)
- self.register('action', 'store_false', _StoreFalseAction)
- self.register('action', 'append', _AppendAction)
- self.register('action', 'append_const', _AppendConstAction)
- self.register('action', 'count', _CountAction)
- self.register('action', 'help', _HelpAction)
- self.register('action', 'version', _VersionAction)
- self.register('action', 'parsers', _SubParsersAction)
+ self.register("action", None, _StoreAction)
+ self.register("action", "store", _StoreAction)
+ self.register("action", "store_const", _StoreConstAction)
+ self.register("action", "store_true", _StoreTrueAction)
+ self.register("action", "store_false", _StoreFalseAction)
+ self.register("action", "append", _AppendAction)
+ self.register("action", "append_const", _AppendConstAction)
+ self.register("action", "count", _CountAction)
+ self.register("action", "help", _HelpAction)
+ self.register("action", "version", _VersionAction)
+ self.register("action", "parsers", _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
@@ -1253,7 +1208,7 @@ def __init__(self,
self._defaults = {}
# determines whether an "option" looks like a negative number
- self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
+ self._negative_number_matcher = _re.compile(r"^-\d+$|^-\d*\.\d+$")
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
@@ -1287,7 +1242,6 @@ def get_default(self, dest):
return action.default
return self._defaults.get(dest, None)
-
# =======================
# Adding argument actions
# =======================
@@ -1302,8 +1256,8 @@ def add_argument(self, *args, **kwargs):
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
- if args and 'dest' in kwargs:
- raise ValueError('dest supplied twice for positional argument')
+ if args and "dest" in kwargs:
+ raise ValueError("dest supplied twice for positional argument")
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
@@ -1311,12 +1265,12 @@ def add_argument(self, *args, **kwargs):
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
- if 'default' not in kwargs:
- dest = kwargs['dest']
+ if "default" not in kwargs:
+ dest = kwargs["dest"]
if dest in self._defaults:
- kwargs['default'] = self._defaults[dest]
+ kwargs["default"] = self._defaults[dest]
elif self.argument_default is not None:
- kwargs['default'] = self.argument_default
+ kwargs["default"] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
@@ -1325,9 +1279,9 @@ def add_argument(self, *args, **kwargs):
action = action_class(**kwargs)
# raise an error if the action type is not callable
- type_func = self._registry_get('type', action.type, action.type)
+ type_func = self._registry_get("type", action.type, action.type)
if not _callable(type_func):
- raise ValueError('%r is not callable' % type_func)
+ raise ValueError("%r is not callable" % type_func)
return self._add_action(action)
@@ -1370,21 +1324,21 @@ def _add_container_actions(self, container):
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
- msg = _('cannot merge actions - two groups are named %r')
+ msg = _("cannot merge actions - two groups are named %r")
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
-
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
- conflict_handler=group.conflict_handler)
+ conflict_handler=group.conflict_handler,
+ )
# map the actions to their new group
for action in group._group_actions:
@@ -1394,8 +1348,7 @@ def _add_container_actions(self, container):
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
- mutex_group = self.add_mutually_exclusive_group(
- required=group.required)
+ mutex_group = self.add_mutually_exclusive_group(required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
@@ -1407,16 +1360,16 @@ def _add_container_actions(self, container):
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
- if 'required' in kwargs:
+ if "required" in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
- if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
- kwargs['required'] = True
- if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
- kwargs['required'] = True
+ if kwargs.get("nargs") not in [OPTIONAL, ZERO_OR_MORE]:
+ kwargs["required"] = True
+ if kwargs.get("nargs") == ZERO_OR_MORE and "default" not in kwargs:
+ kwargs["required"] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
@@ -1428,8 +1381,7 @@ def _get_optional_kwargs(self, *args, **kwargs):
for option_string in args:
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
- msg = _('invalid option string %r: '
- 'must start with a character %r')
+ msg = _("invalid option string %r: " "must start with a character %r")
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
@@ -1441,7 +1393,7 @@ def _get_optional_kwargs(self, *args, **kwargs):
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
- dest = kwargs.pop('dest', None)
+ dest = kwargs.pop("dest", None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
@@ -1449,28 +1401,27 @@ def _get_optional_kwargs(self, *args, **kwargs):
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
- msg = _('dest= is required for options like %r')
+ msg = _("dest= is required for options like %r")
raise ValueError(msg % option_string)
- dest = dest.replace('-', '_')
+ dest = dest.replace("-", "_")
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
- action = kwargs.pop('action', default)
- return self._registry_get('action', action, action)
+ action = kwargs.pop("action", default)
+ return self._registry_get("action", action, action)
def _get_handler(self):
# determine function from conflict handler string
- handler_func_name = '_handle_conflict_%s' % self.conflict_handler
+ handler_func_name = "_handle_conflict_%s" % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
- msg = _('invalid conflict_resolution value: %r')
+ msg = _("invalid conflict_resolution value: %r")
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
-
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
@@ -1484,17 +1435,15 @@ def _check_conflict(self, action):
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
- message = _('conflicting option string(s): %s')
- conflict_string = ', '.join([option_string
- for option_string, action
- in conflicting_actions])
+ message = _("conflicting option string(s): %s")
+ conflict_string = ", ".join(
+ [option_string for option_string, action in conflicting_actions]
+ )
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
-
# remove all conflicting options
for option_string, action in conflicting_actions:
-
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
@@ -1506,13 +1455,12 @@ def _handle_conflict_resolve(self, action, conflicting_actions):
class _ArgumentGroup(_ActionsContainer):
-
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
- update('conflict_handler', container.conflict_handler)
- update('prefix_chars', container.prefix_chars)
- update('argument_default', container.argument_default)
+ update("conflict_handler", container.conflict_handler)
+ update("prefix_chars", container.prefix_chars)
+ update("argument_default", container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
@@ -1525,8 +1473,7 @@ def __init__(self, container, title=None, description=None, **kwargs):
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
- self._has_negative_number_optionals = \
- container._has_negative_number_optionals
+ self._has_negative_number_optionals = container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
@@ -1539,7 +1486,6 @@ def _remove_action(self, action):
class _MutuallyExclusiveGroup(_ArgumentGroup):
-
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
@@ -1547,7 +1493,7 @@ def __init__(self, container, required=False):
def _add_action(self, action):
if action.required:
- msg = _('mutually exclusive arguments must be optional')
+ msg = _("mutually exclusive arguments must be optional")
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
@@ -1576,33 +1522,39 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
- add_help -- Add a -h/-help option
"""
- def __init__(self,
- prog=None,
- usage=None,
- description=None,
- epilog=None,
- version=None,
- parents=[],
- formatter_class=HelpFormatter,
- prefix_chars='-',
- fromfile_prefix_chars=None,
- argument_default=None,
- conflict_handler='error',
- add_help=True):
-
+ def __init__(
+ self,
+ prog=None,
+ usage=None,
+ description=None,
+ epilog=None,
+ version=None,
+ parents=[],
+ formatter_class=HelpFormatter,
+ prefix_chars="-",
+ fromfile_prefix_chars=None,
+ argument_default=None,
+ conflict_handler="error",
+ add_help=True,
+ ):
if version is not None:
import warnings
+
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
- """instead""", DeprecationWarning)
+ """instead""",
+ DeprecationWarning,
+ )
superinit = super(ArgumentParser, self).__init__
- superinit(description=description,
- prefix_chars=prefix_chars,
- argument_default=argument_default,
- conflict_handler=conflict_handler)
+ superinit(
+ description=description,
+ prefix_chars=prefix_chars,
+ argument_default=argument_default,
+ conflict_handler=conflict_handler,
+ )
# default setting for prog
if prog is None:
@@ -1617,32 +1569,39 @@ def __init__(self,
self.add_help = add_help
add_group = self.add_argument_group
- self._positionals = add_group(_('positional arguments'))
- self._optionals = add_group(_('optional arguments'))
+ self._positionals = add_group(_("positional arguments"))
+ self._optionals = add_group(_("optional arguments"))
self._subparsers = None
# register types
def identity(string):
return string
- self.register('type', None, identity)
+
+ self.register("type", None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
- if '-' in prefix_chars:
- default_prefix = '-'
+ if "-" in prefix_chars:
+ default_prefix = "-"
else:
default_prefix = prefix_chars[0]
if self.add_help:
self.add_argument(
- default_prefix+'h', default_prefix*2+'help',
- action='help', default=SUPPRESS,
- help=_('show this help message and exit'))
+ default_prefix + "h",
+ default_prefix * 2 + "help",
+ action="help",
+ default=SUPPRESS,
+ help=_("show this help message and exit"),
+ )
if self.version:
self.add_argument(
- default_prefix+'v', default_prefix*2+'version',
- action='version', default=SUPPRESS,
+ default_prefix + "v",
+ default_prefix * 2 + "version",
+ action="version",
+ default=SUPPRESS,
version=self.version,
- help=_("show program's version number and exit"))
+ help=_("show program's version number and exit"),
+ )
# add parent arguments and defaults
for parent in parents:
@@ -1659,13 +1618,13 @@ def identity(string):
# =======================
def _get_kwargs(self):
names = [
- 'prog',
- 'usage',
- 'description',
- 'version',
- 'formatter_class',
- 'conflict_handler',
- 'add_help',
+ "prog",
+ "usage",
+ "description",
+ "version",
+ "formatter_class",
+ "conflict_handler",
+ "add_help",
]
return [(name, getattr(self, name)) for name in names]
@@ -1674,29 +1633,29 @@ def _get_kwargs(self):
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
- self.error(_('cannot have multiple subparser arguments'))
+ self.error(_("cannot have multiple subparser arguments"))
# add the parser class to the arguments if it's not present
- kwargs.setdefault('parser_class', type(self))
+ kwargs.setdefault("parser_class", type(self))
- if 'title' in kwargs or 'description' in kwargs:
- title = _(kwargs.pop('title', 'subcommands'))
- description = _(kwargs.pop('description', None))
+ if "title" in kwargs or "description" in kwargs:
+ title = _(kwargs.pop("title", "subcommands"))
+ description = _(kwargs.pop("description", None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
- if kwargs.get('prog') is None:
+ if kwargs.get("prog") is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
- formatter.add_usage(self.usage, positionals, groups, '')
- kwargs['prog'] = formatter.format_help().strip()
+ formatter.add_usage(self.usage, positionals, groups, "")
+ kwargs["prog"] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
- parsers_class = self._pop_action_class(kwargs, 'parsers')
+ parsers_class = self._pop_action_class(kwargs, "parsers")
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
@@ -1711,14 +1670,10 @@ def _add_action(self, action):
return action
def _get_optional_actions(self):
- return [action
- for action in self._actions
- if action.option_strings]
+ return [action for action in self._actions if action.option_strings]
def _get_positional_actions(self):
- return [action
- for action in self._actions
- if not action.option_strings]
+ return [action for action in self._actions if not action.option_strings]
# =====================================
# Command line argument parsing methods
@@ -1726,8 +1681,8 @@ def _get_positional_actions(self):
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
- msg = _('unrecognized arguments: %s')
- self.error(msg % ' '.join(argv))
+ msg = _("unrecognized arguments: %s")
+ self.error(msg % " ".join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
@@ -1775,7 +1730,7 @@ def _parse_known_args(self, arg_strings, namespace):
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
- conflicts.extend(group_actions[i + 1:])
+ conflicts.extend(group_actions[i + 1 :])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
@@ -1784,26 +1739,25 @@ def _parse_known_args(self, arg_strings, namespace):
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
-
# all args after -- are non-options
- if arg_string == '--':
- arg_string_pattern_parts.append('-')
+ if arg_string == "--":
+ arg_string_pattern_parts.append("-")
for arg_string in arg_strings_iter:
- arg_string_pattern_parts.append('A')
+ arg_string_pattern_parts.append("A")
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
- pattern = 'A'
+ pattern = "A"
else:
option_string_indices[i] = option_tuple
- pattern = 'O'
+ pattern = "O"
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
- arg_strings_pattern = ''.join(arg_string_pattern_parts)
+ arg_strings_pattern = "".join(arg_string_pattern_parts)
# converts arg strings to the appropriate and then takes the action
seen_actions = set()
@@ -1820,7 +1774,7 @@ def take_action(action, argument_strings, option_string=None):
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
- msg = _('not allowed with argument %s')
+ msg = _("not allowed with argument %s")
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
@@ -1831,7 +1785,6 @@ def take_action(action, argument_strings, option_string=None):
# function to convert arg_strings into an optional action
def consume_optional(start_index):
-
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
@@ -1841,7 +1794,6 @@ def consume_optional(start_index):
match_argument = self._match_argument
action_tuples = []
while True:
-
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
@@ -1850,7 +1802,7 @@ def consume_optional(start_index):
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
- arg_count = match_argument(action, 'A')
+ arg_count = match_argument(action, "A")
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
@@ -1866,7 +1818,7 @@ def consume_optional(start_index):
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
- msg = _('ignored explicit argument %r')
+ msg = _("ignored explicit argument %r")
raise ArgumentError(action, msg % explicit_arg)
# if the action expect exactly one argument, we've
@@ -1880,7 +1832,7 @@ def consume_optional(start_index):
# error if a double-dash option did not use the
# explicit argument
else:
- msg = _('ignored explicit argument %r')
+ msg = _("ignored explicit argument %r")
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
@@ -1916,13 +1868,13 @@ def consume_positionals(start_index):
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
- args = arg_strings[start_index: start_index + arg_count]
+ args = arg_strings[start_index : start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
- positionals[:] = positionals[len(arg_counts):]
+ positionals[:] = positionals[len(arg_counts) :]
return start_index
# consume Positionals and Optionals alternately, until we have
@@ -1934,12 +1886,10 @@ def consume_positionals(start_index):
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
-
# consume any Positionals preceding the next option
- next_option_string_index = min([
- index
- for index in option_string_indices
- if index >= start_index])
+ next_option_string_index = min(
+ [index for index in option_string_indices if index >= start_index]
+ )
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
@@ -1970,25 +1920,26 @@ def consume_positionals(start_index):
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
- self.error(_('too few arguments'))
+ self.error(_("too few arguments"))
# make sure all required actions were present, and convert defaults.
for action in self._actions:
if action not in seen_actions:
if action.required:
name = _get_action_name(action)
- self.error(_('argument %s is required') % name)
+ self.error(_("argument %s is required") % name)
else:
# Convert action default now instead of doing it before
# parsing arguments to avoid calling convert functions
# twice (which may fail) if the argument was given, but
# only if it was defined already in the namespace
- if (action.default is not None and
- isinstance(action.default, basestring) and
- hasattr(namespace, action.dest) and
- action.default is getattr(namespace, action.dest)):
- setattr(namespace, action.dest,
- self._get_value(action, action.default))
+ if (
+ action.default is not None
+ and isinstance(action.default, basestring)
+ and hasattr(namespace, action.dest)
+ and action.default is getattr(namespace, action.dest)
+ ):
+ setattr(namespace, action.dest, self._get_value(action, action.default))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
@@ -1999,11 +1950,13 @@ def consume_positionals(start_index):
# if no actions were used, report the error
else:
- names = [_get_action_name(action)
- for action in group._group_actions
- if action.help is not SUPPRESS]
- msg = _('one of the arguments %s is required')
- self.error(msg % ' '.join(names))
+ names = [
+ _get_action_name(action)
+ for action in group._group_actions
+ if action.help is not SUPPRESS
+ ]
+ msg = _("one of the arguments %s is required")
+ self.error(msg % " ".join(names))
# return the updated namespace and the extra arguments
return namespace, extras
@@ -2012,7 +1965,6 @@ def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
-
# for regular arguments, just add them back into the list
if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
@@ -2048,11 +2000,11 @@ def _match_argument(self, action, arg_strings_pattern):
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
- None: _('expected one argument'),
- OPTIONAL: _('expected at most one argument'),
- ONE_OR_MORE: _('expected at least one argument'),
+ None: _("expected one argument"),
+ OPTIONAL: _("expected at most one argument"),
+ ONE_OR_MORE: _("expected at least one argument"),
}
- default = _('expected %s argument(s)') % action.nargs
+ default = _("expected %s argument(s)") % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
@@ -2065,8 +2017,7 @@ def _match_arguments_partial(self, actions, arg_strings_pattern):
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
- pattern = ''.join([self._get_nargs_pattern(action)
- for action in actions_slice])
+ pattern = "".join([self._get_nargs_pattern(action) for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
@@ -2094,8 +2045,8 @@ def _parse_optional(self, arg_string):
return None
# if the option string before the "=" is present, return the action
- if '=' in arg_string:
- option_string, explicit_arg = arg_string.split('=', 1)
+ if "=" in arg_string:
+ option_string, explicit_arg = arg_string.split("=", 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
@@ -2106,15 +2057,16 @@ def _parse_optional(self, arg_string):
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
- options = ', '.join([option_string
- for action, option_string, explicit_arg in option_tuples])
+ options = ", ".join(
+ [option_string for action, option_string, explicit_arg in option_tuples]
+ )
tup = arg_string, options
- self.error(_('ambiguous option: %s could match %s') % tup)
+ self.error(_("ambiguous option: %s could match %s") % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
- option_tuple, = option_tuples
+ (option_tuple,) = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
@@ -2125,7 +2077,7 @@ def _parse_optional(self, arg_string):
return None
# if it contains a space, it was meant to be a positional
- if ' ' in arg_string:
+ if " " in arg_string:
return None
# it was meant to be an optional but there is no such option
@@ -2139,8 +2091,8 @@ def _get_option_tuples(self, option_string):
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
- if '=' in option_string:
- option_prefix, explicit_arg = option_string.split('=', 1)
+ if "=" in option_string:
+ option_prefix, explicit_arg = option_string.split("=", 1)
else:
option_prefix = option_string
explicit_arg = None
@@ -2171,7 +2123,7 @@ def _get_option_tuples(self, option_string):
# shouldn't ever get here
else:
- self.error(_('unexpected option string: %s') % option_string)
+ self.error(_("unexpected option string: %s") % option_string)
# return the collected option tuples
return result
@@ -2183,36 +2135,36 @@ def _get_nargs_pattern(self, action):
# the default (None) is assumed to be a single argument
if nargs is None:
- nargs_pattern = '(-*A-*)'
+ nargs_pattern = "(-*A-*)"
# allow zero or one arguments
elif nargs == OPTIONAL:
- nargs_pattern = '(-*A?-*)'
+ nargs_pattern = "(-*A?-*)"
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
- nargs_pattern = '(-*[A-]*)'
+ nargs_pattern = "(-*[A-]*)"
# allow one or more arguments
elif nargs == ONE_OR_MORE:
- nargs_pattern = '(-*A[A-]*)'
+ nargs_pattern = "(-*A[A-]*)"
# allow any number of options or arguments
elif nargs == REMAINDER:
- nargs_pattern = '([-AO]*)'
+ nargs_pattern = "([-AO]*)"
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
- nargs_pattern = '(-*A[-AO]*)'
+ nargs_pattern = "(-*A[-AO]*)"
# all others should be integers
else:
- nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+ nargs_pattern = "(-*%s-*)" % "-*".join("A" * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
- nargs_pattern = nargs_pattern.replace('-*', '')
- nargs_pattern = nargs_pattern.replace('-', '')
+ nargs_pattern = nargs_pattern.replace("-*", "")
+ nargs_pattern = nargs_pattern.replace("-", "")
# return the pattern
return nargs_pattern
@@ -2223,7 +2175,7 @@ def _get_nargs_pattern(self, action):
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
- arg_strings = [s for s in arg_strings if s != '--']
+ arg_strings = [s for s in arg_strings if s != "--"]
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
@@ -2237,8 +2189,7 @@ def _get_values(self, action, arg_strings):
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
- elif (not arg_strings and action.nargs == ZERO_OR_MORE and
- not action.option_strings):
+ elif not arg_strings and action.nargs == ZERO_OR_MORE and not action.option_strings:
if action.default is not None:
value = action.default
else:
@@ -2247,7 +2198,7 @@ def _get_values(self, action, arg_strings):
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
- arg_string, = arg_strings
+ (arg_string,) = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
@@ -2270,9 +2221,9 @@ def _get_values(self, action, arg_strings):
return value
def _get_value(self, action, arg_string):
- type_func = self._registry_get('type', action.type, action.type)
+ type_func = self._registry_get("type", action.type, action.type)
if not _callable(type_func):
- msg = _('%r is not callable')
+ msg = _("%r is not callable")
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
@@ -2281,14 +2232,14 @@ def _get_value(self, action, arg_string):
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
- name = getattr(action.type, '__name__', repr(action.type))
+ name = getattr(action.type, "__name__", repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
- name = getattr(action.type, '__name__', repr(action.type))
- msg = _('invalid %s value: %r')
+ name = getattr(action.type, "__name__", repr(action.type))
+ msg = _("invalid %s value: %r")
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
@@ -2297,8 +2248,8 @@ def _get_value(self, action, arg_string):
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
- tup = value, ', '.join(map(repr, action.choices))
- msg = _('invalid choice: %r (choose from %s)') % tup
+ tup = value, ", ".join(map(repr, action.choices))
+ msg = _("invalid choice: %r (choose from %s)") % tup
raise ArgumentError(action, msg)
# =======================
@@ -2306,16 +2257,14 @@ def _check_value(self, action, value):
# =======================
def format_usage(self):
formatter = self._get_formatter()
- formatter.add_usage(self.usage, self._actions,
- self._mutually_exclusive_groups)
+ formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
- formatter.add_usage(self.usage, self._actions,
- self._mutually_exclusive_groups)
+ formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
@@ -2335,10 +2284,12 @@ def format_help(self):
def format_version(self):
import warnings
+
warnings.warn(
'The format_version method is deprecated -- the "version" '
- 'argument to ArgumentParser is no longer supported.',
- DeprecationWarning)
+ "argument to ArgumentParser is no longer supported.",
+ DeprecationWarning,
+ )
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
@@ -2361,10 +2312,12 @@ def print_help(self, file=None):
def print_version(self, file=None):
import warnings
+
warnings.warn(
'The print_version method is deprecated -- the "version" '
- 'argument to ArgumentParser is no longer supported.',
- DeprecationWarning)
+ "argument to ArgumentParser is no longer supported.",
+ DeprecationWarning,
+ )
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
@@ -2391,4 +2344,4 @@ def error(self, message):
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
- self.exit(2, _('%s: error: %s\n') % (self.prog, message))
+ self.exit(2, _("%s: error: %s\n") % (self.prog, message))
diff --git a/reco_profiling/profileRunner.py b/reco_profiling/profileRunner.py
index 0dad27a7d718..bb8888b36804 100644
--- a/reco_profiling/profileRunner.py
+++ b/reco_profiling/profileRunner.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-#Profile runner for reco releases
-#maintained by the CMS reco group
+# Profile runner for reco releases
+# maintained by the CMS reco group
import subprocess
import glob
import sys
@@ -8,7 +8,7 @@
import shutil
workflow_configs = {
- #Run3 HI workflow
+ # Run3 HI workflow
"159.03": {
"num_events": 100,
"steps": {
@@ -22,11 +22,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "standard"
+ "matrix": "standard",
},
- #Run3 workflow
+ # Run3 workflow
"11834.21": {
"num_events": 400,
"steps": {
@@ -45,11 +45,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
+ "matrix": "upgrade",
},
- #Phase2 workflow used in mid-2021
+ # Phase2 workflow used in mid-2021
"23434.21": {
"num_events": 100,
"steps": {
@@ -68,11 +68,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
+ "matrix": "upgrade",
},
- #Phase2 workflow used in late-2021
+ # Phase2 workflow used in late-2021
"34834.21": {
"num_events": 100,
"steps": {
@@ -86,11 +86,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #Phase2 workflow used in early-2022
+ "matrix": "upgrade",
+ },
+ # Phase2 workflow used in early-2022
"35234.21": {
"num_events": 100,
"steps": {
@@ -104,11 +104,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #Phase2 workflow used in mid-2022
+ "matrix": "upgrade",
+ },
+ # Phase2 workflow used in mid-2022
"39634.21": {
"num_events": 100,
"steps": {
@@ -122,11 +122,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #Phase2 workflow renumbered in late-2022
+ "matrix": "upgrade",
+ },
+ # Phase2 workflow renumbered in late-2022
"21034.21": {
"num_events": 100,
"steps": {
@@ -140,11 +140,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #Phase2 workflow used begin-2023
+ "matrix": "upgrade",
+ },
+ # Phase2 workflow used begin-2023
"23834.21": {
"num_events": 100,
"steps": {
@@ -158,11 +158,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #Phase2 workflow used late-2023
+ "matrix": "upgrade",
+ },
+ # Phase2 workflow used late-2023
"25034.21": {
"num_events": 100,
"steps": {
@@ -176,11 +176,11 @@
"FastTimer": True,
"igprof": True,
},
- },
+ },
"nThreads": 1,
- "matrix": "upgrade"
- } ,
- #8-thread T0-like promptreco workflow
+ "matrix": "upgrade",
+ },
+ # 8-thread T0-like promptreco workflow
"136.889": {
"num_events": 5000,
"steps": {
@@ -189,11 +189,11 @@
"FastTimer": False,
"igprof": False,
},
- },
+ },
"nThreads": 8,
- "matrix": "standard"
+ "matrix": "standard",
},
- #2018 HI T0-like workflow
+ # 2018 HI T0-like workflow
"140.56": {
"num_events": 1000,
"steps": {
@@ -202,29 +202,31 @@
"FastTimer": False,
"igprof": False,
},
- },
+ },
"nThreads": 8,
- "matrix": "standard"
+ "matrix": "standard",
},
}
-#Prepare cmdLog and execute the workflow steps to get e.g. DAS entries, but call cmsRun with --no_exec
+
+# Prepare cmdLog and execute the workflow steps to get e.g. DAS entries, but call cmsRun with --no_exec
def prepareMatrixWF(workflow_number, num_events, matrix="upgrade", nthreads=1):
cmd = [
- "runTheMatrix.py",
- "-w",
- matrix,
- "-l",
- str(workflow_number),
- "--command=\"--no_exec\"",
- "--ibeos",
- "--nThreads",
- str(nthreads),
+ "runTheMatrix.py",
+ "-w",
+ matrix,
+ "-l",
+ str(workflow_number),
+ '--command="--no_exec"',
+ "--ibeos",
+ "--nThreads",
+ str(nthreads),
]
cmd = " ".join(cmd)
os.system(cmd)
-#extracts the cmsdriver lines from the cmdLog
+
+# extracts the cmsdriver lines from the cmdLog
def parseCmdLog(filename):
cmsdriver_lines = []
with open(filename) as fi:
@@ -235,8 +237,10 @@ def parseCmdLog(filename):
cmsdriver_lines.append(stripPipe(line))
return cmsdriver_lines
+
def stripPipe(cmsdriver_line):
- return cmsdriver_line[:cmsdriver_line.index(">")]
+ return cmsdriver_line[: cmsdriver_line.index(">")]
+
def getWFDir(workflow_number):
dirs = list(glob.glob("{}_*".format(workflow_number)))
@@ -244,6 +248,7 @@ def getWFDir(workflow_number):
return None
return dirs[0]
+
def wrapInRetry(cmd):
s = """n=0
until [ "$n" -ge 10 ]
@@ -251,27 +256,51 @@ def wrapInRetry(cmd):
echo "attempt $n"
{} && break
n=$((n+1))
-done""".format(cmd)
+done""".format(
+ cmd
+ )
return s
+
def echoBefore(cmd, msg):
s = """
echo "{}"
{}
-""".format(msg, cmd)
+""".format(
+ msg, cmd
+ )
return s
+
def prepTimeMemoryInfo(cmd, istep):
- cmd_tmi = cmd + " --customise=Validation/Performance/TimeMemoryInfo.py &> step{}_TimeMemoryInfo.log".format(istep)
+ cmd_tmi = (
+ cmd
+ + " --customise=Validation/Performance/TimeMemoryInfo.py &> step{}_TimeMemoryInfo.log".format(
+ istep
+ )
+ )
return cmd_tmi
+
def prepFastTimer(cmd, istep):
- cmd_ft = cmd + " --customise HLTrigger/Timer/FastTimer.customise_timer_service_singlejob --customise_commands \"process.FastTimerService.writeJSONSummary=True;process.FastTimerService.jsonFileName=\\\"step{istep}_circles.json\\\"\" &> step{istep}_FastTimerService.log".format(istep=istep)
+ cmd_ft = (
+ cmd
+ + ' --customise HLTrigger/Timer/FastTimer.customise_timer_service_singlejob --customise_commands "process.FastTimerService.writeJSONSummary=True;process.FastTimerService.jsonFileName=\\"step{istep}_circles.json\\"" &> step{istep}_FastTimerService.log'.format(
+ istep=istep
+ )
+ )
return cmd_ft
+
def prepIgprof(cmd, istep):
- cmd_ig = cmd + " --customise Validation/Performance/IgProfInfo.customise --no_exec --python_filename step{istep}_igprof.py &> step{istep}_igprof_conf.txt".format(istep=istep)
- return cmd_ig
+ cmd_ig = (
+ cmd
+ + " --customise Validation/Performance/IgProfInfo.customise --no_exec --python_filename step{istep}_igprof.py &> step{istep}_igprof_conf.txt".format(
+ istep=istep
+ )
+ )
+ return cmd_ig
+
def configureProfilingSteps(cmsdriver_lines, num_events, steps_config):
igprof_exe = "igprof"
@@ -279,125 +308,145 @@ def configureProfilingSteps(cmsdriver_lines, num_events, steps_config):
steps = {}
for line in cmsdriver_lines:
spl = line.split()[1]
- #step1 has the format `cmsDriver.py fragment`, otherwise it's `cmsDriver.py stepN`
+ # step1 has the format `cmsDriver.py fragment`, otherwise it's `cmsDriver.py stepN`
if "step" in spl:
istep = int(spl.replace("step", ""))
else:
istep = 1
- steps[istep] = line + " -n {num_events} --suffix \"-j step{istep}_JobReport.xml\"".format(istep=istep, num_events=num_events)
+ steps[istep] = line + ' -n {num_events} --suffix "-j step{istep}_JobReport.xml"'.format(
+ istep=istep, num_events=num_events
+ )
steps_to_run = list(sorted(steps.keys()))
- outfiles = [
- "step{}_JobReport.xml".format(istep) for istep in steps_to_run
- ]
- outfiles += [
- "step{}.root".format(istep) for istep in steps_to_run
- ]
- outfiles += [
- "step{}.log".format(istep) for istep in steps_to_run
- ]
+ outfiles = ["step{}_JobReport.xml".format(istep) for istep in steps_to_run]
+ outfiles += ["step{}.root".format(istep) for istep in steps_to_run]
+ outfiles += ["step{}.log".format(istep) for istep in steps_to_run]
- #First run all the steps without any special options
- new_cmdlist = [steps[istep]+"&>step{istep}.log".format(istep=istep) for istep in steps_to_run]
+ # First run all the steps without any special options
+ new_cmdlist = [
+ steps[istep] + "&>step{istep}.log".format(istep=istep) for istep in steps_to_run
+ ]
igprof_commands = []
for step_name in steps_config.keys():
istep = int(step_name.replace("step", ""))
step = steps[istep]
- #strip the JobReport from the step command
- step = step[:step.index("--suffix")-1]
+ # strip the JobReport from the step command
+ step = step[: step.index("--suffix") - 1]
if steps_config[step_name]["TimeMemoryInfo"]:
step_tmi = prepTimeMemoryInfo(step, istep)
outfiles += ["step{}_TimeMemoryInfo.log".format(istep)]
- new_cmdlist += [
- echoBefore(step_tmi, "step{istep} TimeMemoryInfo".format(istep=istep))
- ]
+ new_cmdlist += [echoBefore(step_tmi, "step{istep} TimeMemoryInfo".format(istep=istep))]
if steps_config[step_name]["FastTimer"]:
step_ft = prepFastTimer(step, istep)
- outfiles += ["step{}_FastTimerService.log".format(istep), "step{}_circles.json".format(istep)]
+ outfiles += [
+ "step{}_FastTimerService.log".format(istep),
+ "step{}_circles.json".format(istep),
+ ]
new_cmdlist += [
echoBefore(step_ft, "step{istep} FastTimer".format(istep=istep)),
]
if steps_config[step_name]["igprof"]:
step_ig = prepIgprof(step, istep)
- new_cmdlist += [
- echoBefore(step_ig, "step{istep} IgProf conf".format(istep=istep))
- ]
-
- igprof_pp = wrapInRetry(igprof_exe + " -d -pp -z -o step{istep}_igprofCPU.gz -t cmsRun cmsRun step{istep}_igprof.py &> step{istep}_igprof_cpu.txt".format(istep=istep))
- igprof_mp = wrapInRetry(igprof_exe + " -d -mp -z -o step{istep}_igprofMEM.gz -t cmsRunGlibC cmsRunGlibC step{istep}_igprof.py &> step{istep}_igprof_mem.txt".format(istep=istep))
+ new_cmdlist += [echoBefore(step_ig, "step{istep} IgProf conf".format(istep=istep))]
+
+ igprof_pp = wrapInRetry(
+ igprof_exe
+ + " -d -pp -z -o step{istep}_igprofCPU.gz -t cmsRun cmsRun step{istep}_igprof.py &> step{istep}_igprof_cpu.txt".format(
+ istep=istep
+ )
+ )
+ igprof_mp = wrapInRetry(
+ igprof_exe
+ + " -d -mp -z -o step{istep}_igprofMEM.gz -t cmsRunGlibC cmsRunGlibC step{istep}_igprof.py &> step{istep}_igprof_mem.txt".format(
+ istep=istep
+ )
+ )
outfiles += [
- "step{istep}_igprof_cpu.txt".format(istep=istep),
- "step{istep}_igprof_mem.txt".format(istep=istep)
+ "step{istep}_igprof_cpu.txt".format(istep=istep),
+ "step{istep}_igprof_mem.txt".format(istep=istep),
]
-
+
igprof_commands += [
echoBefore(igprof_pp, "step{istep} IgProf pp".format(istep=istep)),
"mv IgProf.1.gz step{istep}_igprofCPU.1.gz".format(istep=istep),
- "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(nev=int(num_events/2), istep=istep),
- "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(nev=int(num_events-1), istep=istep),
+ "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(
+ nev=int(num_events / 2), istep=istep
+ ),
+ "mv IgProf.{nev}.gz step{istep}_igprofCPU.{nev}.gz".format(
+ nev=int(num_events - 1), istep=istep
+ ),
echoBefore(igprof_mp, "step{istep} IgProf mp".format(istep=istep)),
"mv IgProf.1.gz step{istep}_igprofMEM.1.gz".format(istep=istep),
- "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(nev=int(num_events/2), istep=istep),
- "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(nev=int(num_events-1), istep=istep),
+ "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(
+ nev=int(num_events / 2), istep=istep
+ ),
+ "mv IgProf.{nev}.gz step{istep}_igprofMEM.{nev}.gz".format(
+ nev=int(num_events - 1), istep=istep
+ ),
]
outfiles += [
- "step{istep}_igprofCPU.{nev}.gz".format(istep=istep, nev=nev) for nev in [1,int(num_events/2), int(num_events-1)]
+ "step{istep}_igprofCPU.{nev}.gz".format(istep=istep, nev=nev)
+ for nev in [1, int(num_events / 2), int(num_events - 1)]
]
outfiles += [
- "step{istep}_igprofMEM.{nev}.gz".format(istep=istep, nev=nev) for nev in [1,int(num_events/2), int(num_events-1)]
- ]
- outfiles += [
- "step{istep}_igprofCPU.gz".format(istep=istep)
+ "step{istep}_igprofMEM.{nev}.gz".format(istep=istep, nev=nev)
+ for nev in [1, int(num_events / 2), int(num_events - 1)]
]
+ outfiles += ["step{istep}_igprofCPU.gz".format(istep=istep)]
new_cmdlist = new_cmdlist + igprof_commands
return new_cmdlist, outfiles
+
def writeProfilingScript(wfdir, runscript, cmdlist):
runscript_path = "{}/{}".format(wfdir, runscript)
with open(runscript_path, "w") as fi:
fi.write("#!/bin/sh\n")
- #for 12_3_0_pre3,pre4
- #fi.write("scram setup /cvmfs/cms.cern.ch/slc7_amd64_gcc10/cms/cmssw-tool-conf/52.0-904e6a6e16dcc9bdba60a5fd496e4237/tools/selected/libunwind.xml\n")
+ # for 12_3_0_pre3,pre4
+ # fi.write("scram setup /cvmfs/cms.cern.ch/slc7_amd64_gcc10/cms/cmssw-tool-conf/52.0-904e6a6e16dcc9bdba60a5fd496e4237/tools/selected/libunwind.xml\n")
- #this is required for igprof
+ # this is required for igprof
fi.write("ulimit -a\n")
- #don't abort on error
- #fi.write("set -e\n")
-
- #print commands verbosely
+ # don't abort on error
+ # fi.write("set -e\n")
+
+ # print commands verbosely
fi.write("set -x\n")
- # ensure that compiler include paths are added to ROOT_INCLUDE_PATH
- fi.write("for path in $(LC_ALL=C g++ -xc++ -E -v /dev/null 2>&1 | sed -n -e '/^.include/,${' -e '/^ \/.*++/p' -e '}');do ROOT_INCLUDE_PATH=$path:$ROOT_INCLUDE_PATH; done")
+ # ensure that compiler include paths are added to ROOT_INCLUDE_PATH
+ fi.write(
+ "for path in $(LC_ALL=C g++ -xc++ -E -v /dev/null 2>&1 | sed -n -e '/^.include/,${' -e '/^ \/.*++/p' -e '}');do ROOT_INCLUDE_PATH=$path:$ROOT_INCLUDE_PATH; done"
+ )
fi.write("\n")
fi.write("")
fi.write("\n")
for cmd in cmdlist:
- fi.write(cmd + '\n')
+ fi.write(cmd + "\n")
return
+
def runProfiling(wfdir, runscript):
os.chdir(wfdir)
os.system("chmod +x {}".format(runscript))
os.system("bash {}".format(runscript))
os.chdir("..")
+
def copyProfilingOutputs(wfdir, out_dir, outfiles):
for output in outfiles:
path = "{}/{}".format(wfdir, output)
- #check that all outputs exists and are of nonzero size
+ # check that all outputs exists and are of nonzero size
if os.path.isfile(path) and os.stat(path).st_size > 0:
print("copying {} to {}".format(path, out_dir))
shutil.copy(path, out_dir)
@@ -405,17 +454,25 @@ def copyProfilingOutputs(wfdir, out_dir, outfiles):
print("ERROR: Output {} not found or is broken, skipping".format(path))
return
+
def main(wf, num_events, out_dir):
wfdir = getWFDir(wf)
-
+
if not (wfdir is None):
print("Output directory {} exists, aborting".format(wfdir))
sys.exit(1)
- prepareMatrixWF(wf, num_events, matrix=workflow_configs[wf]["matrix"], nthreads=workflow_configs[wf]["nThreads"])
+ prepareMatrixWF(
+ wf,
+ num_events,
+ matrix=workflow_configs[wf]["matrix"],
+ nthreads=workflow_configs[wf]["nThreads"],
+ )
wfdir = getWFDir(wf)
cmsdriver_lines = parseCmdLog("{}/cmdLog".format(wfdir))
- new_cmdlist, outfiles = configureProfilingSteps(cmsdriver_lines, num_events, workflow_configs[wf]["steps"])
+ new_cmdlist, outfiles = configureProfilingSteps(
+ cmsdriver_lines, num_events, workflow_configs[wf]["steps"]
+ )
runscript = "cmdLog_profiling.sh"
outfiles += ["cmdLog_profiling.sh"]
@@ -423,20 +480,32 @@ def main(wf, num_events, out_dir):
runProfiling(wfdir, runscript)
copyProfilingOutputs(wfdir, out_dir, outfiles)
+
def parse_args():
import argparse
+
parser = argparse.ArgumentParser()
- parser.add_argument("--workflow", type=str, default="35234.21", help="The workflow to use for profiling")
- parser.add_argument("--num-events", type=int, default=-1, help="Number of events to use, -1 to use the default")
- parser.add_argument("--out-dir", type=str, help="The output directory where to copy the profiling results", required=True)
+ parser.add_argument(
+ "--workflow", type=str, default="35234.21", help="The workflow to use for profiling"
+ )
+ parser.add_argument(
+ "--num-events", type=int, default=-1, help="Number of events to use, -1 to use the default"
+ )
+ parser.add_argument(
+ "--out-dir",
+ type=str,
+ help="The output directory where to copy the profiling results",
+ required=True,
+ )
args = parser.parse_args()
- if args.num_events==-1:
+ if args.num_events == -1:
args.num_events = workflow_configs[args.workflow]["num_events"]
return args
+
if __name__ == "__main__":
args = parse_args()
-
+
os.makedirs(args.out_dir)
main(args.workflow, args.num_events, args.out_dir)
diff --git a/release_notes_collection.py b/release_notes_collection.py
index 05cc85687fc9..1e73d2479c8f 100755
--- a/release_notes_collection.py
+++ b/release_notes_collection.py
@@ -8,118 +8,190 @@
from hashlib import md5
import time
-RX_RELEASE = re.compile('CMSSW_(\d+)_(\d+)_(\d+)(_pre[0-9]+)*(_cand[0-9]+)*(_patch[0-9]+)*')
+RX_RELEASE = re.compile("CMSSW_(\d+)_(\d+)_(\d+)(_pre[0-9]+)*(_cand[0-9]+)*(_patch[0-9]+)*")
RX_AUTHOR = re.compile("(.*)(@[a-zA-Z-_0-9]+)")
RX_COMPARE = re.compile("(https://github.*compare.*\.\.\..*)")
-RX_COMMIT = re.compile("^-\s+(:arrow_right:\s*|)([^/]+\/[^/]+|)\#(\d{0,5})( from.*)")
+RX_COMMIT = re.compile("^-\s+(:arrow_right:\s*|)([^/]+\/[^/]+|)\#(\d{0,5})( from.*)")
-Release = namedtuple("Release", ["major", "minor", "subminor", "pre", "cand", "patch","published_at"])
+Release = namedtuple(
+ "Release", ["major", "minor", "subminor", "pre", "cand", "patch", "published_at"]
+)
DEBUG = True
+
def head(title, release):
- rel_link=title.replace("CMSSW_","")
- ret = "---\n"
- ret += "layout: post\n"
- ret += 'rel_link: "{rel_link}"\n'.format(rel_link=rel_link)
- ret += 'title: "{title}"\n'.format(title=title)
- ret += "date: {published_at}\n".format(published_at=time.strftime("%Y-%m-%d %H:%M:%S",time.strptime(release.published_at,"%Y-%m-%dT%H:%M:%SZ")))
- ret += "categories: cmssw\n"
- ret += "relmajor: {major}\n".format(major=release.major)
- ret += "relminor: {minor}\n".format(minor=release.minor)
- ret += "relsubminor: {subminor}\n".format(subminor=release.subminor)
- if release.pre:
- ret += "relpre: {pre}\n".format(pre=release.pre)
- if release.cand:
- ret += "relcand: {cand}\n".format(cand=release.cand)
- if release.patch:
- ret += "relpatch: {patch}\n".format(patch=release.patch)
- ret += "---\n\n"
- return ret
+ rel_link = title.replace("CMSSW_", "")
+ ret = "---\n"
+ ret += "layout: post\n"
+ ret += 'rel_link: "{rel_link}"\n'.format(rel_link=rel_link)
+ ret += 'title: "{title}"\n'.format(title=title)
+ ret += "date: {published_at}\n".format(
+ published_at=time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.strptime(release.published_at, "%Y-%m-%dT%H:%M:%SZ")
+ )
+ )
+ ret += "categories: cmssw\n"
+ ret += "relmajor: {major}\n".format(major=release.major)
+ ret += "relminor: {minor}\n".format(minor=release.minor)
+ ret += "relsubminor: {subminor}\n".format(subminor=release.subminor)
+ if release.pre:
+ ret += "relpre: {pre}\n".format(pre=release.pre)
+ if release.cand:
+ ret += "relcand: {cand}\n".format(cand=release.cand)
+ if release.patch:
+ ret += "relpatch: {patch}\n".format(patch=release.patch)
+ ret += "---\n\n"
+ return ret
+
def get_pr(pr, repo, cmsprs):
- pr_md5 = md5((pr+"\n").encode()).hexdigest()
- pr_cache = join(cmsprs,repo,pr_md5[0:2],pr_md5[2:]+".json")
- if exists(pr_cache): return json.load(open(pr_cache))
- return {}
+ pr_md5 = md5((pr + "\n").encode()).hexdigest()
+ pr_cache = join(cmsprs, repo, pr_md5[0:2], pr_md5[2:] + ".json")
+ if exists(pr_cache):
+ return json.load(open(pr_cache))
+ return {}
+
def getReleasesNotes(opts):
- get_gh_token(token_file=expanduser("~/.github-token-cmsbot"))
- notes = []
- error_releases = {}
- print("Reading releases page")
- rel_opt=""
- if opts.release: rel_opt="/tags/%s" % opts.release
- releases=github_api("/repos/%s/releases%s" % (opts.repository, rel_opt), method="GET")
- if opts.release: releases = [releases]
- for release in releases:
- rel_name = release['name']
- rel_id = str(release['id'])
- print("Checking release", rel_name)
- if " " in rel_name:
- error_releases[rel_name]="Space in name:"+rel_id
- print(" Skipping release (contains space in name):",rel_name)
- continue
- rel_cyc = "_".join(rel_name.split("_")[0:2])
- rel_numbers = re.match(RX_RELEASE, rel_name)
- if not rel_numbers:
- error_releases[rel_name]="Does not match release regexp:"+rel_id
- print(" Skipping release (does not match release regexp):",rel_name)
- continue
- if (not 'body' in release) or (not release['body']):
- error_releases[rel_name]="Empty release body message:"+rel_id
- print(" Skipping release (empty release body message):",rel_name)
- continue
- if not re.match('^%s$' % opts.release_filter, rel_name):
- print(" Skipping release (release does not match filter):",rel_name)
- continue
- rel_file = join(opts.release_notes_dir,rel_cyc,"%s.md" % rel_name)
- if (not opts.force) and exists(rel_file):
- print(" Skipping release (already exists):",rel_name)
- continue
- release_notes = []
- prepo = ""
- count = 0
- forward_port_sym = ''
- for line in release['body'].encode("ascii", "ignore").decode().split('\n'):
- line = re.sub(RX_AUTHOR, '\\1**\\2**', line)
- m = RX_COMMIT.match(line)
- if m:
- repo = opts.repository
- forward_port = ""
- if m.group(1): forward_port = forward_port_sym
- if m.group(2): repo = m.group(2)
- if repo != prepo: count = 0
- prepo=repo
- count+=1
- line = '\n{count}. {forward_port}[{pr}](http://github.com/{repo}/pull/{pr})'.format(forward_port=forward_port,count=count,repo=repo,pr=m.group(3))+'{:target="_blank"} '+m.group(4)
- pr = get_pr(m.group(3), repo, opts.prs_dir)
- print(" PR found: "+repo+"#"+m.group(3))
- if 'created_at' in pr:line+=" created: "+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(pr['created_at'])))
- if 'merged_at' in pr:line+=" merged: "+time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(pr['merged_at'])))
- elif RX_COMPARE.match(line):
- line = re.sub(RX_COMPARE, '[compare to previous](\\1)\n\n', line)
+ get_gh_token(token_file=expanduser("~/.github-token-cmsbot"))
+ notes = []
+ error_releases = {}
+ print("Reading releases page")
+ rel_opt = ""
+ if opts.release:
+ rel_opt = "/tags/%s" % opts.release
+ releases = github_api("/repos/%s/releases%s" % (opts.repository, rel_opt), method="GET")
+ if opts.release:
+ releases = [releases]
+ for release in releases:
+ rel_name = release["name"]
+ rel_id = str(release["id"])
+ print("Checking release", rel_name)
+ if " " in rel_name:
+ error_releases[rel_name] = "Space in name:" + rel_id
+ print(" Skipping release (contains space in name):", rel_name)
+ continue
+ rel_cyc = "_".join(rel_name.split("_")[0:2])
+ rel_numbers = re.match(RX_RELEASE, rel_name)
+ if not rel_numbers:
+ error_releases[rel_name] = "Does not match release regexp:" + rel_id
+ print(" Skipping release (does not match release regexp):", rel_name)
+ continue
+ if (not "body" in release) or (not release["body"]):
+ error_releases[rel_name] = "Empty release body message:" + rel_id
+ print(" Skipping release (empty release body message):", rel_name)
+ continue
+ if not re.match("^%s$" % opts.release_filter, rel_name):
+ print(" Skipping release (release does not match filter):", rel_name)
+ continue
+ rel_file = join(opts.release_notes_dir, rel_cyc, "%s.md" % rel_name)
+ if (not opts.force) and exists(rel_file):
+ print(" Skipping release (already exists):", rel_name)
+ continue
+ release_notes = []
+ prepo = ""
+ count = 0
+ forward_port_sym = ''
+ for line in release["body"].encode("ascii", "ignore").decode().split("\n"):
+ line = re.sub(RX_AUTHOR, "\\1**\\2**", line)
+ m = RX_COMMIT.match(line)
+ if m:
+ repo = opts.repository
+ forward_port = ""
+ if m.group(1):
+ forward_port = forward_port_sym
+ if m.group(2):
+ repo = m.group(2)
+ if repo != prepo:
+ count = 0
+ prepo = repo
+ count += 1
+ line = (
+ "\n{count}. {forward_port}[{pr}](http://github.com/{repo}/pull/{pr})".format(
+ forward_port=forward_port, count=count, repo=repo, pr=m.group(3)
+ )
+ + '{:target="_blank"} '
+ + m.group(4)
+ )
+ pr = get_pr(m.group(3), repo, opts.prs_dir)
+ print(" PR found: " + repo + "#" + m.group(3))
+ if "created_at" in pr:
+ line += " created: " + time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(float(pr["created_at"]))
+ )
+ if "merged_at" in pr:
+ line += " merged: " + time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(float(pr["merged_at"]))
+ )
+ elif RX_COMPARE.match(line):
+ line = re.sub(RX_COMPARE, "[compare to previous](\\1)\n\n", line)
- release_notes.append(line.replace(':arrow_right:',forward_port_sym))
- r = Release(int(rel_numbers.group(1)), int(rel_numbers.group(2)),
- int(rel_numbers.group(3)),rel_numbers.group(4),
- rel_numbers.group(5),rel_numbers.group(6),release['published_at'])
- out_rel = open(rel_file, 'w')
- out_rel.write(head(rel_name, r))
- out_rel.write('# %s\n%s' % (rel_name, "\n".join(release_notes)))
- out_rel.close()
- print(" Created release notes:",rel_name)
- if error_releases: print("Releases with errors:",error_releases)
+ release_notes.append(line.replace(":arrow_right:", forward_port_sym))
+ r = Release(
+ int(rel_numbers.group(1)),
+ int(rel_numbers.group(2)),
+ int(rel_numbers.group(3)),
+ rel_numbers.group(4),
+ rel_numbers.group(5),
+ rel_numbers.group(6),
+ release["published_at"],
+ )
+ out_rel = open(rel_file, "w")
+ out_rel.write(head(rel_name, r))
+ out_rel.write("# %s\n%s" % (rel_name, "\n".join(release_notes)))
+ out_rel.close()
+ print(" Created release notes:", rel_name)
+ if error_releases:
+ print("Releases with errors:", error_releases)
-if __name__ == '__main__':
- parser = OptionParser(usage="%prog")
- parser.add_option("-r", "--repository", dest="repository", help="Github Repositoy name e.g. cms-sw/cmssw.", type=str, default="cms-sw/cmssw")
- parser.add_option("-p", "--prs-path", dest="prs_dir", help="Directory with Pull request", type=str, default="cms-prs")
- parser.add_option("-N", "--release-notes", dest="release_notes_dir", help="Directory where to store release notes", type=str, default="ReleaseNotes/_releases")
- parser.add_option("-R", "--release", dest="release", help="Release name", type=str, default=None)
- parser.add_option("-F", "--release-filter", dest="release_filter", help="Release filter", type=str, default="CMSSW_.*")
- parser.add_option("-f", "--force", dest="force", action="store_true", help="Force re-creation of release notes", default=False)
- opts, args = parser.parse_args()
- if opts.release: opts.force=True
- getReleasesNotes(opts)
+if __name__ == "__main__":
+ parser = OptionParser(usage="%prog")
+ parser.add_option(
+ "-r",
+ "--repository",
+ dest="repository",
+ help="Github Repositoy name e.g. cms-sw/cmssw.",
+ type=str,
+ default="cms-sw/cmssw",
+ )
+ parser.add_option(
+ "-p",
+ "--prs-path",
+ dest="prs_dir",
+ help="Directory with Pull request",
+ type=str,
+ default="cms-prs",
+ )
+ parser.add_option(
+ "-N",
+ "--release-notes",
+ dest="release_notes_dir",
+ help="Directory where to store release notes",
+ type=str,
+ default="ReleaseNotes/_releases",
+ )
+ parser.add_option(
+ "-R", "--release", dest="release", help="Release name", type=str, default=None
+ )
+ parser.add_option(
+ "-F",
+ "--release-filter",
+ dest="release_filter",
+ help="Release filter",
+ type=str,
+ default="CMSSW_.*",
+ )
+ parser.add_option(
+ "-f",
+ "--force",
+ dest="force",
+ action="store_true",
+ help="Force re-creation of release notes",
+ default=False,
+ )
+ opts, args = parser.parse_args()
+ if opts.release:
+ opts.force = True
+ getReleasesNotes(opts)
diff --git a/releases.py b/releases.py
index 78d92e62877f..e36b38ef2185 100644
--- a/releases.py
+++ b/releases.py
@@ -1,25 +1,32 @@
from milestones import *
import re
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_13_3_X"
-RELEASE_BRANCH_MILESTONE["master"]=RELEASE_BRANCH_MILESTONE[CMSSW_DEVEL_BRANCH]
+RELEASE_BRANCH_MILESTONE["master"] = RELEASE_BRANCH_MILESTONE[CMSSW_DEVEL_BRANCH]
RELEASE_BRANCH_PRODUCTION.append("master")
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
+
def get_release_managers(branch):
- if branch in RELEASE_MANAGERS: return RELEASE_MANAGERS[branch]
- for exp in RELEASE_MANAGERS:
- if re.match(exp, branch): return RELEASE_MANAGERS[exp]
- return []
+ if branch in RELEASE_MANAGERS:
+ return RELEASE_MANAGERS[branch]
+ for exp in RELEASE_MANAGERS:
+ if re.match(exp, branch):
+ return RELEASE_MANAGERS[exp]
+ return []
-def is_closed_branch(branch):
- if branch in RELEASE_BRANCH_CLOSED: return True
- for exp in RELEASE_BRANCH_CLOSED:
- if re.match(exp, branch): return True
- return False
+def is_closed_branch(branch):
+ if branch in RELEASE_BRANCH_CLOSED:
+ return True
+ for exp in RELEASE_BRANCH_CLOSED:
+ if re.match(exp, branch):
+ return True
+ return False
diff --git a/repo_config.py b/repo_config.py
index 165ce3ce02fc..938d434c2835 100644
--- a/repo_config.py
+++ b/repo_config.py
@@ -1,12 +1,13 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import dirname,abspath
-GH_TOKEN="~/.github-token"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER=CMSBUILD_GH_USER
-GH_REPO_ORGANIZATION=GH_CMSSW_ORGANIZATION
-CREATE_EXTERNAL_ISSUE=True
-JENKINS_SERVER="http://cmsjenkins03.cern.ch:8080/jenkins"
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import dirname, abspath
+
+GH_TOKEN = "~/.github-token"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = CMSBUILD_GH_USER
+GH_REPO_ORGANIZATION = GH_CMSSW_ORGANIZATION
+CREATE_EXTERNAL_ISSUE = True
+JENKINS_SERVER = "http://cmsjenkins03.cern.ch:8080/jenkins"
IGNORE_ISSUES = {
- GH_CMSSW_ORGANIZATION+"/"+GH_CMSSW_REPO : [12368],
+ GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO: [12368],
}
diff --git a/report-build-release-status.py b/report-build-release-status.py
index 30c848e364b3..cbd5e59e32a1 100755
--- a/report-build-release-status.py
+++ b/report-build-release-status.py
@@ -3,16 +3,25 @@
from __future__ import print_function
from _py2with3compatibility import getoutput
from optparse import OptionParser
-from github_utils import create_issue_comment, get_issue_labels, remove_issue_label, add_issue_labels, remove_issue_labels_all
+from github_utils import (
+ create_issue_comment,
+ get_issue_labels,
+ remove_issue_label,
+ add_issue_labels,
+ remove_issue_labels_all,
+)
from os.path import expanduser
from datetime import datetime
from socket import setdefaulttimeout
from os import environ
import re
+
setdefaulttimeout(120)
-JENKINS_PREFIX="jenkins"
-try: JENKINS_PREFIX=environ['JENKINS_URL'].strip("/").split("/")[-1]
-except: JENKINS_PREFIX="jenkins"
+JENKINS_PREFIX = "jenkins"
+try:
+ JENKINS_PREFIX = environ["JENKINS_URL"].strip("/").split("/")[-1]
+except:
+ JENKINS_PREFIX = "jenkins"
#
# Posts a message in the github issue that triggered the build
# The structure of the message depends on the option used
@@ -21,297 +30,350 @@
# -------------------------------------------------------------------------------
# Global Variables
# --------------------------------------------------------------------------------
-GH_CMSSW_ORGANIZATION = 'cms-sw'
-GH_CMSSW_REPO = 'cmssw'
-POST_BUILDING='BUILDING'
-POST_TOOL_CONF_BUILDING='TOOL_CONF_BUILDING'
-BUILD_OK='BUILD_OK'
-TOOL_CONF_OK='TOOL_CONF_OK'
-TOOL_CONF_ERROR = 'TOOL_CONF_ERROR'
-BUILD_ERROR='BUILD_ERROR'
-UPLOADING='UPLOADING'
-UPLOAD_OK='UPLOAD_OK'
-UPLOAD_ERROR='UPLOAD_ERROR'
-CLEANUP_OK='CLEANUP_OK'
-CLEANUP_ERROR='CLEANUP_ERROR'
-TESTS_OK='TESTS_OK'
-RELEASE_NOTES_OK='RELEASE_NOTES_OK'
-RELEASE_NOTES_ERROR='RELEASE_NOTES_ERROR'
-INSTALLATION_OK='INSTALLATION_OK'
-INSTALLATION_SKIP='INSTALLATION_SKIP'
-INSTALLATION_ERROR='INSTALLATION_ERROR'
+GH_CMSSW_ORGANIZATION = "cms-sw"
+GH_CMSSW_REPO = "cmssw"
+POST_BUILDING = "BUILDING"
+POST_TOOL_CONF_BUILDING = "TOOL_CONF_BUILDING"
+BUILD_OK = "BUILD_OK"
+TOOL_CONF_OK = "TOOL_CONF_OK"
+TOOL_CONF_ERROR = "TOOL_CONF_ERROR"
+BUILD_ERROR = "BUILD_ERROR"
+UPLOADING = "UPLOADING"
+UPLOAD_OK = "UPLOAD_OK"
+UPLOAD_ERROR = "UPLOAD_ERROR"
+CLEANUP_OK = "CLEANUP_OK"
+CLEANUP_ERROR = "CLEANUP_ERROR"
+TESTS_OK = "TESTS_OK"
+RELEASE_NOTES_OK = "RELEASE_NOTES_OK"
+RELEASE_NOTES_ERROR = "RELEASE_NOTES_ERROR"
+INSTALLATION_OK = "INSTALLATION_OK"
+INSTALLATION_SKIP = "INSTALLATION_SKIP"
+INSTALLATION_ERROR = "INSTALLATION_ERROR"
# this means that there was an error in the script that excecutes the tests,
# it is independent from the tests results
-TESTS_ERROR='TESTS_ERROR'
-BUILDING_MSG='The build has started for {architecture} in {machine}. \n' \
- 'You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n' \
- '{details}' % JENKINS_PREFIX
-BUILDING_TOOL_CONF_MSG='The cmssw-tool-conf build has started for {architecture} in {machine}. \n' \
- 'You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n' \
- '{details}' % JENKINS_PREFIX
-BUILD_OK_MSG='The build has finished sucessfully for the architecture {architecture} and is ready to be uploaded. \n' \
- 'You can start the uploads by writing the comment: "upload all". I will upload all the architectures as soon as the build finishes successfully.\n' \
- 'You can see the log for the build here: \n' \
- '{log_url} \n' \
- 'Some tests ( runTheMatrix.py -s ) are being run, the results will be posted when done.'
-TOOL_CONF_OK_MSG='The cmssw-tool-conf build has finished sucessfully for the architecture {architecture} and it was automatically uploaded. \n' \
- 'Remember that if you write "+1" I will start to build this and all the architectures as soon as their cmssw-tool-conf finish.\n' \
- 'You can see the log for the build here: \n' \
- '{log_url} \n'
-TOOL_CONF_ERROR_MSG='There was an error building cmssw-tool-conf for {architecture} \n' \
- 'You can see the log for the build here: \n' \
- '{log_url} \n'
-UPLOADING_MSG='The upload has started for {architecture} in {machine}. \n' \
- 'You can see the progress here: https://cmssdt.cern.ch/%s/job/upload-release/{jk_build_number}/console' % JENKINS_PREFIX
-UPLOAD_OK_MSG='The upload has successfully finished for {architecture} \n You can see the log here: \n {log_url}'
-INSTALLATION_OK_MSG='The installation has successfully finished for {architecture} \n You can see the log here: \n {log_url} \n' \
- 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' \
- 'I will generate the release notes based on the release that you provide. You don\'t need to provide the architecture ' \
- 'I will use the production architecture to infer the cmsdist tag.\n' \
- 'Alternatively, you can just write "release-notes", I will try to guess the previous release.'
-INSTALLATION_SKIP_MSG='CERN AFS installation skipped for {architecture} as no CMSSW releases are now deployed on AFS. \n' \
- 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n ' \
- 'I will generate the release notes based on the release that you provide. You don\'t need to provide the architecture ' \
- 'I will use the production architecture to infer the cmsdist tag.\n' \
- 'Alternatively, you can just write "release-notes", I will try to guess the previous release.'
-UPLOAD_ERROR_MSG='The was error uploading {architecture}. \n You can see the log here: \n {log_url}'
-INSTALLATION_ERROR_MSG='The was error installing {architecture}. \n You can see the log here: \n {log_url}'
-CLEANUP_OK_MSG='The workspace for {architecture} has been deleted \n You can see the log here: \n {log_url} \n'
-CLEANUP_ERROR_MSG='There was an error deletng the workspace for {architecture} \n You can see the log here: \n {log_url} \n'
-TESTS_OK_MSG='The tests have finished for {architecture} \n You can see the log here: \n {log_url} \n'
-TESTS_ERROR_MSG='There was an error when running the tests for {architecture} \n You can see the log here: \n {log_url} \n'
-BUILD_ERROR_MSG='The was an error for {architecture}. \n You can see the log here: \n {log_url}'
-RELEASE_NOTES_OK_MSG='The release notes are ready: https://github.com/cms-sw/cmssw/releases/tag/{rel_name}'
-RELEASE_NOTES_ERROR_MSG='There was an error generating the release notes, please look into the logs'
-BUILD_QUEUED_LABEL = 'build-release-queued'
-BUILD_STARTED = 'build-release-started'
-BASE_BUILD_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/auto-build-release/%s-%s/%d'
-BASE_UPLOAD_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/auto-upload-release/%s-%s/%d'
-BASE_CLEANUP_LOG_URL = 'https://cmssdt.cern.ch/SDT/'+JENKINS_PREFIX+'-artifacts/cleanup-auto-build/%s-%s/%d'
-BASE_INSTALLATION_URL = 'https://cmssdt.cern.ch/SDT/%s-artifacts/deploy-release-afs/{rel_name}/{architecture}/{job_id}/' % JENKINS_PREFIX
+TESTS_ERROR = "TESTS_ERROR"
+BUILDING_MSG = (
+ "The build has started for {architecture} in {machine}. \n"
+ "You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n"
+ "{details}" % JENKINS_PREFIX
+)
+BUILDING_TOOL_CONF_MSG = (
+ "The cmssw-tool-conf build has started for {architecture} in {machine}. \n"
+ "You can see the progress here: https://cmssdt.cern.ch/%s/job/build-release/{jk_build_number}/console \n"
+ "{details}" % JENKINS_PREFIX
+)
+BUILD_OK_MSG = (
+ "The build has finished sucessfully for the architecture {architecture} and is ready to be uploaded. \n"
+ 'You can start the uploads by writing the comment: "upload all". I will upload all the architectures as soon as the build finishes successfully.\n'
+ "You can see the log for the build here: \n"
+ "{log_url} \n"
+ "Some tests ( runTheMatrix.py -s ) are being run, the results will be posted when done."
+)
+TOOL_CONF_OK_MSG = (
+ "The cmssw-tool-conf build has finished sucessfully for the architecture {architecture} and it was automatically uploaded. \n"
+ 'Remember that if you write "+1" I will start to build this and all the architectures as soon as their cmssw-tool-conf finish.\n'
+ "You can see the log for the build here: \n"
+ "{log_url} \n"
+)
+TOOL_CONF_ERROR_MSG = (
+ "There was an error building cmssw-tool-conf for {architecture} \n"
+ "You can see the log for the build here: \n"
+ "{log_url} \n"
+)
+UPLOADING_MSG = (
+ "The upload has started for {architecture} in {machine}. \n"
+ "You can see the progress here: https://cmssdt.cern.ch/%s/job/upload-release/{jk_build_number}/console"
+ % JENKINS_PREFIX
+)
+UPLOAD_OK_MSG = "The upload has successfully finished for {architecture} \n You can see the log here: \n {log_url}"
+INSTALLATION_OK_MSG = (
+ "The installation has successfully finished for {architecture} \n You can see the log here: \n {log_url} \n"
+ 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n '
+ "I will generate the release notes based on the release that you provide. You don't need to provide the architecture "
+ "I will use the production architecture to infer the cmsdist tag.\n"
+ 'Alternatively, you can just write "release-notes", I will try to guess the previous release.'
+)
+INSTALLATION_SKIP_MSG = (
+ "CERN AFS installation skipped for {architecture} as no CMSSW releases are now deployed on AFS. \n"
+ 'To generate the release notes for the release write "release-notes since \\", in the first line of your comment.\n '
+ "I will generate the release notes based on the release that you provide. You don't need to provide the architecture "
+ "I will use the production architecture to infer the cmsdist tag.\n"
+ 'Alternatively, you can just write "release-notes", I will try to guess the previous release.'
+)
+UPLOAD_ERROR_MSG = (
+ "The was error uploading {architecture}. \n You can see the log here: \n {log_url}"
+)
+INSTALLATION_ERROR_MSG = (
+ "The was error installing {architecture}. \n You can see the log here: \n {log_url}"
+)
+CLEANUP_OK_MSG = "The workspace for {architecture} has been deleted \n You can see the log here: \n {log_url} \n"
+CLEANUP_ERROR_MSG = "There was an error deletng the workspace for {architecture} \n You can see the log here: \n {log_url} \n"
+TESTS_OK_MSG = (
+ "The tests have finished for {architecture} \n You can see the log here: \n {log_url} \n"
+)
+TESTS_ERROR_MSG = "There was an error when running the tests for {architecture} \n You can see the log here: \n {log_url} \n"
+BUILD_ERROR_MSG = "The was an error for {architecture}. \n You can see the log here: \n {log_url}"
+RELEASE_NOTES_OK_MSG = (
+ "The release notes are ready: https://github.com/cms-sw/cmssw/releases/tag/{rel_name}"
+)
+RELEASE_NOTES_ERROR_MSG = (
+ "There was an error generating the release notes, please look into the logs"
+)
+BUILD_QUEUED_LABEL = "build-release-queued"
+BUILD_STARTED = "build-release-started"
+BASE_BUILD_LOG_URL = (
+ "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/auto-build-release/%s-%s/%d"
+)
+BASE_UPLOAD_LOG_URL = (
+ "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/auto-upload-release/%s-%s/%d"
+)
+BASE_CLEANUP_LOG_URL = (
+ "https://cmssdt.cern.ch/SDT/" + JENKINS_PREFIX + "-artifacts/cleanup-auto-build/%s-%s/%d"
+)
+BASE_INSTALLATION_URL = (
+ "https://cmssdt.cern.ch/SDT/%s-artifacts/deploy-release-afs/{rel_name}/{architecture}/{job_id}/"
+ % JENKINS_PREFIX
+)
# -------------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------------
+
#
# posts a message to the issue in github
# if dry-run is selected it doesn't post the message and just prints it
#
-def post_message(repo, issue, msg ):
- if opts.dryRun:
- print('Not posting message (dry-run):\n %s' % msg)
- else:
- print('Posting message:\n %s' % msg)
- create_issue_comment(repo, issue, msg)
-
+def post_message(repo, issue, msg):
+ if opts.dryRun:
+ print("Not posting message (dry-run):\n %s" % msg)
+ else:
+ print("Posting message:\n %s" % msg)
+ create_issue_comment(repo, issue, msg)
# Adds a label to the issue in github
# if dry-run is selected it doesn't add the label and just prints it
-def add_label(repo, issue, label ):
- if opts.dryRun:
- print('Not adding label (dry-run):\n %s' % label)
- return
- print('Adding label:\n %s' % label)
- add_issue_labels(repo, issue, [label] )
+def add_label(repo, issue, label):
+ if opts.dryRun:
+ print("Not adding label (dry-run):\n %s" % label)
+ return
+ print("Adding label:\n %s" % label)
+ add_issue_labels(repo, issue, [label])
+
# Removes a label form the issue
-def remove_label( repo, issue, label ):
- if opts.dryRun:
- print('Not removing label (dry-run):\n %s' % label)
- return
-
- reM = re.compile ("^%s$" % label)
- for l in ALL_LABELS:
- if not reM.match(l): continue
- print('Removing label: %s' % l)
- try:
- remove_issue_label(repo, issue, l)
- except Exception as e:
- pass
+def remove_label(repo, issue, label):
+ if opts.dryRun:
+ print("Not removing label (dry-run):\n %s" % label)
+ return
+
+ reM = re.compile("^%s$" % label)
+ for l in ALL_LABELS:
+ if not reM.match(l):
+ continue
+ print("Removing label: %s" % l)
+ try:
+ remove_issue_label(repo, issue, l)
+ except Exception as e:
+ pass
+
#
# removes the labels of the issue
#
-def remove_labels( repo, issue ):
- if opts.dryRun:
- print('Not removing issue labels (dry-run)')
- return
- remove_issue_labels_all(repo, issue)
+def remove_labels(repo, issue):
+ if opts.dryRun:
+ print("Not removing issue labels (dry-run)")
+ return
+ remove_issue_labels_all(repo, issue)
#
# Get tests log output
#
def get_test_log(logfile):
- from os import getenv
- from os.path import join,exists
- logmsg=''
- try:
- logfile = join(getenv('WORKSPACE'),logfile)
+ from os import getenv
+ from os.path import join, exists
+
+ logmsg = ""
try:
- logmsg = '\n\nTests results:\n'+getoutput("grep 'ERROR\| tests passed' "+logfile)
+ logfile = join(getenv("WORKSPACE"), logfile)
+ try:
+ logmsg = "\n\nTests results:\n" + getoutput("grep 'ERROR\| tests passed' " + logfile)
+ except:
+ logmsg = "\n\nUnable to read tests log: No such file " + logfile
except:
- logmsg = '\n\nUnable to read tests log: No such file '+logfile
- except:
- logmsg = '\n\nUnable to read tests log: WORKSPACE variable not set.'
- return logmsg
-
+ logmsg = "\n\nUnable to read tests log: WORKSPACE variable not set."
+ return logmsg
+
+
# Start of execution
# --------------------------------------------------------------------------------
if __name__ == "__main__":
- parser = OptionParser( usage="%prog [ options ] \n "
- "message-type = BUILDING | BUILD_OK | BUILD_ERROR | UPLOADING | UPLOAD_OK | UPLOAD_ERROR | CLEANUP_OK | CLEANUP_ERROR | TESTS_OK | TESTS_ERROR "
- "| RELEASE_NOTES_OK | RELEASE_NOTES_ERROR | INSTALLATION_OK | INSTALLATION_ERROR | INSTALLATION_SKIP")
- parser.add_option( "-n" , "--dry-run" , dest="dryRun" , action="store_true", help="Do not post on Github", default=False )
- parser.add_option( "-d" , "--details" , dest="details" , action="store", help="Add aditional details to the message", default=False )
-
- opts, args = parser.parse_args( )
- if len( args ) != 6:
- parser.error( "Not enough arguments" )
-
- jenkins_build_number = int( args[ 0 ] )
- hostname = args[ 1 ]
- issue = int( args[ 2 ] )
- arch = args[ 3 ]
- release_name = args[ 4 ]
- action = args[ 5 ]
-
- repo = GH_CMSSW_ORGANIZATION + '/' + GH_CMSSW_REPO
- ALL_LABELS = [ l["name"] for l in get_issue_labels(repo, issue) ]
- test_logfile = "build/"+release_name+"-tests/matrixTests/runall-report-step123-.log"
-
- if action == POST_BUILDING:
- msg_details = ''
- if opts.details:
- msg_details = opts.details
- msg = BUILDING_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number, details=msg_details )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- new_label = arch+'-building'
- add_label( repo, issue, new_label )
-
- elif action == POST_TOOL_CONF_BUILDING:
-
- msg_details = ''
- if opts.details:
- msg_details = opts.details
- msg = BUILDING_TOOL_CONF_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number, details=msg_details )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- new_label = arch+'-tool-conf-building'
- add_label( repo, issue, new_label )
-
- elif action == BUILD_OK:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = BUILD_OK_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-build-ok' )
-
- elif action == TOOL_CONF_OK:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = TOOL_CONF_OK_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-tool-conf-ok' )
-
- elif action == BUILD_ERROR:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = BUILD_ERROR_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-build-error' )
-
- elif action == TOOL_CONF_ERROR:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = TOOL_CONF_ERROR_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-tool-conf-error' )
-
- elif action == UPLOADING:
-
- msg = UPLOADING_MSG.format( architecture=arch, machine=hostname, jk_build_number=jenkins_build_number)
- post_message( repo, issue , msg )
-
- elif action == UPLOAD_OK:
-
- results_url = BASE_UPLOAD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = UPLOAD_OK_MSG.format( architecture=arch , log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-upload-ok' )
-
- elif action == UPLOAD_ERROR:
-
- results_url = BASE_UPLOAD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = UPLOAD_ERROR_MSG.format( architecture=arch , log_url=results_url )
- post_message( repo, issue , msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-upload-error' )
-
- elif action == CLEANUP_OK:
-
- results_url = BASE_CLEANUP_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = CLEANUP_OK_MSG.format( architecture=arch , log_url=results_url )
- post_message( repo, issue , msg )
-
- elif action == CLEANUP_ERROR:
-
- results_url = BASE_CLEANUP_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = CLEANUP_ERROR_MSG.format( architecture=arch , log_url=results_url )
- post_message( repo, issue , msg )
-
- elif action == TESTS_OK:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = TESTS_OK_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg + get_test_log(test_logfile))
-
- elif action == TESTS_ERROR:
-
- results_url = BASE_BUILD_LOG_URL % (release_name,arch,jenkins_build_number)
- msg = TESTS_ERROR_MSG.format( architecture=arch, log_url=results_url )
- post_message( repo, issue , msg + get_test_log(test_logfile))
-
- elif action == RELEASE_NOTES_OK:
-
- msg = RELEASE_NOTES_OK_MSG.format( rel_name=release_name )
- post_message( repo, issue, msg)
-
- elif action == RELEASE_NOTES_ERROR:
-
- msg = RELEASE_NOTES_ERROR_MSG.format( rel_name=release_name )
- post_message( repo, issue, msg)
-
- elif action in [ INSTALLATION_OK, INSTALLATION_SKIP ]:
-
- results_url = BASE_INSTALLATION_URL.format( rel_name=release_name,
- architecture=arch,
- job_id=jenkins_build_number )
- #msg = INSTALLATION_OK_MSG.format( architecture=arch , log_url=results_url )
- #if action == INSTALLATION_SKIP:
- # msg = INSTALLATION_SKIP_MSG.format( architecture=arch , log_url=results_url )
- #post_message( repo, issue, msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-installation-ok' )
-
- elif action == INSTALLATION_ERROR:
-
- results_url = BASE_INSTALLATION_URL.format( rel_name=release_name,
- architecture=arch,
- job_id=jenkins_build_number )
- msg = INSTALLATION_ERROR_MSG.format( architecture=arch , log_url=results_url )
- post_message( repo, issue, msg )
- remove_label( repo, issue, arch+'-.*' )
- add_label( repo, issue, arch+'-installation-error' )
-
- else:
- parser.error( "Message type not recognized" )
+ parser = OptionParser(
+ usage="%prog [ options ] \n "
+ "message-type = BUILDING | BUILD_OK | BUILD_ERROR | UPLOADING | UPLOAD_OK | UPLOAD_ERROR | CLEANUP_OK | CLEANUP_ERROR | TESTS_OK | TESTS_ERROR "
+ "| RELEASE_NOTES_OK | RELEASE_NOTES_ERROR | INSTALLATION_OK | INSTALLATION_ERROR | INSTALLATION_SKIP"
+ )
+ parser.add_option(
+ "-n",
+ "--dry-run",
+ dest="dryRun",
+ action="store_true",
+ help="Do not post on Github",
+ default=False,
+ )
+ parser.add_option(
+ "-d",
+ "--details",
+ dest="details",
+ action="store",
+ help="Add aditional details to the message",
+ default=False,
+ )
+
+ opts, args = parser.parse_args()
+ if len(args) != 6:
+ parser.error("Not enough arguments")
+
+ jenkins_build_number = int(args[0])
+ hostname = args[1]
+ issue = int(args[2])
+ arch = args[3]
+ release_name = args[4]
+ action = args[5]
+
+ repo = GH_CMSSW_ORGANIZATION + "/" + GH_CMSSW_REPO
+ ALL_LABELS = [l["name"] for l in get_issue_labels(repo, issue)]
+ test_logfile = "build/" + release_name + "-tests/matrixTests/runall-report-step123-.log"
+
+ if action == POST_BUILDING:
+ msg_details = ""
+ if opts.details:
+ msg_details = opts.details
+ msg = BUILDING_MSG.format(
+ architecture=arch,
+ machine=hostname,
+ jk_build_number=jenkins_build_number,
+ details=msg_details,
+ )
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ new_label = arch + "-building"
+ add_label(repo, issue, new_label)
+
+ elif action == POST_TOOL_CONF_BUILDING:
+ msg_details = ""
+ if opts.details:
+ msg_details = opts.details
+ msg = BUILDING_TOOL_CONF_MSG.format(
+ architecture=arch,
+ machine=hostname,
+ jk_build_number=jenkins_build_number,
+ details=msg_details,
+ )
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ new_label = arch + "-tool-conf-building"
+ add_label(repo, issue, new_label)
+
+ elif action == BUILD_OK:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = BUILD_OK_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-build-ok")
+
+ elif action == TOOL_CONF_OK:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = TOOL_CONF_OK_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-tool-conf-ok")
+
+ elif action == BUILD_ERROR:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = BUILD_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-build-error")
+
+ elif action == TOOL_CONF_ERROR:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = TOOL_CONF_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-tool-conf-error")
+
+ elif action == UPLOADING:
+ msg = UPLOADING_MSG.format(
+ architecture=arch, machine=hostname, jk_build_number=jenkins_build_number
+ )
+ post_message(repo, issue, msg)
+
+ elif action == UPLOAD_OK:
+ results_url = BASE_UPLOAD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = UPLOAD_OK_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-upload-ok")
+
+ elif action == UPLOAD_ERROR:
+ results_url = BASE_UPLOAD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = UPLOAD_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-upload-error")
+
+ elif action == CLEANUP_OK:
+ results_url = BASE_CLEANUP_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = CLEANUP_OK_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+
+ elif action == CLEANUP_ERROR:
+ results_url = BASE_CLEANUP_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = CLEANUP_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+
+ elif action == TESTS_OK:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = TESTS_OK_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg + get_test_log(test_logfile))
+
+ elif action == TESTS_ERROR:
+ results_url = BASE_BUILD_LOG_URL % (release_name, arch, jenkins_build_number)
+ msg = TESTS_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg + get_test_log(test_logfile))
+
+ elif action == RELEASE_NOTES_OK:
+ msg = RELEASE_NOTES_OK_MSG.format(rel_name=release_name)
+ post_message(repo, issue, msg)
+
+ elif action == RELEASE_NOTES_ERROR:
+ msg = RELEASE_NOTES_ERROR_MSG.format(rel_name=release_name)
+ post_message(repo, issue, msg)
+
+ elif action in [INSTALLATION_OK, INSTALLATION_SKIP]:
+ results_url = BASE_INSTALLATION_URL.format(
+ rel_name=release_name, architecture=arch, job_id=jenkins_build_number
+ )
+ # msg = INSTALLATION_OK_MSG.format( architecture=arch , log_url=results_url )
+ # if action == INSTALLATION_SKIP:
+ # msg = INSTALLATION_SKIP_MSG.format( architecture=arch , log_url=results_url )
+ # post_message( repo, issue, msg )
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-installation-ok")
+
+ elif action == INSTALLATION_ERROR:
+ results_url = BASE_INSTALLATION_URL.format(
+ rel_name=release_name, architecture=arch, job_id=jenkins_build_number
+ )
+ msg = INSTALLATION_ERROR_MSG.format(architecture=arch, log_url=results_url)
+ post_message(repo, issue, msg)
+ remove_label(repo, issue, arch + "-.*")
+ add_label(repo, issue, arch + "-installation-error")
+
+ else:
+ parser.error("Message type not recognized")
diff --git a/report-pull-request-results.py b/report-pull-request-results.py
index 265bf9abeadd..0d977d3c7b0b 100755
--- a/report-pull-request-results.py
+++ b/report-pull-request-results.py
@@ -16,332 +16,430 @@
import os, sys
from socket import setdefaulttimeout
from github_utils import api_rate_limits
+
setdefaulttimeout(120)
SCRIPT_DIR = dirname(abspath(sys.argv[0]))
-#-----------------------------------------------------------------------------------
-#---- Parser Options
-#-----------------------------------------------------------------------------------
-parser = OptionParser(usage="usage: %prog ACTION [options] \n ACTION = PARSE_UNIT_TESTS_FAIL | PARSE_BUILD_FAIL "
- "| PARSE_MATRIX_FAIL | COMPARISON_READY | GET_BASE_MESSAGE | PARSE_EXTERNAL_BUILD_FAIL "
- "| PARSE_ADDON_FAIL | PARSE_CRAB_FAIL | PARSE_CLANG_BUILD_FAIL | MATERIAL_BUDGET "
- "| PYTHON3_FAIL | PARSE_GPU_UNIT_TESTS_FAIL | MERGE_COMMITS")
-
-parser.add_option("-f", "--unit-tests-file", action="store", type="string", dest="unit_tests_file", help="results file to analyse", default='None')
-parser.add_option("--f2", action="store", type="string", dest="results_file2", help="second results file to analyse" )
-parser.add_option("--missing_map", action="store", type="string", dest="missing_map", help="Missing workflow map file", default='None' )
-parser.add_option("--recent-merges", action="store", type="string", dest="recent_merges_file", help="file with the recent merges after doing the git cms-merge-topic")
-parser.add_option("--no-post", action="store_true", dest="no_post_mesage", help="I will only show the message I would post, but I will not post it in github")
-parser.add_option("--repo", action="store", dest="custom_repo", help="Tells me to use a custom repository from the user cms-sw", default="cms-sw/cmssw" )
-parser.add_option("--report-file", action="store", type="string", dest="report_file", help="Report the github comment in report file instead of github", default='')
-parser.add_option("--report-url", action="store", type="string", dest="report_url", help="URL where pr results are stored.", default='')
-parser.add_option("--commit", action="store", type="string", dest="commit", help="Pull request latest commit", default='')
+# -----------------------------------------------------------------------------------
+# ---- Parser Options
+# -----------------------------------------------------------------------------------
+parser = OptionParser(
+ usage="usage: %prog ACTION [options] \n ACTION = PARSE_UNIT_TESTS_FAIL | PARSE_BUILD_FAIL "
+ "| PARSE_MATRIX_FAIL | COMPARISON_READY | GET_BASE_MESSAGE | PARSE_EXTERNAL_BUILD_FAIL "
+ "| PARSE_ADDON_FAIL | PARSE_CRAB_FAIL | PARSE_CLANG_BUILD_FAIL | MATERIAL_BUDGET "
+ "| PYTHON3_FAIL | PARSE_GPU_UNIT_TESTS_FAIL | MERGE_COMMITS"
+)
+
+parser.add_option(
+ "-f",
+ "--unit-tests-file",
+ action="store",
+ type="string",
+ dest="unit_tests_file",
+ help="results file to analyse",
+ default="None",
+)
+parser.add_option(
+ "--f2",
+ action="store",
+ type="string",
+ dest="results_file2",
+ help="second results file to analyse",
+)
+parser.add_option(
+ "--missing_map",
+ action="store",
+ type="string",
+ dest="missing_map",
+ help="Missing workflow map file",
+ default="None",
+)
+parser.add_option(
+ "--recent-merges",
+ action="store",
+ type="string",
+ dest="recent_merges_file",
+ help="file with the recent merges after doing the git cms-merge-topic",
+)
+parser.add_option(
+ "--no-post",
+ action="store_true",
+ dest="no_post_mesage",
+ help="I will only show the message I would post, but I will not post it in github",
+)
+parser.add_option(
+ "--repo",
+ action="store",
+ dest="custom_repo",
+ help="Tells me to use a custom repository from the user cms-sw",
+ default="cms-sw/cmssw",
+)
+parser.add_option(
+ "--report-file",
+ action="store",
+ type="string",
+ dest="report_file",
+ help="Report the github comment in report file instead of github",
+ default="",
+)
+parser.add_option(
+ "--report-url",
+ action="store",
+ type="string",
+ dest="report_url",
+ help="URL where pr results are stored.",
+ default="",
+)
+parser.add_option(
+ "--commit",
+ action="store",
+ type="string",
+ dest="commit",
+ help="Pull request latest commit",
+ default="",
+)
(options, args) = parser.parse_args()
-def openlog(log, mode='r'):
- return open(log, mode=mode, encoding='utf-8', errors='ignore')
+
+def openlog(log, mode="r"):
+ return open(log, mode=mode, encoding="utf-8", errors="ignore")
+
def writelog(ref, line):
- ref.write(line.encode('ascii', 'ignore').decode('utf-8') if sys.version_info[0] < 3 else line)
+ ref.write(line.encode("ascii", "ignore").decode("utf-8") if sys.version_info[0] < 3 else line)
+
+
#
# Reads the log file for a step in a workflow and identifies the error if it starts with 'Begin Fatal Exception'
#
def get_wf_error_msg(out_file, filename=True):
- if out_file.endswith(MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND):
- return ''
- error_lines = ''
- if exists( out_file ):
- reading = False
- for line in openlog( out_file):
- if reading:
- error_lines += line
- if '----- End Fatal Exception' in line:
- reading = False
- break
- elif '----- Begin Fatal Exception' in line:
- error_lines += '\n'+ line
- reading = True
- if not error_lines and filename:
- error_lines = "/".join(out_file.split("/")[-2:])+'\n'
- return error_lines
+ if out_file.endswith(MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND):
+ return ""
+ error_lines = ""
+ if exists(out_file):
+ reading = False
+ for line in openlog(out_file):
+ if reading:
+ error_lines += line
+ if "----- End Fatal Exception" in line:
+ reading = False
+ break
+ elif "----- Begin Fatal Exception" in line:
+ error_lines += "\n" + line
+ reading = True
+ if not error_lines and filename:
+ error_lines = "/".join(out_file.split("/")[-2:]) + "\n"
+ return error_lines
+
#
# Reads a line that starts with 'ERROR executing', the line has ben splitted by ' '
# it gets the directory where the results for the workflow are, the step that failed
# and the log file
#
-def parse_workflow_info( parts, relval_dir ):
- workflow_info = {}
- # this is the output file to which the output of command for the step was directed
- # it starts asumed as not found
- out_file = MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND
- workflow_info[ 'step' ] = MATRIX_WORKFLOW_STEP_NA
- out_directory = "UNKNOWN"
- for i in range( 0 , len( parts ) ):
- current_part = parts[ i ]
- if ( current_part == 'cd' ):
- out_directory = parts[ i+1 ]
- out_directory = re.sub( ';' , '', out_directory)
- number = re.sub( '_.*$' , '' , out_directory )
- workflow_info[ 'out_directory' ] = out_directory
- workflow_info[ 'number' ] = number
- if ( current_part == '>' ):
- out_file = parts[ i+1 ]
- step = re.sub( '_.*log' , '' , out_file)
- workflow_info[ 'out_file'] = out_file
- workflow_info[ 'step' ] = step
-
- workflow_info['message'] = get_wf_error_msg(join(relval_dir, out_directory, out_file))
- return workflow_info
-
+def parse_workflow_info(parts, relval_dir):
+ workflow_info = {}
+    # this is the output file to which the output of the command for the step was directed
+    # it starts out assumed to be not found
+ out_file = MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND
+ workflow_info["step"] = MATRIX_WORKFLOW_STEP_NA
+ out_directory = "UNKNOWN"
+ for i in range(0, len(parts)):
+ current_part = parts[i]
+ if current_part == "cd":
+ out_directory = parts[i + 1]
+ out_directory = re.sub(";", "", out_directory)
+ number = re.sub("_.*$", "", out_directory)
+ workflow_info["out_directory"] = out_directory
+ workflow_info["number"] = number
+ if current_part == ">":
+ out_file = parts[i + 1]
+ step = re.sub("_.*log", "", out_file)
+ workflow_info["out_file"] = out_file
+ workflow_info["step"] = step
+
+ workflow_info["message"] = get_wf_error_msg(join(relval_dir, out_directory, out_file))
+ return workflow_info
+
+
#
# Reads the log file for the matrix tests. It identifyes which workflows failed
# and then proceeds to read the corresponding log file to identify the message
#
def read_matrix_log_file(matrix_log):
- workflows_with_error = [ ]
- relval_dir = join(dirname (matrix_log), "runTheMatrix-results")
- common_errors = []
- for line in openlog( matrix_log):
- line = line.strip()
- if 'ERROR executing' in line:
- print('processing: %s' % line)
- parts = re.sub("\s+"," ",line).split(" ")
- workflow_info = parse_workflow_info( parts, relval_dir)
- if 'number' in workflow_info:
- workflows_with_error.append( workflow_info )
- elif ' Step0-DAS_ERROR ' in line:
- print('processing: %s' % line)
- parts = line.split("_",2)
- workflow_info = {}
- workflow_info[ 'step' ] = "step1"
- workflow_info[ 'number' ] = parts [0]
- workflow_info[ 'message' ] = "DAS Error"
- workflows_with_error.append( workflow_info )
- elif 'ValueError: Undefined' in line:
- common_errors.append(line+"\n")
-
- # check if it was timeout
- message = "\n## RelVals\n\n"
- if 'ERROR TIMEOUT' in line:
- message += 'The relvals timed out after 4 hours.\n'
- if common_errors:
- message += ''.join(common_errors)
- cnt = 0
- max_show = 3
- extra_msg = False
- for wf in workflows_with_error:
- wnum = wf['number']
- cnt += 1
- if 'out_directory' in wf:
- wnum = "[%s](%s/runTheMatrix-results/%s)" % (wnum, options.report_url, wf['out_directory'])
- if cnt<=max_show:
- msg = wf['message'].strip()
- if len(msg.split('\n'))>1:
- message += '- ' + wnum + '\n```\n' + msg + '\n```\n'
- else:
- message += '- ' + wnum + '```' + msg + '```\n'
- else:
- if not extra_msg:
- extra_msg = True
- message += '\nExpand to see more relval errors ...\n\n'
- message += '- ' + wnum + '\n'
- if extra_msg: message += '\n\n'
- send_message_pr(message)
+ workflows_with_error = []
+ relval_dir = join(dirname(matrix_log), "runTheMatrix-results")
+ common_errors = []
+ for line in openlog(matrix_log):
+ line = line.strip()
+ if "ERROR executing" in line:
+ print("processing: %s" % line)
+ parts = re.sub("\s+", " ", line).split(" ")
+ workflow_info = parse_workflow_info(parts, relval_dir)
+ if "number" in workflow_info:
+ workflows_with_error.append(workflow_info)
+ elif " Step0-DAS_ERROR " in line:
+ print("processing: %s" % line)
+ parts = line.split("_", 2)
+ workflow_info = {}
+ workflow_info["step"] = "step1"
+ workflow_info["number"] = parts[0]
+ workflow_info["message"] = "DAS Error"
+ workflows_with_error.append(workflow_info)
+ elif "ValueError: Undefined" in line:
+ common_errors.append(line + "\n")
+
+ # check if it was timeout
+ message = "\n## RelVals\n\n"
+ if "ERROR TIMEOUT" in line:
+ message += "The relvals timed out after 4 hours.\n"
+ if common_errors:
+ message += "".join(common_errors)
+ cnt = 0
+ max_show = 3
+ extra_msg = False
+ for wf in workflows_with_error:
+ wnum = wf["number"]
+ cnt += 1
+ if "out_directory" in wf:
+ wnum = "[%s](%s/runTheMatrix-results/%s)" % (
+ wnum,
+ options.report_url,
+ wf["out_directory"],
+ )
+ if cnt <= max_show:
+ msg = wf["message"].strip()
+ if len(msg.split("\n")) > 1:
+ message += "- " + wnum + "\n```\n" + msg + "\n```\n"
+ else:
+ message += "- " + wnum + "```" + msg + "```\n"
+ else:
+ if not extra_msg:
+ extra_msg = True
+ message += "\nExpand to see more relval errors ...\n\n"
+ message += "- " + wnum + "\n"
+ if extra_msg:
+ message += "\n\n"
+ send_message_pr(message)
+
#
# reads the addon tests log file and gets the tests that failed
#
def cmd_to_addon_test(command, addon_dir):
- try:
- cmdMatch = re.match("^\[(.+):(\d+)\] +(.*)", command)
- addon_subdir = cmdMatch.group(1)
- logfile = 'step%s.log' % cmdMatch.group(2)
- e, o = run_cmd('ls -d %s/%s/%s 2>/dev/null | tail -1' % (addon_dir, addon_subdir, logfile))
- except:
- commandbase = command.replace(' ','_').replace('/','_')
- logfile='%s.log' % commandbase[:150].replace("'",'').replace('"','').replace('../','')
- e, o = run_cmd("ls -d %s/*/%s 2>/dev/null | tail -1" % (addon_dir, logfile))
- if (not e) and o:
- return (o.split("/")[-2], get_wf_error_msg(o, False).strip())
- print("ERROR: %s -> %s" % (command, o))
- return ("", "")
+ try:
+ cmdMatch = re.match("^\[(.+):(\d+)\] +(.*)", command)
+ addon_subdir = cmdMatch.group(1)
+ logfile = "step%s.log" % cmdMatch.group(2)
+ e, o = run_cmd("ls -d %s/%s/%s 2>/dev/null | tail -1" % (addon_dir, addon_subdir, logfile))
+ except:
+ commandbase = command.replace(" ", "_").replace("/", "_")
+ logfile = "%s.log" % commandbase[:150].replace("'", "").replace('"', "").replace("../", "")
+ e, o = run_cmd("ls -d %s/*/%s 2>/dev/null | tail -1" % (addon_dir, logfile))
+ if (not e) and o:
+ return (o.split("/")[-2], get_wf_error_msg(o, False).strip())
+ print("ERROR: %s -> %s" % (command, o))
+ return ("", "")
+
def read_addon_log_file(unit_tests_file):
- message='\n## AddOn Tests\n\n'
- addon_dir = join(dirname(unit_tests_file), "addOnTests")
- cnt = 0
- max_show = 3
- extra_msg = False
- for line in openlog(unit_tests_file):
- line = line.strip()
- if( ': FAILED -' in line):
- cnt += 1
- tname, err = cmd_to_addon_test(line.split(': FAILED -')[0].strip(), addon_dir)
- if not tname: tname = "unknown"
- else: tname = "[%s](%s/addOnTests/%s)" % (tname, options.report_url, tname)
- if cnt <= max_show:
- if err: line = err
- message += "- "+ tname + '\n```\n' + line + '\n```\n'
- else:
- if not extra_msg:
- extra_msg = True
- message += '\nExpand to see more addon errors ...\n\n'
- message += '- ' + tname + '\n'
- if extra_msg: message += '\n\n'
- send_message_pr(message)
+ message = "\n## AddOn Tests\n\n"
+ addon_dir = join(dirname(unit_tests_file), "addOnTests")
+ cnt = 0
+ max_show = 3
+ extra_msg = False
+ for line in openlog(unit_tests_file):
+ line = line.strip()
+ if ": FAILED -" in line:
+ cnt += 1
+ tname, err = cmd_to_addon_test(line.split(": FAILED -")[0].strip(), addon_dir)
+ if not tname:
+ tname = "unknown"
+ else:
+ tname = "[%s](%s/addOnTests/%s)" % (tname, options.report_url, tname)
+ if cnt <= max_show:
+ if err:
+ line = err
+ message += "- " + tname + "\n```\n" + line + "\n```\n"
+ else:
+ if not extra_msg:
+ extra_msg = True
+ message += (
+ "\nExpand to see more addon errors ...\n\n"
+ )
+ message += "- " + tname + "\n"
+ if extra_msg:
+ message += "\n\n"
+ send_message_pr(message)
+
#
# reads material budget logs
#
def read_material_budget_log_file(unit_tests_file):
- message = '\n## Material Budget\n\nThere was error running material budget tests.'
- send_message_pr(message)
+ message = "\n## Material Budget\n\nThere was error running material budget tests."
+ send_message_pr(message)
+
def get_recent_merges_message():
- message = ""
- if options.recent_merges_file:
- extra_msg = []
- json_obj = json.load(openlog(options.recent_merges_file))
- for r in json_obj:
- for pr in json_obj[r]: extra_msg.append(" - @%s %s#%s" % (json_obj[r][pr]['author'], r, pr))
+ message = ""
+ if options.recent_merges_file:
+ extra_msg = []
+ json_obj = json.load(openlog(options.recent_merges_file))
+ for r in json_obj:
+ for pr in json_obj[r]:
+ extra_msg.append(" - @%s %s#%s" % (json_obj[r][pr]["author"], r, pr))
- if extra_msg:
- message += '\n\nThe following merge commits were also included on top of IB + this PR '\
- 'after doing git cms-merge-topic: \n'
+ if extra_msg:
+ message += (
+ "\n\nThe following merge commits were also included on top of IB + this PR "
+ "after doing git cms-merge-topic: \n"
+ )
+
+ for l in extra_msg:
+ message += l + "\n"
- for l in extra_msg: message += l + '\n'
+ message += "\nYou can see more details here:\n"
+ message += GITLOG_FILE_BASE_URL + "\n"
+ message += GIT_CMS_MERGE_TOPIC_BASE_URL + "\n"
+ return message
- message += '\nYou can see more details here:\n'
- message += GITLOG_FILE_BASE_URL +'\n'
- message += GIT_CMS_MERGE_TOPIC_BASE_URL + '\n'
- return message
def get_pr_tests_info():
- message = ""
- if options.commit:
- message = "\n**COMMIT**: %s" % options.commit
- message += "\n**CMSSW**: "
- if 'CMSSW_VERSION' in os.environ:
- message += os.environ['CMSSW_VERSION']
- else:
- message += "UNKNOWN"
- if 'SCRAM_ARCH' in os.environ:
- message += '/' + os.environ['SCRAM_ARCH']
- else:
- message += '/UNKNOWN'
- if ('ENABLE_BOT_TESTS' in os.environ) and os.environ['ENABLE_BOT_TESTS']:
- message += "\n**Additional Tests**: %s" % os.environ['ENABLE_BOT_TESTS']
- return message
+ message = ""
+ if options.commit:
+ message = "\n**COMMIT**: %s" % options.commit
+ message += "\n**CMSSW**: "
+ if "CMSSW_VERSION" in os.environ:
+ message += os.environ["CMSSW_VERSION"]
+ else:
+ message += "UNKNOWN"
+ if "SCRAM_ARCH" in os.environ:
+ message += "/" + os.environ["SCRAM_ARCH"]
+ else:
+ message += "/UNKNOWN"
+ if ("ENABLE_BOT_TESTS" in os.environ) and os.environ["ENABLE_BOT_TESTS"]:
+ message += "\n**Additional Tests**: %s" % os.environ["ENABLE_BOT_TESTS"]
+ return message
#
# reads the build log file looking for the first error
# it includes 5 lines before and 5 lines after the error
#
-def read_build_log_file(build_log, isClang=False , toolconf=False):
- line_number = 0
- error_line = 0
- lines_to_keep_before=5
- lines_to_keep_after=5
- lines_since_error=0
- lines_before = ['']
- lines_after = ['']
- error_found = False
- for line in openlog(build_log):
- line_number += 1
- if (not error_found):
- lines_before.append(line)
- if (line_number > lines_to_keep_before):
- lines_before.pop(0)
- #this is how it determines that a line has an error
- if ('error: ' in line) or line.startswith("gmake: "):
- error_found = True
- error_line = line_number
+def read_build_log_file(build_log, isClang=False, toolconf=False):
+ line_number = 0
+ error_line = 0
+ lines_to_keep_before = 5
+ lines_to_keep_after = 5
+ lines_since_error = 0
+ lines_before = [""]
+ lines_after = [""]
+ error_found = False
+ for line in openlog(build_log):
+ line_number += 1
+ if not error_found:
+ lines_before.append(line)
+ if line_number > lines_to_keep_before:
+ lines_before.pop(0)
+ # this is how it determines that a line has an error
+ if ("error: " in line) or line.startswith("gmake: "):
+ error_found = True
+ error_line = line_number
+ if error_found:
+ if lines_since_error == 0:
+ lines_since_error += 1
+ continue
+ elif lines_since_error <= lines_to_keep_after:
+ lines_since_error += 1
+ lines_after.append(line)
+ else:
+ break
+
+ message = ""
+ err_type = "compilation warning"
if error_found:
- if (lines_since_error == 0):
- lines_since_error += 1
- continue
- elif (lines_since_error <= lines_to_keep_after):
- lines_since_error += 1
- lines_after.append(line)
- else:
- break
-
- message = ""
- err_type = "compilation warning"
- if error_found: err_type = "compilation error"
- if isClang:
- cmd = openlog( build_log).readline()
- message += '\n## Clang Build\n\nI found '+err_type+' while trying to compile with clang. '
- message += 'Command used:\n```\n' + cmd +'\n```\n'
- elif toolconf:
- message += '\n## External Build\n\nI found '+err_type+' when building: '
- else:
- message += '\n## Build\n\nI found '+err_type+' when building: '
-
- if error_found:
- message += '\n\n
'
- for line in lines_before:
- message += line + '\f'
- for line in lines_after:
- message += line + '\f'
- message += '
'
- else:
- message += " See details on the summary page."
-
- send_message_pr(message)
+ err_type = "compilation error"
+ if isClang:
+ cmd = openlog(build_log).readline()
+ message += (
+ "\n## Clang Build\n\nI found " + err_type + " while trying to compile with clang. "
+ )
+ message += "Command used:\n```\n" + cmd + "\n```\n"
+ elif toolconf:
+ message += "\n## External Build\n\nI found " + err_type + " when building: "
+ else:
+ message += "\n## Build\n\nI found " + err_type + " when building: "
+
+ if error_found:
+ message += "\n\n
"
+ for line in lines_before:
+ message += line + "\f"
+ for line in lines_after:
+ message += line + "\f"
+ message += "
"
+ else:
+ message += " See details on the summary page."
+
+ send_message_pr(message)
+
#
# reads the unit tests file and gets the tests that failed
#
def read_unit_tests_file(unit_tests_file):
- errors_found=''
- err_cnt = 0
- for line in openlog(unit_tests_file):
- if( 'had ERRORS' in line):
- err_cnt += 1
- if err_cnt == 4:
- errors_found += "and more ...\n"
- if err_cnt > 3:
- continue
- errors_found += line
-
-
- message = '\n## Unit Tests\n\nI found %s errors in the following unit tests:\n\n
%s
' % (err_cnt, errors_found)
- send_message_pr(message)
+ errors_found = ""
+ err_cnt = 0
+ for line in openlog(unit_tests_file):
+ if "had ERRORS" in line:
+ err_cnt += 1
+ if err_cnt == 4:
+ errors_found += "and more ...\n"
+ if err_cnt > 3:
+ continue
+ errors_found += line
+
+ message = (
+ "\n## Unit Tests\n\nI found %s errors in the following unit tests:\n\n
%s
"
+ % (err_cnt, errors_found)
+ )
+ send_message_pr(message)
def read_gpu_tests_file(unit_tests_file):
- errors_found=''
- err_cnt = 0
- for line in openlog(unit_tests_file):
- if( 'had ERRORS' in line):
- err_cnt += 1
- if err_cnt == 4:
- errors_found += "and more ...\n"
- if err_cnt > 3:
- continue
- errors_found += line
- message = '\n## GPU Unit Tests\n\nI found %s errors in the following unit tests:\n\n
%s
' % (err_cnt, errors_found)
- send_message_pr(message)
+ errors_found = ""
+ err_cnt = 0
+ for line in openlog(unit_tests_file):
+ if "had ERRORS" in line:
+ err_cnt += 1
+ if err_cnt == 4:
+ errors_found += "and more ...\n"
+ if err_cnt > 3:
+ continue
+ errors_found += line
+ message = (
+ "\n## GPU Unit Tests\n\nI found %s errors in the following unit tests:\n\n
%s
"
+ % (err_cnt, errors_found)
+ )
+ send_message_pr(message)
+
#
# reads the python3 file and gets the tests that failed
#
def read_python3_file(python3_file):
- errors_found=''
- err_cnt = 0
- for line in openlog(python3_file):
- if( ' Error compiling ' in line):
- err_cnt += 1
- if err_cnt == 4:
- errors_found += "and more ...\n"
- if err_cnt > 3:
- continue
- errors_found += line
- message = '\n#Python3\n\nI found %s errors: \n\n
%s
' % (err_cnt, errors_found)
- send_message_pr(message)
+ errors_found = ""
+ err_cnt = 0
+ for line in openlog(python3_file):
+ if " Error compiling " in line:
+ err_cnt += 1
+ if err_cnt == 4:
+ errors_found += "and more ...\n"
+ if err_cnt > 3:
+ continue
+ errors_found += line
+ message = "\n#Python3\n\nI found %s errors: \n\n
%s
" % (err_cnt, errors_found)
+ send_message_pr(message)
#
@@ -350,153 +448,184 @@ def read_python3_file(python3_file):
# and if it is it doesn't post it again
#
def send_message_pr(message):
- if options.no_post_mesage:
- print('Not posting message (dry-run): \n ', message)
+ if options.no_post_mesage:
+ print("Not posting message (dry-run): \n ", message)
+ return
+ with openlog(options.report_file, "a") as rfile:
+ writelog(rfile, message + "\n")
return
- with openlog(options.report_file, "a") as rfile:
- writelog(rfile, message+"\n")
- return
#
# sends an approval message for a pr in cmssw
#
def add_to_report(message):
- if not message: return
- with openlog(options.report_file, "a") as rfile:
- writelog(rfile, message+"\n")
- return
+ if not message:
+ return
+ with openlog(options.report_file, "a") as rfile:
+ writelog(rfile, message + "\n")
+ return
+
def get_base_message():
- add_to_report(get_pr_tests_info())
- return
-
-def send_comparison_ready_message(comparison_errors_file, wfs_with_das_inconsistency_file, missing_map ):
- message = '\n## Comparison Summary\n\n'
- wfs_with_errors = ''
- for line in openlog( comparison_errors_file ):
- line = line.rstrip()
- parts = line.split( ';' )
- wf = parts[ 0 ]
- step = parts[ 1 ]
- wfs_with_errors += ( wf + ' step ' + step + '\n' )
-
- if wfs_with_errors != '':
- error_info = COMPARISON_INCOMPLETE_MSG.format( workflows=wfs_with_errors )
- message += '\n\n' + error_info
-
- wfs_das_inconsistency = openlog( wfs_with_das_inconsistency_file).readline().rstrip().rstrip(',').split( ',' )
-
- if '' in wfs_das_inconsistency:
- wfs_das_inconsistency.remove( '' )
-
- if wfs_das_inconsistency:
- das_inconsistency_info = DAS_INCONSISTENCY_MSG.format( workflows=', '.join( wfs_das_inconsistency ) )
- message += '\n\n' + das_inconsistency_info
-
- if missing_map and exists (missing_map):
- missing = []
- for line in openlog(missing_map):
- line = line.strip()
- if line: missing.append(" * "+line)
- if missing:
- from categories import COMPARISON_MISSING_MAP
- map_notify = ", ".join([ "@"+u for u in COMPARISON_MISSING_MAP] )
- message += "\n\n"+map_notify+" comparisons for the following workflows were not done due to missing matrix map:\n"+"\n".join(missing)
-
- alt_comp_dir = join(dirname(comparison_errors_file), "upload","alternative-comparisons")
- print("Alt comparison directory: ",alt_comp_dir)
- if exists(alt_comp_dir):
- err, out = run_cmd("grep ' Compilation failed' %s/runDQMComp-*.log" % alt_comp_dir)
- print(out)
- if not err:
- err_wfs = {}
- for line in out.split("\n"):
- wf = line.split(".log:",1)[0].split("runDQMComp-")[-1]
- err_wfs [wf]=1
- if err_wfs: message += "\n\nAlternative comparison was/were failed for workflow(s):\n"+"\n".join(list(err_wfs.keys()))
-
- JRCompSummaryLog = join(dirname(comparison_errors_file), "upload/validateJR/qaResultsSummary.log")
- print("JR comparison Summary: ",JRCompSummaryLog)
- if exists(JRCompSummaryLog):
- err, out = run_cmd("cat %s" % JRCompSummaryLog)
- if (not err) and out:
- message += "\n\n**Summary**:\n"
- for l in out.split("\n"):
- if l.strip(): message += " - %s\n" % l.strip()
-
- send_message_pr(message )
+ add_to_report(get_pr_tests_info())
+ return
+
+
+def send_comparison_ready_message(
+ comparison_errors_file, wfs_with_das_inconsistency_file, missing_map
+):
+ message = "\n## Comparison Summary\n\n"
+ wfs_with_errors = ""
+ for line in openlog(comparison_errors_file):
+ line = line.rstrip()
+ parts = line.split(";")
+ wf = parts[0]
+ step = parts[1]
+ wfs_with_errors += wf + " step " + step + "\n"
+
+ if wfs_with_errors != "":
+ error_info = COMPARISON_INCOMPLETE_MSG.format(workflows=wfs_with_errors)
+ message += "\n\n" + error_info
+
+ wfs_das_inconsistency = (
+ openlog(wfs_with_das_inconsistency_file).readline().rstrip().rstrip(",").split(",")
+ )
+
+ if "" in wfs_das_inconsistency:
+ wfs_das_inconsistency.remove("")
+
+ if wfs_das_inconsistency:
+ das_inconsistency_info = DAS_INCONSISTENCY_MSG.format(
+ workflows=", ".join(wfs_das_inconsistency)
+ )
+ message += "\n\n" + das_inconsistency_info
+
+ if missing_map and exists(missing_map):
+ missing = []
+ for line in openlog(missing_map):
+ line = line.strip()
+ if line:
+ missing.append(" * " + line)
+ if missing:
+ from categories import COMPARISON_MISSING_MAP
+
+ map_notify = ", ".join(["@" + u for u in COMPARISON_MISSING_MAP])
+ message += (
+ "\n\n"
+ + map_notify
+ + " comparisons for the following workflows were not done due to missing matrix map:\n"
+ + "\n".join(missing)
+ )
+
+ alt_comp_dir = join(dirname(comparison_errors_file), "upload", "alternative-comparisons")
+ print("Alt comparison directory: ", alt_comp_dir)
+ if exists(alt_comp_dir):
+ err, out = run_cmd("grep ' Compilation failed' %s/runDQMComp-*.log" % alt_comp_dir)
+ print(out)
+ if not err:
+ err_wfs = {}
+ for line in out.split("\n"):
+ wf = line.split(".log:", 1)[0].split("runDQMComp-")[-1]
+ err_wfs[wf] = 1
+ if err_wfs:
+ message += (
+ "\n\nAlternative comparison was/were failed for workflow(s):\n"
+ + "\n".join(list(err_wfs.keys()))
+ )
+
+ JRCompSummaryLog = join(
+ dirname(comparison_errors_file), "upload/validateJR/qaResultsSummary.log"
+ )
+ print("JR comparison Summary: ", JRCompSummaryLog)
+ if exists(JRCompSummaryLog):
+ err, out = run_cmd("cat %s" % JRCompSummaryLog)
+ if (not err) and out:
+ message += "\n\n**Summary**:\n"
+ for l in out.split("\n"):
+ if l.strip():
+ message += " - %s\n" % l.strip()
+
+ send_message_pr(message)
+
def complain_missing_param(param_name):
- print('\n')
- print('I need a %s to continue' % param_name)
- print('\n')
- parser.print_help()
- exit()
-
-#----------------------------------------------------------------------------------------
-#---- Global variables
-#---------------------------------------------------------------------------------------
-
-COMPARISON_INCOMPLETE_MSG = 'There are some workflows for which there are errors in the baseline:\n {workflows} ' \
- 'The results for the comparisons for these workflows could be incomplete \n' \
- 'This means most likely that the IB is having errors in the relvals.'\
- 'The error does NOT come from this pull request'
-DAS_INCONSISTENCY_MSG = 'The workflows {workflows} have different files in step1_dasquery.log than the ones ' \
- 'found in the baseline. You may want to check and retrigger the tests if necessary. ' \
- 'You can check it in the "files" directory in the results of the comparisons'
-
-MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND = 'Not Found'
-MATRIX_WORKFLOW_STEP_NA = 'N/A'
-
-#----------------------------------------------------------------------------------------
-#---- Check arguments and options
-#---------------------------------------------------------------------------------------
-
-if (len(args)==0):
- print('you have to choose an action')
- parser.print_help()
- exit()
+ print("\n")
+ print("I need a %s to continue" % param_name)
+ print("\n")
+ parser.print_help()
+ exit()
+
+
+# ----------------------------------------------------------------------------------------
+# ---- Global variables
+# ---------------------------------------------------------------------------------------
+
+COMPARISON_INCOMPLETE_MSG = (
+ "There are some workflows for which there are errors in the baseline:\n {workflows} "
+ "The results for the comparisons for these workflows could be incomplete \n"
+ "This means most likely that the IB is having errors in the relvals."
+ "The error does NOT come from this pull request"
+)
+DAS_INCONSISTENCY_MSG = (
+ "The workflows {workflows} have different files in step1_dasquery.log than the ones "
+ "found in the baseline. You may want to check and retrigger the tests if necessary. "
+ 'You can check it in the "files" directory in the results of the comparisons'
+)
+
+MATRIX_WORKFLOW_STEP_LOG_FILE_NOT_FOUND = "Not Found"
+MATRIX_WORKFLOW_STEP_NA = "N/A"
+
+# ----------------------------------------------------------------------------------------
+# ---- Check arguments and options
+# ---------------------------------------------------------------------------------------
+
+if len(args) == 0:
+ print("you have to choose an action")
+ parser.print_help()
+ exit()
ACTION = args[0]
-if (ACTION == 'prBot.py'):
- print('you have to choose an action')
- parser.print_help()
- exit()
-
-print('you chose the action %s' % ACTION)
-
-if (options.report_url=='') or (options.report_file==''):
- complain_missing_param( 'report url/report file' )
- exit()
-
-GITLOG_FILE_BASE_URL='%s/git-recent-commits.json' % options.report_url
-GIT_CMS_MERGE_TOPIC_BASE_URL='%s/git-merge-result' % options.report_url
-
-if ( ACTION == 'GET_BASE_MESSAGE' ):
- get_base_message()
-elif ( ACTION == 'PARSE_UNIT_TESTS_FAIL' ):
- read_unit_tests_file(options.unit_tests_file)
-elif ( ACTION == 'PARSE_EXTERNAL_BUILD_FAIL' ):
- read_build_log_file(options.unit_tests_file, toolconf=True )
-elif ( ACTION == 'PARSE_BUILD_FAIL' ):
- read_build_log_file(options.unit_tests_file)
-elif ( ACTION == 'PARSE_MATRIX_FAIL' ):
- read_matrix_log_file(options.unit_tests_file )
-elif ( ACTION == 'PARSE_ADDON_FAIL' ):
- read_addon_log_file(options.unit_tests_file )
-elif ( ACTION == 'COMPARISON_READY' ):
- send_comparison_ready_message(options.unit_tests_file, options.results_file2, options.missing_map )
-elif( ACTION == 'PARSE_CLANG_BUILD_FAIL'):
- read_build_log_file(options.unit_tests_file, isClang=True )
-elif( ACTION == 'PYTHON3_FAIL'):
- read_python3_file(options.unit_tests_file )
-elif( ACTION == 'MATERIAL_BUDGET'):
- read_material_budget_log_file(options.unit_tests_file)
-elif ( ACTION == 'MERGE_COMMITS'):
- add_to_report(get_recent_merges_message())
-elif ( ACTION == 'PARSE_GPU_UNIT_TESTS_FAIL'):
- read_gpu_tests_file(options.unit_tests_file)
+if ACTION == "prBot.py":
+ print("you have to choose an action")
+ parser.print_help()
+ exit()
+
+print("you chose the action %s" % ACTION)
+
+if (options.report_url == "") or (options.report_file == ""):
+ complain_missing_param("report url/report file")
+ exit()
+
+GITLOG_FILE_BASE_URL = "%s/git-recent-commits.json" % options.report_url
+GIT_CMS_MERGE_TOPIC_BASE_URL = "%s/git-merge-result" % options.report_url
+
+if ACTION == "GET_BASE_MESSAGE":
+ get_base_message()
+elif ACTION == "PARSE_UNIT_TESTS_FAIL":
+ read_unit_tests_file(options.unit_tests_file)
+elif ACTION == "PARSE_EXTERNAL_BUILD_FAIL":
+ read_build_log_file(options.unit_tests_file, toolconf=True)
+elif ACTION == "PARSE_BUILD_FAIL":
+ read_build_log_file(options.unit_tests_file)
+elif ACTION == "PARSE_MATRIX_FAIL":
+ read_matrix_log_file(options.unit_tests_file)
+elif ACTION == "PARSE_ADDON_FAIL":
+ read_addon_log_file(options.unit_tests_file)
+elif ACTION == "COMPARISON_READY":
+ send_comparison_ready_message(
+ options.unit_tests_file, options.results_file2, options.missing_map
+ )
+elif ACTION == "PARSE_CLANG_BUILD_FAIL":
+ read_build_log_file(options.unit_tests_file, isClang=True)
+elif ACTION == "PYTHON3_FAIL":
+ read_python3_file(options.unit_tests_file)
+elif ACTION == "MATERIAL_BUDGET":
+ read_material_budget_log_file(options.unit_tests_file)
+elif ACTION == "MERGE_COMMITS":
+ add_to_report(get_recent_merges_message())
+elif ACTION == "PARSE_GPU_UNIT_TESTS_FAIL":
+ read_gpu_tests_file(options.unit_tests_file)
else:
- print("I don't recognize that action!")
+ print("I don't recognize that action!")
diff --git a/report_size.py b/report_size.py
index 43ad3b879b85..6242bf8d6f68 100755
--- a/report_size.py
+++ b/report_size.py
@@ -1,33 +1,37 @@
#!/usr/bin/env python
from __future__ import print_function
import sys
-#run this command for once to create the data file or directly pipe its output to this script
-#for releases
-#find /afs/cern.ch/cms/slc[5-7]* -maxdepth 3 -type d -print -exec fs lq {} \; | grep -v 'Volume Name' | sed 'N;s/\n/ /' | uniq -c -f2
-#for ibs
-#find /afs/cern.ch/cms/sw/ReleaseCandidates/ -maxdepth 3 -type d -print -exec fs lq {} \; |grep -v '^Volume' | sed 'N;s/\n/ /' | uniq -c -f3
+
+# run this command once to create the data file, or directly pipe its output to this script
+# for releases
+# find /afs/cern.ch/cms/slc[5-7]* -maxdepth 3 -type d -print -exec fs lq {} \; | grep -v 'Volume Name' | sed 'N;s/\n/ /' | uniq -c -f2
+# for ibs
+# find /afs/cern.ch/cms/sw/ReleaseCandidates/ -maxdepth 3 -type d -print -exec fs lq {} \; |grep -v '^Volume' | sed 'N;s/\n/ /' | uniq -c -f3
data = {}
-allocated = 0
+allocated = 0
used = 0
volumes = 0
max_volume_len = 0
max_path_len = 0
for line in sys.stdin:
- info = line.strip().split()
- if info[2] in data: continue
- volumes += 1
- allocated = allocated + int(info[3])
- used = used + int(info[4])
- data[info[2]]=info
- if len(info[2])>max_volume_len: max_volume_len=len(info[2])
- if len(info[1])>max_path_len: max_path_len=len(info[1])
+ info = line.strip().split()
+ if info[2] in data:
+ continue
+ volumes += 1
+ allocated = allocated + int(info[3])
+ used = used + int(info[4])
+ data[info[2]] = info
+ if len(info[2]) > max_volume_len:
+ max_volume_len = len(info[2])
+ if len(info[1]) > max_path_len:
+ max_path_len = len(info[1])
max_volume_len = max_volume_len + 4
max_path_len = max_path_len + 4
-print ("Total Volumes :",volumes)
-print ("Allocated Space:",int(allocated/1000000),"GB")
-print ("Used Space :",int(used/1000000),"GB")
+print("Total Volumes :", volumes)
+print("Allocated Space:", int(allocated / 1000000), "GB")
+print("Used Space :", int(used / 1000000), "GB")
for vol in sorted(data):
- msg = "{0:<"+str(max_volume_len)+"}{1:<"+str(max_path_len)+"}"
- print(msg.format(vol, data[vol][1]),data[vol][4]+"/"+data[vol][3])
+ msg = "{0:<" + str(max_volume_len) + "}{1:<" + str(max_path_len) + "}"
+ print(msg.format(vol, data[vol][1]), data[vol][4] + "/" + data[vol][3])
diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py
index 06b774a2e879..b58d27668b96 100644
--- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py
+++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/categories.py
@@ -4,35 +4,35 @@
from repo_config import CMSBUILD_USER, GH_REPO_NAME
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'degrutto' : ['ecal-laser'],
- 'ndaci' : ['ecal-laser'],
- 'zghiche' : ['ecal-laser'],
- 'gennai' : ['ecal-laser'],
- 'zhenbinwu' : ['ecal-laser'],
- 'wang-hui' : ['ecal-laser'],
- 'abrinke1' : ['ecal-laser'],
- 'mkovac' : ['ecal-laser'],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "degrutto": ["ecal-laser"],
+ "ndaci": ["ecal-laser"],
+ "zghiche": ["ecal-laser"],
+ "gennai": ["ecal-laser"],
+ "zhenbinwu": ["ecal-laser"],
+ "wang-hui": ["ecal-laser"],
+ "abrinke1": ["ecal-laser"],
+ "mkovac": ["ecal-laser"],
}
-CMSSW_CATEGORIES={
- 'ecal-laser': [GH_REPO_NAME],
+CMSSW_CATEGORIES = {
+ "ecal-laser": [GH_REPO_NAME],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py
+++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py
index da3c74bc46ca..cfe5cd554235 100644
--- a/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py
+++ b/repos/EcalLaserValidation/HLT_EcalLaserValidation/repo_config.py
@@ -1,24 +1,28 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbuild"
-GH_REPO_ORGANIZATION="EcalLaserValidation"
-GH_REPO_NAME="HLT_EcalLaserValidation"
-GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18OTa0HlxmA6uQ9oimETZqECqGDvkqQsEW/7jod1rl8AF1GnmAu0kGt'
-#GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18uyTkiQtIOYUfVj2PQLV34u5hQAbfNhl8='
-ADD_LABELS=False
-ADD_WEB_HOOK=False
-JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/HLT_EcalLaserValidation"
-JENKINS_NOTIFICATION_EMAIL=""
-OPEN_ISSUE_FOR_PUSH_TESTS=True
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbuild"
+GH_REPO_ORGANIZATION = "EcalLaserValidation"
+GH_REPO_NAME = "HLT_EcalLaserValidation"
+GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX18OTa0HlxmA6uQ9oimETZqECqGDvkqQsEW/7jod1rl8AF1GnmAu0kGt"
+# GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18uyTkiQtIOYUfVj2PQLV34u5hQAbfNhl8='
+ADD_LABELS = False
+ADD_WEB_HOOK = False
+JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/HLT_EcalLaserValidation"
+JENKINS_NOTIFICATION_EMAIL = ""
+OPEN_ISSUE_FOR_PUSH_TESTS = True
IGNORE_ISSUES = []
-#Valid Web hooks
-VALID_WEB_HOOKS=['push']
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild"
-def file2Package(filename): return GH_REPO_NAME
+# Valid Web hooks
+VALID_WEB_HOOKS = ["push"]
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild"
+
+
+def file2Package(filename):
+ return GH_REPO_NAME
diff --git a/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py
index 06b774a2e879..b58d27668b96 100644
--- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py
+++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/categories.py
@@ -4,35 +4,35 @@
from repo_config import CMSBUILD_USER, GH_REPO_NAME
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'degrutto' : ['ecal-laser'],
- 'ndaci' : ['ecal-laser'],
- 'zghiche' : ['ecal-laser'],
- 'gennai' : ['ecal-laser'],
- 'zhenbinwu' : ['ecal-laser'],
- 'wang-hui' : ['ecal-laser'],
- 'abrinke1' : ['ecal-laser'],
- 'mkovac' : ['ecal-laser'],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "degrutto": ["ecal-laser"],
+ "ndaci": ["ecal-laser"],
+ "zghiche": ["ecal-laser"],
+ "gennai": ["ecal-laser"],
+ "zhenbinwu": ["ecal-laser"],
+ "wang-hui": ["ecal-laser"],
+ "abrinke1": ["ecal-laser"],
+ "mkovac": ["ecal-laser"],
}
-CMSSW_CATEGORIES={
- 'ecal-laser': [GH_REPO_NAME],
+CMSSW_CATEGORIES = {
+ "ecal-laser": [GH_REPO_NAME],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py
+++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py
index 0accae686106..76056dd78533 100644
--- a/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py
+++ b/repos/EcalLaserValidation/L1T_EcalLaserValidation/repo_config.py
@@ -1,23 +1,27 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbuild"
-GH_REPO_ORGANIZATION="EcalLaserValidation"
-GH_REPO_NAME="L1T_EcalLaserValidation"
-GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX18FTI2p/ZkGhERFC/gPJhXtW+bjAF9xtuWoJIDhv3B+ifsXz3gWm5Xq'
-ADD_LABELS=False
-ADD_WEB_HOOK=False
-JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/L1T_EcalLaserValidation"
-JENKINS_NOTIFICATION_EMAIL=""
-OPEN_ISSUE_FOR_PUSH_TESTS=True
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbuild"
+GH_REPO_ORGANIZATION = "EcalLaserValidation"
+GH_REPO_NAME = "L1T_EcalLaserValidation"
+GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX18FTI2p/ZkGhERFC/gPJhXtW+bjAF9xtuWoJIDhv3B+ifsXz3gWm5Xq"
+ADD_LABELS = False
+ADD_WEB_HOOK = False
+JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/L1T_EcalLaserValidation"
+JENKINS_NOTIFICATION_EMAIL = ""
+OPEN_ISSUE_FOR_PUSH_TESTS = True
IGNORE_ISSUES = []
-#Valid Web hooks
-VALID_WEB_HOOKS=['push']
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild"
-def file2Package(filename): return GH_REPO_NAME
+# Valid Web hooks
+VALID_WEB_HOOKS = ["push"]
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild"
+
+
+def file2Package(filename):
+ return GH_REPO_NAME
diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py
index ea2cb61f6b09..0f19e7b84c73 100644
--- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py
+++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/categories.py
@@ -4,28 +4,28 @@
from repo_config import CMSBUILD_USER, GH_REPO_NAME
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar',CMSBUILD_USER] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar", CMSBUILD_USER] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'emanueledimarco' : ['ecal-pulse-shape'],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "emanueledimarco": ["ecal-pulse-shape"],
}
-CMSSW_CATEGORIES={
- 'ecal-pulse-shape': [GH_REPO_NAME],
+CMSSW_CATEGORIES = {
+ "ecal-pulse-shape": [GH_REPO_NAME],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py
index 5a8b19e90cde..50279e124ba8 100644
--- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py
+++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_2_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py
index fe2229299320..d45ddf0dad95 100644
--- a/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py
+++ b/repos/EcalLaserValidation/RECO_EcalPulseShapeValidation/repo_config.py
@@ -1,23 +1,27 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbuild"
-GH_REPO_ORGANIZATION="EcalLaserValidation"
-GH_REPO_NAME="RECO_EcalPulseShapeValidation"
-GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+r+XWzRjZHPgURrshDykGdtONgxUa7XBof1Nh1/BiWgt3IyWXu4t60'
-ADD_LABELS=False
-ADD_WEB_HOOK=False
-JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/RECO_EcalPulseShapeValidation"
-JENKINS_NOTIFICATION_EMAIL=""
-OPEN_ISSUE_FOR_PUSH_TESTS=True
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbuild"
+GH_REPO_ORGANIZATION = "EcalLaserValidation"
+GH_REPO_NAME = "RECO_EcalPulseShapeValidation"
+GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+r+XWzRjZHPgURrshDykGdtONgxUa7XBof1Nh1/BiWgt3IyWXu4t60"
+ADD_LABELS = False
+ADD_WEB_HOOK = False
+JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/RECO_EcalPulseShapeValidation"
+JENKINS_NOTIFICATION_EMAIL = ""
+OPEN_ISSUE_FOR_PUSH_TESTS = True
IGNORE_ISSUES = []
-#Valid Web hooks
-VALID_WEB_HOOKS=['push']
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild"
-def file2Package(filename): return GH_REPO_NAME
+# Valid Web hooks
+VALID_WEB_HOOKS = ["push"]
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild"
+
+
+def file2Package(filename):
+ return GH_REPO_NAME
diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py
index 02c961df2d53..ce7f155453dc 100644
--- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py
+++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/categories.py
@@ -4,36 +4,36 @@
from repo_config import CMSBUILD_USER, GH_REPO_NAME
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'degrutto' : ['ecal-laser'],
- 'ndaci' : ['ecal-laser'],
- 'zghiche' : ['ecal-laser'],
- 'gennai' : ['ecal-laser'],
- 'zhenbinwu' : ['ecal-laser'],
- 'wang-hui' : ['ecal-laser'],
- 'abrinke1' : ['ecal-laser'],
- 'mkovac' : ['ecal-laser'],
- 'InnaKucher' : ['ecal-laser'],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "degrutto": ["ecal-laser"],
+ "ndaci": ["ecal-laser"],
+ "zghiche": ["ecal-laser"],
+ "gennai": ["ecal-laser"],
+ "zhenbinwu": ["ecal-laser"],
+ "wang-hui": ["ecal-laser"],
+ "abrinke1": ["ecal-laser"],
+ "mkovac": ["ecal-laser"],
+ "InnaKucher": ["ecal-laser"],
}
-CMSSW_CATEGORIES={
- 'ecal-laser': [GH_REPO_NAME],
+CMSSW_CATEGORIES = {
+ "ecal-laser": [GH_REPO_NAME],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py
+++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py b/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py
index de15a7f7db51..dc9168d6345a 100644
--- a/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py
+++ b/repos/EcalLaserValidation/TPG_EcalLaserValidation/repo_config.py
@@ -1,23 +1,27 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbuild"
-GH_REPO_ORGANIZATION="EcalLaserValidation"
-GH_REPO_NAME="TPG_EcalLaserValidation"
-GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GGHoH9PB4G9rRvEuoKejWnx1zWHOV39VGswFB1rX9s7F3HFdaTtcs'
-ADD_LABELS=False
-ADD_WEB_HOOK=False
-JENKINS_UPLOAD_DIRECTORY="EcalLaserValidation/TPG_EcalLaserValidation"
-JENKINS_NOTIFICATION_EMAIL=""
-OPEN_ISSUE_FOR_PUSH_TESTS=True
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbuild"
+GH_REPO_ORGANIZATION = "EcalLaserValidation"
+GH_REPO_NAME = "TPG_EcalLaserValidation"
+GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GGHoH9PB4G9rRvEuoKejWnx1zWHOV39VGswFB1rX9s7F3HFdaTtcs"
+ADD_LABELS = False
+ADD_WEB_HOOK = False
+JENKINS_UPLOAD_DIRECTORY = "EcalLaserValidation/TPG_EcalLaserValidation"
+JENKINS_NOTIFICATION_EMAIL = ""
+OPEN_ISSUE_FOR_PUSH_TESTS = True
IGNORE_ISSUES = []
-#Valid Web hooks
-VALID_WEB_HOOKS=['push']
-#Set the Jenkins slave label if your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="slc6 && amd64 && cmsbuild"
-def file2Package(filename): return GH_REPO_NAME
+# Valid Web hooks
+VALID_WEB_HOOKS = ["push"]
+# Set the Jenkins slave label if your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "slc6 && amd64 && cmsbuild"
+
+
+def file2Package(filename):
+ return GH_REPO_NAME
diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py b/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py
index db8b95d34890..a9d0a71c3b98 100644
--- a/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py
+++ b/repos/HcalConditionsAutomatization/ConditionsValidation/categories.py
@@ -4,28 +4,28 @@
from repo_config import CMSBUILD_USER, GH_REPO_NAME
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar'] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar"] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'GilsonCS' : ['hcal-conditions'],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "GilsonCS": ["hcal-conditions"],
}
-CMSSW_CATEGORIES={
- 'hcal-conditions': [GH_REPO_NAME],
+CMSSW_CATEGORIES = {
+ "hcal-conditions": [GH_REPO_NAME],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py b/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py
+++ b/repos/HcalConditionsAutomatization/ConditionsValidation/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py b/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py
index 5dcfb2ba808f..fccf54a5e47c 100644
--- a/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py
+++ b/repos/HcalConditionsAutomatization/ConditionsValidation/repo_config.py
@@ -1,22 +1,26 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token-cmsbot"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbot"
-GH_REPO_ORGANIZATION="HcalConditionsAutomatization"
-GH_REPO_NAME="ConditionsValidation"
-GH_REPO_FULLNAME=GH_REPO_ORGANIZATION+"/"+GH_REPO_NAME
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-ADD_LABELS=False
-ADD_WEB_HOOK=True
-JENKINS_UPLOAD_DIRECTORY="HcalConditionsAutomatization/ConditionsValidation"
-JENKINS_NOTIFICATION_EMAIL=""
-OPEN_ISSUE_FOR_PUSH_TESTS=True
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token-cmsbot"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbot"
+GH_REPO_ORGANIZATION = "HcalConditionsAutomatization"
+GH_REPO_NAME = "ConditionsValidation"
+GH_REPO_FULLNAME = GH_REPO_ORGANIZATION + "/" + GH_REPO_NAME
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+ADD_LABELS = False
+ADD_WEB_HOOK = True
+JENKINS_UPLOAD_DIRECTORY = "HcalConditionsAutomatization/ConditionsValidation"
+JENKINS_NOTIFICATION_EMAIL = ""
+OPEN_ISSUE_FOR_PUSH_TESTS = True
IGNORE_ISSUES = []
-#Valid Web hooks
-VALID_WEB_HOOKS=['push']
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="lxplus7||lxplus6"
-def file2Package(filename): return GH_REPO_NAME
+# Valid Web hooks
+VALID_WEB_HOOKS = ["push"]
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "lxplus7||lxplus6"
+
+
+def file2Package(filename):
+ return GH_REPO_NAME
diff --git a/repos/cms_patatrack/cmssw/categories.py b/repos/cms_patatrack/cmssw/categories.py
index fca5cfe2e3d3..8533ece43ae3 100644
--- a/repos/cms_patatrack/cmssw/categories.py
+++ b/repos/cms_patatrack/cmssw/categories.py
@@ -5,24 +5,24 @@
from categories_map import CMSSW_CATEGORIES
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
-TRIGGER_PR_TESTS = list(set(['smuzaffar','felicepantaleo'] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+TRIGGER_PR_TESTS = list(set(["smuzaffar", "felicepantaleo"] + REQUEST_BUILD_RELEASE))
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- 'fwyzard' : list(CMSSW_CATEGORIES.keys()),
+ CMSBUILD_USER: ["tests", "code-checks"],
+ "fwyzard": list(CMSSW_CATEGORIES.keys()),
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/cms_patatrack/cmssw/releases.py b/repos/cms_patatrack/cmssw/releases.py
index a69f01107464..90bd5636bdff 100644
--- a/repos/cms_patatrack/cmssw/releases.py
+++ b/repos/cms_patatrack/cmssw/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_1_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/cms_patatrack/cmssw/repo_config.py b/repos/cms_patatrack/cmssw/repo_config.py
index 21e6f7d51659..bf5e7c4e978c 100644
--- a/repos/cms_patatrack/cmssw/repo_config.py
+++ b/repos/cms_patatrack/cmssw/repo_config.py
@@ -1,36 +1,36 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-#GH read/write token: Use default ~/.github-token-cmsbot
-GH_TOKEN="~/.github-token-cmsbot"
-#GH readonly token: Use default ~/.github-token-readonly
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-#GH bot user: Use default cmsbot
-CMSBUILD_USER="cmsbot"
-GH_REPO_ORGANIZATION="cms-patatrack"
-GH_REPO_FULLNAME="cms-patatrack/cmssw"
-CREATE_EXTERNAL_ISSUE=False
-#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-#GH Web hook pass phrase. This is encrypeted used bot keys.
-GITHUB_WEBHOOK_TOKEN='''U2FsdGVkX19C9pvh4GUbgDDUy0G9tSJZu7pFoQ0QodGMQtb/h4AFOKPsBxKlORAz
-KXg7+k1B6egPueUzlaJ9BA=='''
-#Set to True if you want bot to add build/test labels to your repo
-ADD_LABELS=True
-#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
-ADD_WEB_HOOK=False
-#List of issues/pr which bot should ignore
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+# GH read/write token: Use default ~/.github-token-cmsbot
+GH_TOKEN = "~/.github-token-cmsbot"
+# GH readonly token: Use default ~/.github-token-readonly
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+# GH bot user: Use default cmsbot
+CMSBUILD_USER = "cmsbot"
+GH_REPO_ORGANIZATION = "cms-patatrack"
+GH_REPO_FULLNAME = "cms-patatrack/cmssw"
+CREATE_EXTERNAL_ISSUE = False
+# Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+# GH Web hook pass phrase. This is encrypeted used bot keys.
+GITHUB_WEBHOOK_TOKEN = """U2FsdGVkX19C9pvh4GUbgDDUy0G9tSJZu7pFoQ0QodGMQtb/h4AFOKPsBxKlORAz
+KXg7+k1B6egPueUzlaJ9BA=="""
+# Set to True if you want bot to add build/test labels to your repo
+ADD_LABELS = True
+# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
+ADD_WEB_HOOK = False
+# List of issues/pr which bot should ignore
IGNORE_ISSUES = []
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL="slc7_amd64 && GPU"
-#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
-CMS_STANDARD_TESTS=True
-#Map your branches with cmssw branches for tests
-#User Branch => CMSSW/CMSDIST Bracnh
-CMS_BRANCH_MAP={
- 'CMSSW_10_1_X_Patatrack' : 'CMSSW_10_1_X',
- 'CMSSW_10_2_X_Patatrack' : 'CMSSW_10_2_X'
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = "slc7_amd64 && GPU"
+# For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
+CMS_STANDARD_TESTS = True
+# Map your branches with cmssw branches for tests
+# User Branch => CMSSW/CMSDIST Bracnh
+CMS_BRANCH_MAP = {
+ "CMSSW_10_1_X_Patatrack": "CMSSW_10_1_X",
+ "CMSSW_10_2_X_Patatrack": "CMSSW_10_2_X",
}
-#Valid Web hooks e.g. '.+' to match all event
-VALID_WEB_HOOKS=['.+']
-
+# Valid Web hooks e.g. '.+' to match all event
+VALID_WEB_HOOKS = [".+"]
diff --git a/repos/dmwm/CRABServer/repo_config.py b/repos/dmwm/CRABServer/repo_config.py
index 6c9f652f865c..ccd51bda5457 100644
--- a/repos/dmwm/CRABServer/repo_config.py
+++ b/repos/dmwm/CRABServer/repo_config.py
@@ -1,31 +1,31 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-#GH read/write token: Use default ~/.github-token-cmsbot
-GH_TOKEN="~/.github-token-cmsdmwmbot"
-#GH readonly token: Use default ~/.github-token-readonly
-GH_TOKEN_READONLY="~/.github-token-cmsdmwmbot"
-CONFIG_DIR=dirname(abspath(__file__))
-#GH bot user: Use default cmsbot
-CMSBUILD_USER="cmsdmwmbot"
-GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR))
-GH_REPO_FULLNAME="dmwm/CRABServer"
-CREATE_EXTERNAL_ISSUE=False
-#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
-JENKINS_SERVER="http://cmsjenkins11.cern.ch:8080/dmwm-jenkins"
-#Set to True if you want bot to add build/test labels to your repo
-ADD_LABELS=False
-#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
-ADD_WEB_HOOK=False
-#List of issues/pr which bot should ignore
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+# GH read/write token: Use default ~/.github-token-cmsbot
+GH_TOKEN = "~/.github-token-cmsdmwmbot"
+# GH readonly token: Use default ~/.github-token-readonly
+GH_TOKEN_READONLY = "~/.github-token-cmsdmwmbot"
+CONFIG_DIR = dirname(abspath(__file__))
+# GH bot user: Use default cmsbot
+CMSBUILD_USER = "cmsdmwmbot"
+GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR))
+GH_REPO_FULLNAME = "dmwm/CRABServer"
+CREATE_EXTERNAL_ISSUE = False
+# Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
+JENKINS_SERVER = "http://cmsjenkins11.cern.ch:8080/dmwm-jenkins"
+# Set to True if you want bot to add build/test labels to your repo
+ADD_LABELS = False
+# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
+ADD_WEB_HOOK = False
+# List of issues/pr which bot should ignore
IGNORE_ISSUES = [10]
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL=""
-#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
-CMS_STANDARD_TESTS=True
-#Map your branches with cmssw branches for tests
-#User Branch => CMSSW/CMSDIST Bracnh
-CMS_BRANCH_MAP={
-}
-#Valid Web hooks e.g. '.+' to match all event
-VALID_WEB_HOOKS=['release', 'workflow_dispatch']
-WEBHOOK_PAYLOAD=True
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = ""
+# For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
+CMS_STANDARD_TESTS = True
+# Map your branches with cmssw branches for tests
+# User Branch => CMSSW/CMSDIST Bracnh
+CMS_BRANCH_MAP = {}
+# Valid Web hooks e.g. '.+' to match all event
+VALID_WEB_HOOKS = ["release", "workflow_dispatch"]
+WEBHOOK_PAYLOAD = True
diff --git a/repos/smuzaffar/SCRAM/repo_config.py b/repos/smuzaffar/SCRAM/repo_config.py
index 40b9c8f7a1ea..264cc90f5162 100644
--- a/repos/smuzaffar/SCRAM/repo_config.py
+++ b/repos/smuzaffar/SCRAM/repo_config.py
@@ -1,9 +1,9 @@
-from os.path import basename,dirname,abspath
+from os.path import basename, dirname, abspath
-CONFIG_DIR=dirname(abspath(__file__))
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+8ckT0H3wKIUb59hZQrF5PZ2VlBxYyFek='
-RUN_DEFAULT_CMS_BOT=False
+CONFIG_DIR = dirname(abspath(__file__))
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+8ckT0H3wKIUb59hZQrF5PZ2VlBxYyFek="
+RUN_DEFAULT_CMS_BOT = False
-VALID_WEB_HOOKS=['.*']
-WEBHOOK_PAYLOAD=True
-JENKINS_SERVER="http://cmsjenkins11.cern.ch:8080/dmwm-jenkins"
+VALID_WEB_HOOKS = [".*"]
+WEBHOOK_PAYLOAD = True
+JENKINS_SERVER = "http://cmsjenkins11.cern.ch:8080/dmwm-jenkins"
diff --git a/repos/smuzaffar/cmssw/categories.py b/repos/smuzaffar/cmssw/categories.py
index 3cc2400f831b..b03ba39a16eb 100644
--- a/repos/smuzaffar/cmssw/categories.py
+++ b/repos/smuzaffar/cmssw/categories.py
@@ -5,24 +5,24 @@
from categories_map import CMSSW_CATEGORIES
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
TRIGGER_PR_TESTS = list(set([] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- gh_user : list(CMSSW_CATEGORIES.keys()),
+ CMSBUILD_USER: ["tests", "code-checks"],
+ gh_user: list(CMSSW_CATEGORIES.keys()),
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/smuzaffar/cmssw/releases.py b/repos/smuzaffar/cmssw/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/smuzaffar/cmssw/releases.py
+++ b/repos/smuzaffar/cmssw/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/smuzaffar/cmssw/repo_config.py b/repos/smuzaffar/cmssw/repo_config.py
index dcfdc19cac37..00f477b7e2c8 100644
--- a/repos/smuzaffar/cmssw/repo_config.py
+++ b/repos/smuzaffar/cmssw/repo_config.py
@@ -1,32 +1,32 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-#GH read/write token: Use default ~/.github-token-cmsbot
-GH_TOKEN="~/.github-token-cmsbot"
-#GH readonly token: Use default ~/.github-token-readonly
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-#GH bot user: Use default cmsbot
-CMSBUILD_USER="cmsbot"
-GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR))
-GH_REPO_FULLNAME="smuzaffar/cmssw"
-CREATE_EXTERNAL_ISSUE=False
-#Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-#GH Web hook pass phrase. This is encrypeted used bot keys.
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR'
-#Set to True if you want bot to add build/test labels to your repo
-ADD_LABELS=False
-#Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
-ADD_WEB_HOOK=False
-#List of issues/pr which bot should ignore
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+# GH read/write token: Use default ~/.github-token-cmsbot
+GH_TOKEN = "~/.github-token-cmsbot"
+# GH readonly token: Use default ~/.github-token-readonly
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+# GH bot user: Use default cmsbot
+CMSBUILD_USER = "cmsbot"
+GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR))
+GH_REPO_FULLNAME = "smuzaffar/cmssw"
+CREATE_EXTERNAL_ISSUE = False
+# Jenkins CI server: User default http://cmsjenkins02.cern.ch:8080/cms-jenkins
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+# GH Web hook pass phrase. This is encrypeted used bot keys.
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR"
+# Set to True if you want bot to add build/test labels to your repo
+ADD_LABELS = False
+# Set to True if you want bot to add GH webhooks. cmsbot needs admin rights
+ADD_WEB_HOOK = False
+# List of issues/pr which bot should ignore
IGNORE_ISSUES = [10]
-#Set the Jenkins slave label is your tests needs special machines to run.
-JENKINS_SLAVE_LABEL=""
-#For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
-CMS_STANDARD_TESTS=True
-#Map your branches with cmssw branches for tests
-#User Branch => CMSSW/CMSDIST Bracnh
-CMS_BRANCH_MAP={
-}
-#Valid Web hooks e.g. '.+' to match all event
-VALID_WEB_HOOKS=['.+']
+# Set the Jenkins slave label is your tests needs special machines to run.
+JENKINS_SLAVE_LABEL = ""
+# For cmsdist/cmssw repos , set it to False if you do not want to run standard cms pr tests
+CMS_STANDARD_TESTS = True
+# Map your branches with cmssw branches for tests
+# User Branch => CMSSW/CMSDIST Bracnh
+CMS_BRANCH_MAP = {}
+# Valid Web hooks e.g. '.+' to match all event
+VALID_WEB_HOOKS = [".+"]
diff --git a/repos/smuzaffar/int_build/categories.py b/repos/smuzaffar/int_build/categories.py
index d48a94fbde15..14c32ed77c74 100644
--- a/repos/smuzaffar/int_build/categories.py
+++ b/repos/smuzaffar/int_build/categories.py
@@ -4,28 +4,28 @@
from repo_config import CMSBUILD_USER
CMSSW_L1 = []
-APPROVE_BUILD_RELEASE = list(set([ ] + CMSSW_L1))
+APPROVE_BUILD_RELEASE = list(set([] + CMSSW_L1))
REQUEST_BUILD_RELEASE = APPROVE_BUILD_RELEASE
TRIGGER_PR_TESTS = list(set([] + REQUEST_BUILD_RELEASE))
-PR_HOLD_MANAGERS = [ ]
+PR_HOLD_MANAGERS = []
-COMMON_CATEGORIES = [ "tests", "code-checks" ]
-EXTERNAL_CATEGORIES = [ "externals" ]
+COMMON_CATEGORIES = ["tests", "code-checks"]
+EXTERNAL_CATEGORIES = ["externals"]
EXTERNAL_REPOS = []
-CMSSW_REPOS = [ gh_user+"/"+gh_cmssw ]
-CMSDIST_REPOS = [ gh_user+"/"+gh_cmsdist ]
+CMSSW_REPOS = [gh_user + "/" + gh_cmssw]
+CMSDIST_REPOS = [gh_user + "/" + gh_cmsdist]
CMSSW_ISSUES_TRACKERS = list(set(CMSSW_L1))
COMPARISON_MISSING_MAP = []
-#github_user:[list of categories]
+# github_user:[list of categories]
CMSSW_L2 = {
- CMSBUILD_USER : ["tests", "code-checks" ],
- gh_user : [gh_user],
+ CMSBUILD_USER: ["tests", "code-checks"],
+ gh_user: [gh_user],
}
-CMSSW_CATEGORIES={
- gh_user: [gh_user],
+CMSSW_CATEGORIES = {
+ gh_user: [gh_user],
}
USERS_TO_TRIGGER_HOOKS = set(TRIGGER_PR_TESTS + CMSSW_ISSUES_TRACKERS + list(CMSSW_L2.keys()))
diff --git a/repos/smuzaffar/int_build/releases.py b/repos/smuzaffar/int_build/releases.py
index 776ad15456f3..6beacaeea30a 100644
--- a/repos/smuzaffar/int_build/releases.py
+++ b/repos/smuzaffar/int_build/releases.py
@@ -1,13 +1,14 @@
-#Default development branch
+# Default development branch
# Changes from master branch will be merge in to it
# Any PR open against this will be automatically closed by cms-bot (Pr should be made for master branch)
# For new release cycle just change this and make sure to add its milestone and production branches
CMSSW_DEVEL_BRANCH = "CMSSW_10_0_X"
-RELEASE_BRANCH_MILESTONE={}
-RELEASE_BRANCH_CLOSED=[]
-RELEASE_BRANCH_PRODUCTION=[]
-SPECIAL_RELEASE_MANAGERS=[]
-RELEASE_MANAGERS={}
-USERS_TO_TRIGGER_HOOKS = set(SPECIAL_RELEASE_MANAGERS + [ m for rel in RELEASE_MANAGERS for m in rel ])
-
+RELEASE_BRANCH_MILESTONE = {}
+RELEASE_BRANCH_CLOSED = []
+RELEASE_BRANCH_PRODUCTION = []
+SPECIAL_RELEASE_MANAGERS = []
+RELEASE_MANAGERS = {}
+USERS_TO_TRIGGER_HOOKS = set(
+ SPECIAL_RELEASE_MANAGERS + [m for rel in RELEASE_MANAGERS for m in rel]
+)
diff --git a/repos/smuzaffar/int_build/repo_config.py b/repos/smuzaffar/int_build/repo_config.py
index c208c5c2b659..b0c045bc69ef 100644
--- a/repos/smuzaffar/int_build/repo_config.py
+++ b/repos/smuzaffar/int_build/repo_config.py
@@ -1,15 +1,19 @@
-from cms_static import GH_CMSSW_ORGANIZATION,GH_CMSSW_REPO,CMSBUILD_GH_USER
-from os.path import basename,dirname,abspath
-GH_TOKEN="~/.github-token-cmsbot"
-GH_TOKEN_READONLY="~/.github-token-readonly"
-CONFIG_DIR=dirname(abspath(__file__))
-CMSBUILD_USER="cmsbot"
-GH_REPO_ORGANIZATION=basename(dirname(CONFIG_DIR))
-GH_REPO_FULLNAME="smuzaffar/int-build"
-CREATE_EXTERNAL_ISSUE=False
-JENKINS_SERVER="http://cmsjenkins02.cern.ch:8080/cms-jenkins"
-GITHUB_WEBHOOK_TOKEN='U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR'
-ADD_LABELS=False
-ADD_WEB_HOOK=False
+from cms_static import GH_CMSSW_ORGANIZATION, GH_CMSSW_REPO, CMSBUILD_GH_USER
+from os.path import basename, dirname, abspath
+
+GH_TOKEN = "~/.github-token-cmsbot"
+GH_TOKEN_READONLY = "~/.github-token-readonly"
+CONFIG_DIR = dirname(abspath(__file__))
+CMSBUILD_USER = "cmsbot"
+GH_REPO_ORGANIZATION = basename(dirname(CONFIG_DIR))
+GH_REPO_FULLNAME = "smuzaffar/int-build"
+CREATE_EXTERNAL_ISSUE = False
+JENKINS_SERVER = "http://cmsjenkins02.cern.ch:8080/cms-jenkins"
+GITHUB_WEBHOOK_TOKEN = "U2FsdGVkX1+GEHdp/Cmu73+ctvrzSGXc9OvL+8bZyjOe6ZPkqr/GIPgpJHiEp+hR"
+ADD_LABELS = False
+ADD_WEB_HOOK = False
IGNORE_ISSUES = []
-def file2Package(filename): return GH_REPO_ORGANIZATION
+
+
+def file2Package(filename):
+ return GH_REPO_ORGANIZATION
diff --git a/run-ib-addon.py b/run-ib-addon.py
index 82d571cd852f..649613f54a46 100755
--- a/run-ib-addon.py
+++ b/run-ib-addon.py
@@ -13,14 +13,20 @@
from logUpdater import LogUpdater
if ("CMSSW_BASE" not in environ) or ("SCRAM_ARCH" not in environ):
- print("ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script")
- exit(1)
+ print(
+ "ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script"
+ )
+ exit(1)
-timeout=7200
-try: timeout=int(argv[1])
-except: timeout=7200
+timeout = 7200
+try:
+ timeout = int(argv[1])
+except:
+ timeout = 7200
logger = LogUpdater(environ["CMSSW_BASE"])
-ret = doCmd('cd %s; rm -rf addOnTests; timeout %s addOnTests.py -j %s 2>&1 >addOnTests.log' % (environ["CMSSW_BASE"], timeout,cmsRunProcessCount))
-doCmd('cd '+environ["CMSSW_BASE"]+'/addOnTests/logs; zip -r addOnTests.zip *.log')
+ret = doCmd(
+ "cd %s; rm -rf addOnTests; timeout %s addOnTests.py -j %s 2>&1 >addOnTests.log"
+ % (environ["CMSSW_BASE"], timeout, cmsRunProcessCount)
+)
+doCmd("cd " + environ["CMSSW_BASE"] + "/addOnTests/logs; zip -r addOnTests.zip *.log")
logger.updateAddOnTestsLogs()
-
diff --git a/run-ib-relval.py b/run-ib-relval.py
index 5ee819f717a1..1ca9ac1e8dae 100755
--- a/run-ib-relval.py
+++ b/run-ib-relval.py
@@ -19,82 +19,140 @@
from os.path import abspath, dirname
import re, socket
from time import time
+
SCRIPT_DIR = dirname(abspath(argv[0]))
-def process_relvals(threads=None,cmssw_version=None,arch=None,cmssw_base=None,logger=None):
- pass
+
+def process_relvals(threads=None, cmssw_version=None, arch=None, cmssw_base=None, logger=None):
+ pass
+
if __name__ == "__main__":
- parser = OptionParser(usage="%prog -i|--id -l|--list ")
- parser.add_option("-i", "--id", dest="jobid", help="Job Id e.g. 1of3", default="1of1")
- parser.add_option("-l", "--list", dest="workflow", help="List of workflows to run e.g. 1.0,2.0,3.0 or -s", type=str, default=None)
- parser.add_option("-n", "--dry-run",dest="dryRun", action="store_true", help="Do not upload results", default=False)
- parser.add_option("-f", "--force",dest="force", help="Force running of workflows without checking the server for previous run", action="store_true", default=False)
- parser.add_option("-N", "--non-threaded",dest="nonThreaded", action="store_true", help="Do not run in threaded mode", default=False)
- parser.add_option("-J", "--job-config", dest="jobConfig", help="Extra arguments to pass to jobscheduler", type=str, default='')
- opts, args = parser.parse_args()
+ parser = OptionParser(usage="%prog -i|--id -l|--list ")
+ parser.add_option("-i", "--id", dest="jobid", help="Job Id e.g. 1of3", default="1of1")
+ parser.add_option(
+ "-l",
+ "--list",
+ dest="workflow",
+ help="List of workflows to run e.g. 1.0,2.0,3.0 or -s",
+ type=str,
+ default=None,
+ )
+ parser.add_option(
+ "-n",
+ "--dry-run",
+ dest="dryRun",
+ action="store_true",
+ help="Do not upload results",
+ default=False,
+ )
+ parser.add_option(
+ "-f",
+ "--force",
+ dest="force",
+ help="Force running of workflows without checking the server for previous run",
+ action="store_true",
+ default=False,
+ )
+ parser.add_option(
+ "-N",
+ "--non-threaded",
+ dest="nonThreaded",
+ action="store_true",
+ help="Do not run in threaded mode",
+ default=False,
+ )
+ parser.add_option(
+ "-J",
+ "--job-config",
+ dest="jobConfig",
+ help="Extra arguments to pass to jobscheduler",
+ type=str,
+ default="",
+ )
+ opts, args = parser.parse_args()
- if len(args) > 0: parser.error("Too many/few arguments")
- if not opts.workflow: parser.error("Missing -l|--list argument.")
- if ("CMSSW_VERSION" not in environ) or ("CMSSW_BASE" not in environ) or ("SCRAM_ARCH" not in environ):
- print("ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script")
- exit(1)
+ if len(args) > 0:
+ parser.error("Too many/few arguments")
+ if not opts.workflow:
+ parser.error("Missing -l|--list argument.")
+ if (
+ ("CMSSW_VERSION" not in environ)
+ or ("CMSSW_BASE" not in environ)
+ or ("SCRAM_ARCH" not in environ)
+ ):
+ print(
+ "ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script"
+ )
+ exit(1)
+
+ if opts.dryRun:
+ environ["CMSSW_DRY_RUN"] = "true"
+ if opts.nonThreaded:
+ environ["CMSSW_NON_THREADED"] = "true"
+ elif "CMSSW_NON_THREADED" in environ:
+ del os.environ["CMSSW_NON_THREADED"]
+ thrds = cmsRunProcessCount
+ cmssw_ver = environ["CMSSW_VERSION"]
+ arch = environ["SCRAM_ARCH"]
+ cmssw_base = environ["CMSSW_BASE"]
+ logger = None
+ if not opts.dryRun:
+ logger = LogUpdater(dirIn=cmssw_base)
+ if logger and not opts.force:
+ doneWFs = logger.getDoneRelvals()
+ print("Already done workflows: ", doneWFs)
+ wfs = opts.workflow.split(",")
+ opts.workflow = ",".join([w for w in wfs if (w not in doneWFs)])
+ print("Workflow to run:", opts.workflow)
+ else:
+ print("Force running all workflows")
- if opts.dryRun: environ["CMSSW_DRY_RUN"]="true"
- if opts.nonThreaded: environ["CMSSW_NON_THREADED"]="true"
- elif "CMSSW_NON_THREADED" in environ: del os.environ['CMSSW_NON_THREADED']
- thrds = cmsRunProcessCount
- cmssw_ver = environ["CMSSW_VERSION"]
- arch = environ["SCRAM_ARCH"]
- cmssw_base = environ["CMSSW_BASE"]
- logger=None
- if not opts.dryRun: logger=LogUpdater(dirIn=cmssw_base)
- if logger and not opts.force:
- doneWFs = logger.getDoneRelvals()
- print("Already done workflows: ",doneWFs)
- wfs = opts.workflow.split(",")
- opts.workflow = ",".join([w for w in wfs if (w not in doneWFs)])
- print("Workflow to run:",opts.workflow)
- else:
- print("Force running all workflows")
+ if re.match("^CMSSW_(9_([3-9]|[1-9][0-9]+)|[1-9][0-9]+)_.*$", cmssw_ver):
+ e = 0
+ if opts.workflow:
+ stime = time()
+ p = Popen("%s/jobs/create-relval-jobs.py %s" % (SCRIPT_DIR, opts.workflow), shell=True)
+ e = waitpid(p.pid, 0)[1]
+ print("Time took to create jobs:", int(time() - stime), "sec")
+ if e:
+ exit(e)
- if re.match("^CMSSW_(9_([3-9]|[1-9][0-9]+)|[1-9][0-9]+)_.*$",cmssw_ver):
- e=0
- if opts.workflow:
- stime = time()
- p=Popen("%s/jobs/create-relval-jobs.py %s" % (SCRIPT_DIR, opts.workflow),shell=True)
- e=waitpid(p.pid,0)[1]
- print("Time took to create jobs:",int(time()-stime),"sec")
- if e: exit(e)
+ p = None
+ stime = time()
+ xopt = "-c 150 -m 85"
+ if "lxplus" in socket.gethostname():
+ xopt = "-c 120 -m 40"
+ p = Popen(
+ "cd %s/pyRelval ; %s/jobs/jobscheduler.py -M 0 %s -o time %s"
+ % (cmssw_base, SCRIPT_DIR, xopt, opts.jobConfig),
+ shell=True,
+ )
+ e = waitpid(p.pid, 0)[1]
+ print("Time took to create jobs:", int(time() - stime), "sec")
+ else:
+ print("No workflow to run.")
+ system("touch " + cmssw_base + "/done." + opts.jobid)
+ if logger:
+ logger.updateRelValMatrixPartialLogs(cmssw_base, "done." + opts.jobid)
+ exit(e)
- p = None
- stime = time()
- xopt="-c 150 -m 85"
- if "lxplus" in socket.gethostname():
- xopt="-c 120 -m 40"
- p = Popen("cd %s/pyRelval ; %s/jobs/jobscheduler.py -M 0 %s -o time %s" % (cmssw_base,SCRIPT_DIR,xopt,opts.jobConfig), shell=True)
- e=waitpid(p.pid,0)[1]
- print("Time took to create jobs:",int(time()-stime),"sec")
+ if isThreaded(cmssw_ver, arch):
+ print("Threaded IB Found")
+ thrds = int(MachineMemoryGB / 4.5)
+ if thrds == 0:
+ thrds = 1
+ elif "fc24_ppc64le_" in arch:
+ print("FC22 IB Found")
+ thrds = int(MachineMemoryGB / 4)
+ elif "fc24_ppc64le_" in arch:
+ print("CentOS 7.2 + PPC64LE Found")
+ thrds = int(MachineMemoryGB / 3)
else:
- print("No workflow to run.")
- system("touch "+cmssw_base+"/done."+opts.jobid)
- if logger: logger.updateRelValMatrixPartialLogs(cmssw_base, "done."+opts.jobid)
- exit(e)
-
- if isThreaded(cmssw_ver,arch):
- print("Threaded IB Found")
- thrds=int(MachineMemoryGB/4.5)
- if thrds==0: thrds=1
- elif "fc24_ppc64le_" in arch:
- print("FC22 IB Found")
- thrds=int(MachineMemoryGB/4)
- elif "fc24_ppc64le_" in arch:
- print("CentOS 7.2 + PPC64LE Found")
- thrds=int(MachineMemoryGB/3)
- else:
- print("Normal IB Found")
- if thrds>cmsRunProcessCount: thrds=cmsRunProcessCount
- known_errs = get_known_errors(cmssw_ver, arch, "relvals")
- matrix = PyRelValsThread(thrds, cmssw_base+"/pyRelval", opts.jobid)
- matrix.setArgs(GetMatrixOptions(cmssw_ver,arch))
- matrix.run_workflows(opts.workflow.split(","),logger,known_errors=known_errs)
+ print("Normal IB Found")
+ if thrds > cmsRunProcessCount:
+ thrds = cmsRunProcessCount
+ known_errs = get_known_errors(cmssw_ver, arch, "relvals")
+ matrix = PyRelValsThread(thrds, cmssw_base + "/pyRelval", opts.jobid)
+ matrix.setArgs(GetMatrixOptions(cmssw_ver, arch))
+ matrix.run_workflows(opts.workflow.split(","), logger, known_errors=known_errs)
diff --git a/runPyRelValThread.py b/runPyRelValThread.py
index c977a624a715..8e6b214c721b 100755
--- a/runPyRelValThread.py
+++ b/runPyRelValThread.py
@@ -8,300 +8,399 @@
import json
from logreaderUtils import transform_and_write_config_file, add_exception_to_config
-def runStep1Only(basedir, workflow, args=''):
- args = FixWFArgs (os.environ["CMSSW_VERSION"],os.environ["SCRAM_ARCH"],workflow,args)
- workdir = os.path.join(basedir, workflow)
- matrixCmd = 'runTheMatrix.py --maxSteps=0 -l ' + workflow +' '+args
- try:
- if not os.path.isdir(workdir):
- os.makedirs(workdir)
- except Exception as e:
- print("runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "+str(workflow)+" : can't create thread folder: " + str(e))
- try:
- ret = doCmd(matrixCmd, False, workdir)
- except Exception as e:
- print("runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "+str(workflow)+" : caught exception: " + str(e))
- return
-def runThreadMatrix(basedir, workflow, args='', logger=None, wf_err={}):
- args = FixWFArgs (os.environ["CMSSW_VERSION"],os.environ["SCRAM_ARCH"],workflow,args)
- workdir = os.path.join(basedir, workflow)
- matrixCmd = 'runTheMatrix.py -l ' + workflow +' '+args
- try:
- if not os.path.isdir(workdir):
- os.makedirs(workdir)
- except Exception as e:
- print("runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : can't create thread folder: " + str(e))
- wftime = time.time()
- try:
- ret = doCmd(matrixCmd, False, workdir)
- except Exception as e:
- print("runPyRelVal> ERROR during test PyReleaseValidation, workflow "+str(workflow)+" : caught exception: " + str(e))
- wftime = time.time() - wftime
- outfolders = [file for file in os.listdir(workdir) if re.match("^" + str(workflow) + "_", file)]
- if len(outfolders)==0: return
- outfolder = os.path.join(basedir,outfolders[0])
- wfdir = os.path.join(workdir,outfolders[0])
- ret = doCmd("rm -rf " + outfolder + "; mkdir -p " + outfolder)
- ret = doCmd("find . -mindepth 1 -maxdepth 1 -name '*.xml' -o -name '*.log' -o -name '*.py' -o -name '*.json' -o -name 'cmdLog' -type f | xargs -i mv '{}' "+outfolder+"/", False, wfdir)
- logRE = re.compile('^(.*/[0-9]+(\.[0-9]+|)_([^/]+))/step1_dasquery.log$')
- for logFile in glob.glob(outfolder+"/step1_dasquery.log"):
- m = logRE.match(logFile)
- if not m : continue
- ret = doCmd ("cp "+logFile+" "+m.group(1)+"/step1_"+m.group(3)+".log")
- ret = doCmd("mv "+os.path.join(workdir,"runall-report-step*.log")+" "+os.path.join(outfolder,"workflow.log"))
- ret = doCmd("echo " + str(wftime) +" > " + os.path.join(outfolder,"time.log"))
- ret = doCmd("hostname -s > " + os.path.join(outfolder,"hostname"))
- if wf_err: json.dump(wf_err, open("%s/known_error.json" % outfolder,"w"))
- if logger: logger.updateRelValMatrixPartialLogs(basedir, outfolders[0])
- shutil.rmtree(workdir)
- return
+def runStep1Only(basedir, workflow, args=""):
+ args = FixWFArgs(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], workflow, args)
+ workdir = os.path.join(basedir, workflow)
+ matrixCmd = "runTheMatrix.py --maxSteps=0 -l " + workflow + " " + args
+ try:
+ if not os.path.isdir(workdir):
+ os.makedirs(workdir)
+ except Exception as e:
+ print(
+ "runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "
+ + str(workflow)
+ + " : can't create thread folder: "
+ + str(e)
+ )
+ try:
+ ret = doCmd(matrixCmd, False, workdir)
+ except Exception as e:
+ print(
+ "runPyRelVal> ERROR during test PyReleaseValidation steps, workflow "
+ + str(workflow)
+ + " : caught exception: "
+ + str(e)
+ )
+ return
+
+
+def runThreadMatrix(basedir, workflow, args="", logger=None, wf_err={}):
+ args = FixWFArgs(os.environ["CMSSW_VERSION"], os.environ["SCRAM_ARCH"], workflow, args)
+ workdir = os.path.join(basedir, workflow)
+ matrixCmd = "runTheMatrix.py -l " + workflow + " " + args
+ try:
+ if not os.path.isdir(workdir):
+ os.makedirs(workdir)
+ except Exception as e:
+ print(
+ "runPyRelVal> ERROR during test PyReleaseValidation, workflow "
+ + str(workflow)
+ + " : can't create thread folder: "
+ + str(e)
+ )
+ wftime = time.time()
+ try:
+ ret = doCmd(matrixCmd, False, workdir)
+ except Exception as e:
+ print(
+ "runPyRelVal> ERROR during test PyReleaseValidation, workflow "
+ + str(workflow)
+ + " : caught exception: "
+ + str(e)
+ )
+ wftime = time.time() - wftime
+ outfolders = [
+ file for file in os.listdir(workdir) if re.match("^" + str(workflow) + "_", file)
+ ]
+ if len(outfolders) == 0:
+ return
+ outfolder = os.path.join(basedir, outfolders[0])
+ wfdir = os.path.join(workdir, outfolders[0])
+ ret = doCmd("rm -rf " + outfolder + "; mkdir -p " + outfolder)
+ ret = doCmd(
+ "find . -mindepth 1 -maxdepth 1 -name '*.xml' -o -name '*.log' -o -name '*.py' -o -name '*.json' -o -name 'cmdLog' -type f | xargs -i mv '{}' "
+ + outfolder
+ + "/",
+ False,
+ wfdir,
+ )
+ logRE = re.compile("^(.*/[0-9]+(\.[0-9]+|)_([^/]+))/step1_dasquery.log$")
+ for logFile in glob.glob(outfolder + "/step1_dasquery.log"):
+ m = logRE.match(logFile)
+ if not m:
+ continue
+ ret = doCmd("cp " + logFile + " " + m.group(1) + "/step1_" + m.group(3) + ".log")
+ ret = doCmd(
+ "mv "
+ + os.path.join(workdir, "runall-report-step*.log")
+ + " "
+ + os.path.join(outfolder, "workflow.log")
+ )
+ ret = doCmd("echo " + str(wftime) + " > " + os.path.join(outfolder, "time.log"))
+ ret = doCmd("hostname -s > " + os.path.join(outfolder, "hostname"))
+ if wf_err:
+ json.dump(wf_err, open("%s/known_error.json" % outfolder, "w"))
+ if logger:
+ logger.updateRelValMatrixPartialLogs(basedir, outfolders[0])
+ shutil.rmtree(workdir)
+ return
+
def find_argv(args, arg):
- val=""
- fullval = ""
- reX = re.compile('\s*(('+arg+')(\s+|=)([^ ]+))')
- m=reX.search(args)
- if m: glen = len(m.groups())
- while m:
- fullval = m.group(1)
- val = m.group(glen)
- args = args.replace(fullval,"")
- m=reX.search(args)
- return (args, fullval, val)
+ val = ""
+ fullval = ""
+ reX = re.compile("\s*((" + arg + ")(\s+|=)([^ ]+))")
+ m = reX.search(args)
+ if m:
+ glen = len(m.groups())
+ while m:
+ fullval = m.group(1)
+ val = m.group(glen)
+ args = args.replace(fullval, "")
+ m = reX.search(args)
+ return (args, fullval, val)
+
def splitWorkflows(workflows, max_wf_pre_set):
- print(workflows)
- avg_t = sum ([ x[1] for x in workflows ] ) / len(workflows)
- wf_max = len(workflows)
- wf_pre_set = wf_max
- wf_sets = 1
- while (wf_pre_set > max_wf_pre_set):
- wf_sets=wf_sets+1
- wf_pre_set = int(wf_max/wf_sets)
- long_wf=int(wf_pre_set/2)
- short_wf=wf_pre_set-long_wf
- merged = []
- for i in range (1, wf_sets):
- wf_count = len(workflows)
- sub_set=workflows[0:long_wf]+workflows[-short_wf:]
- new_avg = sum([ x[1] for x in sub_set])/len(sub_set)
- new_index=0
- while (new_avg > avg_t) and (new_index max_wf_pre_set:
+ wf_sets = wf_sets + 1
+ wf_pre_set = int(wf_max / wf_sets)
+ long_wf = int(wf_pre_set / 2)
+ short_wf = wf_pre_set - long_wf
+ merged = []
+ for i in range(1, wf_sets):
+ wf_count = len(workflows)
+ sub_set = workflows[0:long_wf] + workflows[-short_wf:]
+ new_avg = sum([x[1] for x in sub_set]) / len(sub_set)
+ new_index = 0
+ while (new_avg > avg_t) and (new_index < long_wf):
+ new_index += 1
+ sub_set = workflows[0 : long_wf - new_index] + workflows[-short_wf - new_index :]
+ new_avg = sum([x[1] for x in sub_set]) / len(sub_set)
+ merged.append([x[0] for x in sub_set])
+ workflows = workflows[long_wf - new_index : wf_count - short_wf - new_index]
+ merged.append([x[0] for x in workflows])
+ return merged
+
class PyRelValsThread(object):
- def __init__(self, jobs, basedir, jobid="1of1", outdir=None):
- if not outdir: outdir = basedir
- self.jobs = jobs
- self.basedir = basedir
- self.jobid=jobid
- self.outdir = outdir
- self.args = {}
- self.setArgs("")
+ def __init__(self, jobs, basedir, jobid="1of1", outdir=None):
+ if not outdir:
+ outdir = basedir
+ self.jobs = jobs
+ self.basedir = basedir
+ self.jobid = jobid
+ self.outdir = outdir
+ self.args = {}
+ self.setArgs("")
- def setArgs(self, args):
- args = args.replace('\\"','"')
- args, self.args['w'], tmp = find_argv(args,"-w|--what")
- args, self.args['l'], tmp = find_argv(args,"-l|--list")
- args, self.args['j'], tmp = find_argv(args,"-j|--nproc")
- if ' -s ' in args:
- self.args['s']='-s'
- args = args.replace(' -s ','')
- else: self.args['s']= ""
- self.args['rest'] = args
+ def setArgs(self, args):
+ args = args.replace('\\"', '"')
+ args, self.args["w"], tmp = find_argv(args, "-w|--what")
+ args, self.args["l"], tmp = find_argv(args, "-l|--list")
+ args, self.args["j"], tmp = find_argv(args, "-j|--nproc")
+ if " -s " in args:
+ self.args["s"] = "-s"
+ args = args.replace(" -s ", "")
+ else:
+ self.args["s"] = ""
+ self.args["rest"] = args
- def getWorkFlows(self, args):
- self.setArgs(args)
- workflowsCmd = "runTheMatrix.py -n "+self.args['w']+" "+self.args['s']+" "+self.args['l']+" | grep -v ' workflows with ' | grep -E '^[0-9][0-9]*(\.[0-9][0-9]*|)\s\s*' | sort -nr | awk '{print $1}'"
- print("RunTheMatrix>>",workflowsCmd)
- cmsstat, workflows = doCmd(workflowsCmd)
- if not cmsstat:
- return workflows.split("\n")
- print("runPyRelVal> ERROR during test PyReleaseValidation : could not get output of " + workflowsCmd)
- return []
+ def getWorkFlows(self, args):
+ self.setArgs(args)
+ workflowsCmd = (
+ "runTheMatrix.py -n "
+ + self.args["w"]
+ + " "
+ + self.args["s"]
+ + " "
+ + self.args["l"]
+ + " | grep -v ' workflows with ' | grep -E '^[0-9][0-9]*(\.[0-9][0-9]*|)\s\s*' | sort -nr | awk '{print $1}'"
+ )
+ print("RunTheMatrix>>", workflowsCmd)
+ cmsstat, workflows = doCmd(workflowsCmd)
+ if not cmsstat:
+ return workflows.split("\n")
+ print(
+ "runPyRelVal> ERROR during test PyReleaseValidation : could not get output of "
+ + workflowsCmd
+ )
+ return []
- def isNewRunTheMatrix(self):
- e, o = doCmd("runTheMatrix.py --help | grep 'maxSteps=MAXSTEPS' | wc -l")
- if e: return False
- return o=="1"
+ def isNewRunTheMatrix(self):
+ e, o = doCmd("runTheMatrix.py --help | grep 'maxSteps=MAXSTEPS' | wc -l")
+ if e:
+ return False
+ return o == "1"
- def getWorkflowSteps(self, workflows):
- threads = []
- while(len(workflows) > 0):
- threads = [t for t in threads if t.is_alive()]
- if(len(threads) < self.jobs):
- try:
- t = threading.Thread(target=runStep1Only, args=(self.basedir, workflows.pop(), self.args['rest']+" "+self.args['w']))
- t.start()
- threads.append(t)
- except Exception as e:
- print("runPyRelVal> ERROR threading matrix step1 : caught exception: " + str(e))
- for t in threads: t.join()
- return
+ def getWorkflowSteps(self, workflows):
+ threads = []
+ while len(workflows) > 0:
+ threads = [t for t in threads if t.is_alive()]
+ if len(threads) < self.jobs:
+ try:
+ t = threading.Thread(
+ target=runStep1Only,
+ args=(
+ self.basedir,
+ workflows.pop(),
+ self.args["rest"] + " " + self.args["w"],
+ ),
+ )
+ t.start()
+ threads.append(t)
+ except Exception as e:
+ print(
+ "runPyRelVal> ERROR threading matrix step1 : caught exception: " + str(e)
+ )
+ for t in threads:
+ t.join()
+ return
- def run_workflows(self, workflows=[], logger=None, known_errors={}):
- if not workflows: return
- workflows = workflows[::-1]
- threads = []
- while(len(workflows) > 0):
- threads = [t for t in threads if t.is_alive()]
- if(len(threads) < self.jobs):
- try:
- wf = workflows.pop()
- wf_err = {}
- if wf in known_errors: wf_err = known_errors[wf]
- t = threading.Thread(target=runThreadMatrix, args=(self.basedir, wf, self.args['rest']+" "+self.args['w'], logger, wf_err))
- t.start()
- threads.append(t)
- except Exception as e:
- print("runPyRelVal> ERROR threading matrix : caught exception: " + str(e))
- else:
- time.sleep(5)
- for t in threads: t.join()
- ret, out = doCmd("touch "+self.basedir+"/done."+self.jobid)
- if logger: logger.updateRelValMatrixPartialLogs(self.basedir, "done."+self.jobid)
- return
-
- def update_runall(self):
- self.update_known_errors()
- runall = os.path.join(self.outdir,"runall-report-step123-.log")
- outFile = open(runall+".tmp","w")
- status_ok = []
- status_err = []
- len_ok = 0
- len_err = 0
- for logFile in glob.glob(self.basedir+'/*/workflow.log'):
- inFile = open(logFile)
- for line in inFile:
- if re.match("^\s*(\d+\s+)+tests passed,\s+(\d+\s+)+failed\s*$",line):
- res = line.strip().split(" tests passed, ")
- res[0] = res[0].split()
- res[1]=res[1].replace(" failed","").split()
- len_res = len(res[0])
- if len_res>len_ok:
- for i in range(len_ok,len_res): status_ok.append(0)
- len_ok = len_res
- for i in range(0,len_res):
- status_ok[i]=status_ok[i]+int(res[0][i])
- len_res = len(res[1])
- if len_res>len_err:
- for i in range(len_err,len_res): status_err.append(0)
- len_err = len_res
- for i in range(0,len_res):
- status_err[i]=status_err[i]+int(res[1][i])
- else: outFile.write(line)
- inFile.close()
- outFile.write(" ".join(str(x) for x in status_ok)+" tests passed, "+" ".join(str(x) for x in status_err)+" failed\n")
- outFile.close()
- save = True
- if os.path.exists(runall):
- e, o = run_cmd("diff %s.tmp %s | wc -l" % (runall, runall))
- if o=="0": save=False
- if save: run_cmd("mv %s.tmp %s" % (runall, runall))
- return
+ def run_workflows(self, workflows=[], logger=None, known_errors={}):
+ if not workflows:
+ return
+ workflows = workflows[::-1]
+ threads = []
+ while len(workflows) > 0:
+ threads = [t for t in threads if t.is_alive()]
+ if len(threads) < self.jobs:
+ try:
+ wf = workflows.pop()
+ wf_err = {}
+ if wf in known_errors:
+ wf_err = known_errors[wf]
+ t = threading.Thread(
+ target=runThreadMatrix,
+ args=(
+ self.basedir,
+ wf,
+ self.args["rest"] + " " + self.args["w"],
+ logger,
+ wf_err,
+ ),
+ )
+ t.start()
+ threads.append(t)
+ except Exception as e:
+ print("runPyRelVal> ERROR threading matrix : caught exception: " + str(e))
+ else:
+ time.sleep(5)
+ for t in threads:
+ t.join()
+ ret, out = doCmd("touch " + self.basedir + "/done." + self.jobid)
+ if logger:
+ logger.updateRelValMatrixPartialLogs(self.basedir, "done." + self.jobid)
+ return
- def update_known_errors(self):
- known_errors = {}
- for logFile in glob.glob(self.basedir+'/*/known_error.json'):
- try:
- wf = logFile.split("/")[-2].split("_")[0]
- known_errors[wf] = json.load(open(logFile))
- except Exception as e:
- print("ERROR:",e)
- outFile = open(os.path.join(self.outdir,"all_known_errors.json"),"w")
- json.dump(known_errors, outFile)
- outFile.close()
+ def update_runall(self):
+ self.update_known_errors()
+ runall = os.path.join(self.outdir, "runall-report-step123-.log")
+ outFile = open(runall + ".tmp", "w")
+ status_ok = []
+ status_err = []
+ len_ok = 0
+ len_err = 0
+ for logFile in glob.glob(self.basedir + "/*/workflow.log"):
+ inFile = open(logFile)
+ for line in inFile:
+ if re.match("^\s*(\d+\s+)+tests passed,\s+(\d+\s+)+failed\s*$", line):
+ res = line.strip().split(" tests passed, ")
+ res[0] = res[0].split()
+ res[1] = res[1].replace(" failed", "").split()
+ len_res = len(res[0])
+ if len_res > len_ok:
+ for i in range(len_ok, len_res):
+ status_ok.append(0)
+ len_ok = len_res
+ for i in range(0, len_res):
+ status_ok[i] = status_ok[i] + int(res[0][i])
+ len_res = len(res[1])
+ if len_res > len_err:
+ for i in range(len_err, len_res):
+ status_err.append(0)
+ len_err = len_res
+ for i in range(0, len_res):
+ status_err[i] = status_err[i] + int(res[1][i])
+ else:
+ outFile.write(line)
+ inFile.close()
+ outFile.write(
+ " ".join(str(x) for x in status_ok)
+ + " tests passed, "
+ + " ".join(str(x) for x in status_err)
+ + " failed\n"
+ )
+ outFile.close()
+ save = True
+ if os.path.exists(runall):
+ e, o = run_cmd("diff %s.tmp %s | wc -l" % (runall, runall))
+ if o == "0":
+ save = False
+ if save:
+ run_cmd("mv %s.tmp %s" % (runall, runall))
+ return
- def update_wftime(self):
- time_info = {}
- for logFile in glob.glob(self.basedir+'/*/time.log'):
- try:
- wf = logFile.split("/")[-2].split("_")[0]
- inFile = open(logFile)
- line = inFile.readline().strip()
- inFile.close()
- m = re.match("^(\d+)(\.\d+|)$",line)
- if m: time_info[wf]=int(m.group(1))
- except Exception as e:
- print("ERROR:",e)
- outFile = open(os.path.join(self.outdir,"relval-times.json"),"w")
- json.dump(time_info, outFile)
- outFile.close()
+ def update_known_errors(self):
+ known_errors = {}
+ for logFile in glob.glob(self.basedir + "/*/known_error.json"):
+ try:
+ wf = logFile.split("/")[-2].split("_")[0]
+ known_errors[wf] = json.load(open(logFile))
+ except Exception as e:
+ print("ERROR:", e)
+ outFile = open(os.path.join(self.outdir, "all_known_errors.json"), "w")
+ json.dump(known_errors, outFile)
+ outFile.close()
- def parseLog(self):
- logData = {}
- logRE = re.compile('^.*/([1-9][0-9]*(\.[0-9]+|))_[^/]+/step([1-9])_.*\.log$')
- max_steps = 0
- for logFile in glob.glob(self.basedir+'/[1-9]*/step[0-9]*.log'):
- m = logRE.match(logFile)
- if not m: continue
- wf = m.group(1)
- step = int(m.group(3))
- if step>max_steps: max_steps=step
- if wf not in logData:
- logData[wf] = {'steps': {}, 'events' : [], 'failed' : [], 'warning' : []}
- if step not in logData[wf]['steps']:
- logData[wf]['steps'][step]=logFile
- cache_read=0
- log_processed=0
- for wf in logData:
- for k in logData[wf]:
- if k == 'steps': continue
- for s in range(0, max_steps):
- logData[wf][k].append(-1)
- index =0
- for step in sorted(logData[wf]['steps']):
- data = [0, 0, 0]
- logFile = logData[wf]['steps'][step]
- json_cache = os.path.dirname(logFile)+"/logcache_"+str(step)+".json"
- log_reader_config_path = logFile + "-read_config"
- config_list = []
- cache_ok = False
- if (os.path.exists(json_cache)) and (os.path.getmtime(logFile)<=os.path.getmtime(json_cache)):
- try:
- jfile = open(json_cache,"r")
- data = json.load(jfile)
- jfile.close()
- cache_read+=1
- cache_ok = True
- except:
- os.remove(json_cache)
- if not cache_ok:
- try:
- es_parse_log(logFile)
- except Exception as e:
- print("Sending log information to elasticsearch failed" , str(e))
- inFile = open(logFile)
- for line_nr, line in enumerate(inFile):
- config_list = add_exception_to_config(line, line_nr, config_list)
- if '%MSG-w' in line: data[1]=data[1]+1
- if '%MSG-e' in line: data[2]=data[2]+1
- if 'Begin processing the ' in line: data[0]=data[0]+1
- inFile.close()
- jfile = open(json_cache,"w")
- json.dump(data,jfile)
- jfile.close()
- transform_and_write_config_file(log_reader_config_path, config_list)
- log_processed+=1
- logData[wf]['events'][index] = data[0]
- logData[wf]['failed'][index] = data[2]
- logData[wf]['warning'][index] = data[1]
- index+=1
- del logData[wf]['steps']
+ def update_wftime(self):
+ time_info = {}
+ for logFile in glob.glob(self.basedir + "/*/time.log"):
+ try:
+ wf = logFile.split("/")[-2].split("_")[0]
+ inFile = open(logFile)
+ line = inFile.readline().strip()
+ inFile.close()
+ m = re.match("^(\d+)(\.\d+|)$", line)
+ if m:
+ time_info[wf] = int(m.group(1))
+ except Exception as e:
+ print("ERROR:", e)
+ outFile = open(os.path.join(self.outdir, "relval-times.json"), "w")
+ json.dump(time_info, outFile)
+ outFile.close()
- print("Log processed: ",log_processed)
- print("Caches read:",cache_read)
- from pickle import Pickler
- outFile = open(os.path.join(self.outdir,'runTheMatrixMsgs.pkl'), 'wb')
- pklFile = Pickler(outFile, protocol=2)
- pklFile.dump(logData)
- outFile.close()
- return
+ def parseLog(self):
+ logData = {}
+ logRE = re.compile("^.*/([1-9][0-9]*(\.[0-9]+|))_[^/]+/step([1-9])_.*\.log$")
+ max_steps = 0
+ for logFile in glob.glob(self.basedir + "/[1-9]*/step[0-9]*.log"):
+ m = logRE.match(logFile)
+ if not m:
+ continue
+ wf = m.group(1)
+ step = int(m.group(3))
+ if step > max_steps:
+ max_steps = step
+ if wf not in logData:
+ logData[wf] = {"steps": {}, "events": [], "failed": [], "warning": []}
+ if step not in logData[wf]["steps"]:
+ logData[wf]["steps"][step] = logFile
+ cache_read = 0
+ log_processed = 0
+ for wf in logData:
+ for k in logData[wf]:
+ if k == "steps":
+ continue
+ for s in range(0, max_steps):
+ logData[wf][k].append(-1)
+ index = 0
+ for step in sorted(logData[wf]["steps"]):
+ data = [0, 0, 0]
+ logFile = logData[wf]["steps"][step]
+ json_cache = os.path.dirname(logFile) + "/logcache_" + str(step) + ".json"
+ log_reader_config_path = logFile + "-read_config"
+ config_list = []
+ cache_ok = False
+ if (os.path.exists(json_cache)) and (
+ os.path.getmtime(logFile) <= os.path.getmtime(json_cache)
+ ):
+ try:
+ jfile = open(json_cache, "r")
+ data = json.load(jfile)
+ jfile.close()
+ cache_read += 1
+ cache_ok = True
+ except:
+ os.remove(json_cache)
+ if not cache_ok:
+ try:
+ es_parse_log(logFile)
+ except Exception as e:
+ print("Sending log information to elasticsearch failed", str(e))
+ inFile = open(logFile)
+ for line_nr, line in enumerate(inFile):
+ config_list = add_exception_to_config(line, line_nr, config_list)
+ if "%MSG-w" in line:
+ data[1] = data[1] + 1
+ if "%MSG-e" in line:
+ data[2] = data[2] + 1
+ if "Begin processing the " in line:
+ data[0] = data[0] + 1
+ inFile.close()
+ jfile = open(json_cache, "w")
+ json.dump(data, jfile)
+ jfile.close()
+ transform_and_write_config_file(log_reader_config_path, config_list)
+ log_processed += 1
+ logData[wf]["events"][index] = data[0]
+ logData[wf]["failed"][index] = data[2]
+ logData[wf]["warning"][index] = data[1]
+ index += 1
+ del logData[wf]["steps"]
+
+ print("Log processed: ", log_processed)
+ print("Caches read:", cache_read)
+ from pickle import Pickler
+ outFile = open(os.path.join(self.outdir, "runTheMatrixMsgs.pkl"), "wb")
+ pklFile = Pickler(outFile, protocol=2)
+ pklFile.dump(logData)
+ outFile.close()
+ return
diff --git a/runTests.py b/runTests.py
index 7fe14679befe..b99aab9e5e41 100755
--- a/runTests.py
+++ b/runTests.py
@@ -24,7 +24,7 @@
if scriptPath not in sys.path:
sys.path.append(scriptPath)
-sys.path.append(os.path.join(scriptPath,"python"))
+sys.path.append(os.path.join(scriptPath, "python"))
from cmsutils import doCmd, MachineCPUCount, getHostName
@@ -34,7 +34,7 @@
# ================================================================================
def runCmd(cmd):
- cmd = cmd.rstrip(';')
+ cmd = cmd.rstrip(";")
print("Running cmd> ", cmd)
ret, out = run_cmd(cmd)
if out:
@@ -87,6 +87,7 @@ def checkTestLogs(self):
# --------------------------------------------------------------------------------
def checkUnitTestLog(self):
import checkTestLog
+
print("unitTest>Going to check log file from unit-tests in ", self.startDir)
# noinspection PyBroadException
try:
@@ -100,39 +101,57 @@ def checkUnitTestLog(self):
# --------------------------------------------------------------------------------
def splitUnitTestLogs(self):
import splitUnitTestLog
+
print("unitTest>Going to split log file from unit-tests in ", self.startDir)
tls = splitUnitTestLog.LogSplitter(self.startDir + "/unitTests-summary.log", True)
tls.split(self.startDir + "/unitTests.log")
- runCmd('cd ' + self.startDir + '; zip -r unitTestLogs.zip unitTestLogs')
+ runCmd("cd " + self.startDir + "; zip -r unitTestLogs.zip unitTestLogs")
return
# --------------------------------------------------------------------------------
def run(self):
IBThreadBase.run(self)
- arch = os.environ['SCRAM_ARCH']
- if platform.system() == 'Darwin':
- print('unitTest> Skipping unit tests for MacOS')
+ arch = os.environ["SCRAM_ARCH"]
+ if platform.system() == "Darwin":
+ print("unitTest> Skipping unit tests for MacOS")
return
- precmd=""
+ precmd = ""
paralleJobs = MachineCPUCount
- if ('_ASAN_X' in os.environ["CMSSW_VERSION"]) or ('_UBSAN_X' in os.environ["CMSSW_VERSION"]):
- paralleJobs = int(MachineCPUCount/2)
- if (self.xType == 'GPU') or ("_GPU_X" in os.environ["CMSSW_VERSION"]):
- precmd="export USER_UNIT_TESTS=cuda ;"
+ if ("_ASAN_X" in os.environ["CMSSW_VERSION"]) or (
+ "_UBSAN_X" in os.environ["CMSSW_VERSION"]
+ ):
+ paralleJobs = int(MachineCPUCount / 2)
+ if (self.xType == "GPU") or ("_GPU_X" in os.environ["CMSSW_VERSION"]):
+ precmd = "export USER_UNIT_TESTS=cuda ;"
skiptests = ""
- if 'lxplus' in getHostName():
- skiptests = 'SKIP_UNITTESTS=ExpressionEvaluatorUnitTest'
- TEST_PATH = os.environ['CMSSW_RELEASE_BASE'] + "/test/" + arch
+ if "lxplus" in getHostName():
+ skiptests = "SKIP_UNITTESTS=ExpressionEvaluatorUnitTest"
+ TEST_PATH = os.environ["CMSSW_RELEASE_BASE"] + "/test/" + arch
err, cmd = run_cmd(
- "cd " + self.startDir + ";scram tool info cmssw 2>&1 | grep CMSSW_BASE= | sed 's|^CMSSW_BASE=||'")
+ "cd "
+ + self.startDir
+ + ";scram tool info cmssw 2>&1 | grep CMSSW_BASE= | sed 's|^CMSSW_BASE=||'"
+ )
if cmd:
TEST_PATH = TEST_PATH + ":" + cmd + "/test/" + arch
try:
- cmd = precmd+"cd " + self.startDir + r"; touch nodelete.root nodelete.txt nodelete.log; sed -i -e 's|testing.log; *$(CMD_rm) *-f *$($(1)_objdir)/testing.log;|testing.log;|;s|test $(1) had ERRORS\") *\&\&|test $(1) had ERRORS\" >> $($(1)_objdir)/testing.log) \&\&|' config/SCRAM/GMake/Makefile.rules; "
- cmd += 'PATH=' + TEST_PATH + ':$PATH scram b -f -k -j ' + str(
- paralleJobs) + ' unittests ' + skiptests + ' >unitTests1.log 2>&1 ; '
- cmd += 'touch nodelete.done; ls -l nodelete.*'
- print('unitTest> Going to run ' + cmd)
+ cmd = (
+ precmd
+ + "cd "
+ + self.startDir
+ + r"; touch nodelete.root nodelete.txt nodelete.log; sed -i -e 's|testing.log; *$(CMD_rm) *-f *$($(1)_objdir)/testing.log;|testing.log;|;s|test $(1) had ERRORS\") *\&\&|test $(1) had ERRORS\" >> $($(1)_objdir)/testing.log) \&\&|' config/SCRAM/GMake/Makefile.rules; "
+ )
+ cmd += (
+ "PATH="
+ + TEST_PATH
+ + ":$PATH scram b -f -k -j "
+ + str(paralleJobs)
+ + " unittests "
+ + skiptests
+ + " >unitTests1.log 2>&1 ; "
+ )
+ cmd += "touch nodelete.done; ls -l nodelete.*"
+ print("unitTest> Going to run " + cmd)
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running unit-tests: cmd returned " + str(ret))
@@ -141,16 +160,18 @@ def run(self):
pass
# noinspection PyBroadException
try:
- testLog = self.startDir + '/tmp/' + arch + '/src/'
- logFile = self.startDir + '/unitTests.log'
- runCmd('rm -f %s; touch %s' % (logFile, logFile))
- for packDir in glob.glob(testLog + '*/*'):
- pack = packDir.replace(testLog, '')
+ testLog = self.startDir + "/tmp/" + arch + "/src/"
+ logFile = self.startDir + "/unitTests.log"
+ runCmd("rm -f %s; touch %s" % (logFile, logFile))
+ for packDir in glob.glob(testLog + "*/*"):
+ pack = packDir.replace(testLog, "")
runCmd("echo '>> Entering Package %s' >> %s" % (pack, logFile))
- packDir += '/test'
+ packDir += "/test"
if os.path.exists(packDir):
- err, testFiles = run_cmd('find ' + packDir + ' -maxdepth 2 -mindepth 2 -name testing.log -type f')
- for lFile in testFiles.strip().split('\n'):
+ err, testFiles = run_cmd(
+ "find " + packDir + " -maxdepth 2 -mindepth 2 -name testing.log -type f"
+ )
+ for lFile in testFiles.strip().split("\n"):
if lFile:
runCmd("cat %s >> %s" % (lFile, logFile))
runCmd("echo '>> Leaving Package %s' >> %s" % (pack, logFile))
@@ -164,6 +185,7 @@ def run(self):
# ================================================================================
+
class LibDepsTester(IBThreadBase):
def __init__(self, startDirIn, Logger, deps=None):
if deps is None:
@@ -176,8 +198,17 @@ def __init__(self, startDirIn, Logger, deps=None):
def run(self):
IBThreadBase.run(self)
- cmd = 'cd ' + self.startDir + ' ; ' + scriptPath + '/checkLibDeps.py -d ' + os.environ[
- "CMSSW_RELEASE_BASE"] + ' --plat ' + os.environ['SCRAM_ARCH'] + ' > chkLibDeps.log 2>&1'
+ cmd = (
+ "cd "
+ + self.startDir
+ + " ; "
+ + scriptPath
+ + "/checkLibDeps.py -d "
+ + os.environ["CMSSW_RELEASE_BASE"]
+ + " --plat "
+ + os.environ["SCRAM_ARCH"]
+ + " > chkLibDeps.log 2>&1"
+ )
try:
ret = runCmd(cmd)
if ret != 0:
@@ -187,12 +218,13 @@ def run(self):
print(" cmd as of now : '" + cmd + "'")
self.logger.updateLogFile("chkLibDeps.log")
- self.logger.updateLogFile("libchk.pkl", 'new')
+ self.logger.updateLogFile("libchk.pkl", "new")
return
# ================================================================================
+
class DirSizeTester(IBThreadBase):
def __init__(self, startDirIn, Logger, deps=None):
if deps is None:
@@ -205,12 +237,16 @@ def __init__(self, startDirIn, Logger, deps=None):
def run(self):
IBThreadBase.run(self)
- cmd = 'cd ' + self.startDir + '; ' + scriptPath + '/checkDirSizes.py '
+ cmd = "cd " + self.startDir + "; " + scriptPath + "/checkDirSizes.py "
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running DirSizeTester: cmd returned " + str(ret))
- cmd = 'cd ' + self.startDir + '; storeTreeInfo.py --checkDir src --outFile treeInfo-IBsrc.json '
+ cmd = (
+ "cd "
+ + self.startDir
+ + "; storeTreeInfo.py --checkDir src --outFile treeInfo-IBsrc.json "
+ )
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running DirSizeTester: cmd returned " + str(ret))
@@ -221,6 +257,7 @@ def run(self):
# ================================================================================
+
class ReleaseProductsDump(IBThreadBase):
def __init__(self, startDirIn, Logger, deps=None):
IBThreadBase.__init__(self, deps)
@@ -231,22 +268,25 @@ def __init__(self, startDirIn, Logger, deps=None):
def run(self):
IBThreadBase.run(self)
- logDir = os.path.join(self.startDir, 'logs', os.environ['SCRAM_ARCH'])
+ logDir = os.path.join(self.startDir, "logs", os.environ["SCRAM_ARCH"])
if not os.path.exists(logDir):
os.makedirs(logDir)
- rperrFileName = os.path.join(logDir, 'relProducts.err')
+ rperrFileName = os.path.join(logDir, "relProducts.err")
- cmd = 'cd ' + self.startDir + '; RelProducts.pl > ReleaseProducts.list 2> ' + rperrFileName
+ cmd = (
+ "cd " + self.startDir + "; RelProducts.pl > ReleaseProducts.list 2> " + rperrFileName
+ )
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running ReleaseProductsChecks: cmd returned " + str(ret))
self.logger.updateLogFile(self.startDir + "/ReleaseProducts.list")
- self.logger.updateLogFile(rperrFileName, "logs/" + os.environ['SCRAM_ARCH'])
+ self.logger.updateLogFile(rperrFileName, "logs/" + os.environ["SCRAM_ARCH"])
# ================================================================================
+
class BuildFileDependencyCheck(IBThreadBase):
def __init__(self, startDirIn, Logger, deps=None):
IBThreadBase.__init__(self, deps)
@@ -256,36 +296,46 @@ def __init__(self, startDirIn, Logger, deps=None):
def run(self):
IBThreadBase.run(self)
- logDir = os.path.join(self.startDir, 'logs', os.environ['SCRAM_ARCH'])
+ logDir = os.path.join(self.startDir, "logs", os.environ["SCRAM_ARCH"])
if not os.path.exists(logDir):
os.makedirs(logDir)
- dverrFileName = os.path.join(logDir, 'depsViolations.err')
+ dverrFileName = os.path.join(logDir, "depsViolations.err")
- depDir = os.path.join(self.startDir, 'etc/dependencies')
+ depDir = os.path.join(self.startDir, "etc/dependencies")
if not os.path.exists(depDir):
os.makedirs(depDir)
- depFile = os.path.join(depDir, 'depsViolations.txt')
-
- cmd = 'cd ' + self.startDir + '; ReleaseDepsChecks.pl --detail > ' + depFile + ' 2> ' + dverrFileName
+ depFile = os.path.join(depDir, "depsViolations.txt")
+
+ cmd = (
+ "cd "
+ + self.startDir
+ + "; ReleaseDepsChecks.pl --detail > "
+ + depFile
+ + " 2> "
+ + dverrFileName
+ )
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running BuildFileDependencyCheck: cmd returned " + str(ret))
- cmd = 'cd ' + self.startDir + '; ' + scriptPath + '/splitDepViolationLog.py --log ' + depFile
+ cmd = (
+ "cd " + self.startDir + "; " + scriptPath + "/splitDepViolationLog.py --log " + depFile
+ )
ret = runCmd(cmd)
if ret != 0:
print("ERROR when running BuildFileDependencyCheck: cmd returned " + str(ret))
bdir = os.path.join(depDir, "depViolationLogs")
import fnmatch
+
for root, dirnames, filenames in os.walk(bdir):
- for filename in fnmatch.filter(filenames, 'depViolation.log'):
- pkg = "/".join(root.replace(bdir, "").split('/')[1:3])
+ for filename in fnmatch.filter(filenames, "depViolation.log"):
+ pkg = "/".join(root.replace(bdir, "").split("/")[1:3])
log = os.path.join(bdir, pkg, "log.txt")
runCmd("touch " + log + "; cat " + os.path.join(root, filename) + " >> " + log)
self.logger.updateLogFile(self.startDir + "/depViolationSummary.pkl", "testLogs")
- self.logger.updateLogFile(dverrFileName, "logs/" + os.environ['SCRAM_ARCH'])
+ self.logger.updateLogFile(dverrFileName, "logs/" + os.environ["SCRAM_ARCH"])
self.logger.updateLogFile(depFile, "etc/dependencies/")
self.logger.updateLogFile(bdir, "etc/dependencies/")
return
@@ -293,6 +343,7 @@ def run(self):
# ================================================================================
+
class CodeRulesChecker(IBThreadBase):
def __init__(self, startDirIn, Logger, deps=None):
IBThreadBase.__init__(self, deps)
@@ -302,12 +353,15 @@ def __init__(self, startDirIn, Logger, deps=None):
def run(self):
IBThreadBase.run(self)
- cmd = 'cd ' + self.startDir + '; rm -rf codeRules; mkdir codeRules; cd codeRules; '
- cmd += 'cmsCodeRulesChecker.py -r 1,2,3,4,5 -d ' + os.environ[
- 'CMSSW_RELEASE_BASE'] + '/src -S . -html 2>&1 >CodeRulesChecker.log ;'
+ cmd = "cd " + self.startDir + "; rm -rf codeRules; mkdir codeRules; cd codeRules; "
+ cmd += (
+ "cmsCodeRulesChecker.py -r 1,2,3,4,5 -d "
+ + os.environ["CMSSW_RELEASE_BASE"]
+ + "/src -S . -html 2>&1 >CodeRulesChecker.log ;"
+ )
cmd += "find . -name log.html -type f | xargs --no-run-if-empty sed -i -e 's|cmslxr.fnal.gov|cmssdt.cern.ch|'"
- print('CodeRulesChecker: in: ', os.getcwd())
- print(' ... going to execute:', cmd)
+ print("CodeRulesChecker: in: ", os.getcwd())
+ print(" ... going to execute:", cmd)
try:
ret = runCmd(cmd)
if ret != 0:
@@ -322,8 +376,8 @@ def run(self):
# ================================================================================
-class ReleaseTester(object):
+class ReleaseTester(object):
def __init__(self, releaseDir, dryRun=False):
self.dryRun = dryRun
self.plat = os.environ["SCRAM_ARCH"]
@@ -333,8 +387,10 @@ def __init__(self, releaseDir, dryRun=False):
self.relTag = self.release
self.threadList = {}
from cmsutils import getIBReleaseInfo
+
self.relCycle, day, hour = getIBReleaseInfo(self.release)
from logUpdater import LogUpdater
+
self.logger = LogUpdater(self.cmsswBuildDir, self.dryRun)
return
@@ -355,60 +411,60 @@ def doTest(self, only=None):
return
self.runProjectInit()
- if not only or 'dirsize' in only:
- print('\n' + 80 * '-' + ' dirsize \n')
- self.threadList['dirsize'] = self.runDirSize()
+ if not only or "dirsize" in only:
+ print("\n" + 80 * "-" + " dirsize \n")
+ self.threadList["dirsize"] = self.runDirSize()
- if not only or 'depViolation' in only:
- print('\n' + 80 * '-' + ' depViolation \n')
- self.threadList['depViolation'] = self.runBuildFileDeps()
+ if not only or "depViolation" in only:
+ print("\n" + 80 * "-" + " depViolation \n")
+ self.threadList["depViolation"] = self.runBuildFileDeps()
- if not only or 'relProducts' in only:
- print('\n' + 80 * '-' + ' relProducts \n')
- self.threadList['relProducts'] = self.runReleaseProducts()
+ if not only or "relProducts" in only:
+ print("\n" + 80 * "-" + " relProducts \n")
+ self.threadList["relProducts"] = self.runReleaseProducts()
- if not only or 'unit' in only:
- print('\n' + 80 * '-' + ' unit \n')
- self.threadList['unit'] = self.runUnitTests()
+ if not only or "unit" in only:
+ print("\n" + 80 * "-" + " unit \n")
+ self.threadList["unit"] = self.runUnitTests()
# We only want to explicitly run this test.
- if only and 'gpu_unit' in only:
- print('\n' + 80 * '-' + ' gpu_unit \n')
- self.threadList['gpu_unit'] = self.runUnitTests([], 'GPU')
+ if only and "gpu_unit" in only:
+ print("\n" + 80 * "-" + " gpu_unit \n")
+ self.threadList["gpu_unit"] = self.runUnitTests([], "GPU")
- if not only or 'codeRules' in only:
- print('\n' + 80 * '-' + ' codeRules \n')
- self.threadList['codeRules'] = self.runCodeRulesChecker()
+ if not only or "codeRules" in only:
+ print("\n" + 80 * "-" + " codeRules \n")
+ self.threadList["codeRules"] = self.runCodeRulesChecker()
- if not only or 'libcheck' in only:
- print('\n' + 80 * '-' + ' libcheck\n')
- self.threadList['libcheck'] = self.checkLibDeps()
+ if not only or "libcheck" in only:
+ print("\n" + 80 * "-" + " libcheck\n")
+ self.threadList["libcheck"] = self.checkLibDeps()
- if not only or 'pyConfigs' in only:
- print('\n' + 80 * '-' + ' pyConfigs \n')
+ if not only or "pyConfigs" in only:
+ print("\n" + 80 * "-" + " pyConfigs \n")
# noinspection PyNoneFunctionAssignment
- self.threadList['pyConfigs'] = self.checkPyConfigs()
+ self.threadList["pyConfigs"] = self.checkPyConfigs()
- if not only or 'dupDict' in only:
- print('\n' + 80 * '-' + ' dupDict \n')
+ if not only or "dupDict" in only:
+ print("\n" + 80 * "-" + " dupDict \n")
# noinspection PyNoneFunctionAssignment
- self.threadList['dupDict'] = self.runDuplicateDictCheck()
+ self.threadList["dupDict"] = self.runDuplicateDictCheck()
- print('TestWait> waiting for tests to finish ....')
+ print("TestWait> waiting for tests to finish ....")
for task in self.threadList:
if self.threadList[task]:
self.threadList[task].join()
- print('TestWait> Tests finished ')
+ print("TestWait> Tests finished ")
return
# --------------------------------------------------------------------------------
# noinspection PyUnusedLocal
def checkPyConfigs(self, deps=None):
print("Going to check python configs in ", os.getcwd())
- cmd = scriptPath + '/checkPyConfigs.py > chkPyConf.log 2>&1'
+ cmd = scriptPath + "/checkPyConfigs.py > chkPyConf.log 2>&1"
doCmd(cmd, self.dryRun, self.cmsswBuildDir)
self.logger.updateLogFile("chkPyConf.log")
- self.logger.updateLogFile("chkPyConf.log", 'testLogs')
+ self.logger.updateLogFile("chkPyConf.log", "testLogs")
return None
# --------------------------------------------------------------------------------
@@ -456,9 +512,9 @@ def runCodeRulesChecker(self, deps=None):
# noinspection PyUnusedLocal
def runDuplicateDictCheck(self, deps=None):
print("runDuplicateDictTests> Going to run duplicateReflexLibrarySearch.py ... ")
- script = 'export USER_SCRAM_TARGET=default ; eval $(scram run -sh) ; duplicateReflexLibrarySearch.py'
- for opt in ['dup', 'lostDefs', 'edmPD']:
- cmd = script + ' --' + opt + ' 2>&1 >dupDict-' + opt + '.log'
+ script = "export USER_SCRAM_TARGET=default ; eval $(scram run -sh) ; duplicateReflexLibrarySearch.py"
+ for opt in ["dup", "lostDefs", "edmPD"]:
+ cmd = script + " --" + opt + " 2>&1 >dupDict-" + opt + ".log"
try:
doCmd(cmd, self.dryRun, self.cmsswBuildDir)
except Exception as e:
@@ -521,6 +577,7 @@ def runBuildFileDeps(self, deps=None):
# ================================================================================
+
def main():
try:
import argparse
@@ -528,11 +585,11 @@ def main():
import archived_argparse as argparse
parser = argparse.ArgumentParser()
- parser.add_argument('--dryRun', default=False, action='store_true')
- parser.add_argument('--only')
+ parser.add_argument("--dryRun", default=False, action="store_true")
+ parser.add_argument("--only")
args = parser.parse_args()
- rel = os.environ.get('CMSSW_BASE')
+ rel = os.environ.get("CMSSW_BASE")
dryRun = args.dryRun
if args.only is not None:
only = args.only.split(",")
diff --git a/shift/libib.py b/shift/libib.py
index deba5a740a21..746e095c0126 100644
--- a/shift/libib.py
+++ b/shift/libib.py
@@ -201,9 +201,7 @@ def check_ib(data, compilation_only=False):
for itm in pkg_errors.items():
res[arch]["build"].append(
- LogEntry(
- name=pkg.name(), url=f"{url_prefix}/{pkg.name()}", data=itm
- )
+ LogEntry(name=pkg.name(), url=f"{url_prefix}/{pkg.name()}", data=itm)
)
if not compilation_only:
diff --git a/shift/report.py b/shift/report.py
index 2cabd319fa53..748fbddd85e7 100644
--- a/shift/report.py
+++ b/shift/report.py
@@ -9,9 +9,7 @@
# noinspection PyUnresolvedReferences
from libib import PackageInfo, ErrorInfo
-if sys.version_info.major < 3 or (
- sys.version_info.major == 3 and sys.version_info.minor < 6
-):
+if sys.version_info.major < 3 or (sys.version_info.major == 3 and sys.version_info.minor < 6):
print("This script requires Python 3.6 or newer!", file=sys.stderr)
exit(0)
@@ -67,14 +65,11 @@ def main():
file=f,
)
for error in errors[arch]["utest"]:
- print(
- f"| [{error.name}]({error.url}) | TBD | TBD |", file=f
- )
+ print(f"| [{error.name}]({error.url}) | TBD | TBD |", file=f)
for error in errors[arch]["relval"]:
print(
- f"| [{error.name}]({error.url}) | {error.data} | "
- f"TBD |",
+ f"| [{error.name}]({error.url}) | {error.data} | " f"TBD |",
file=f,
)
else:
diff --git a/shift/uniq-errors.py b/shift/uniq-errors.py
index 877d0afcdf56..f6945b8b769f 100644
--- a/shift/uniq-errors.py
+++ b/shift/uniq-errors.py
@@ -24,28 +24,20 @@ class CompError:
def main():
parser = argparse.ArgumentParser()
- parser.add_argument(
- "-a", "--architecture", help="Release architecture (e.g. el9_amd64_gcc13)"
- )
+ parser.add_argument("-a", "--architecture", help="Release architecture (e.g. el9_amd64_gcc13)")
parser.add_argument("-d", "--date", help="IB date")
parser.add_argument("-s", "--series", help="IB series (e.g. CMSSW_13_3_X)")
- parser.add_argument(
- "-f", "--filter", help="Only display errors containing given text"
- )
+ parser.add_argument("-f", "--filter", help="Only display errors containing given text")
args = parser.parse_args()
print(f"Getting IB data for {args.series} on {args.date}")
comp = libib.get_ib_comparision(args.date, args.series)[args.series]
if comp is None:
- print(
- f"No errors found for IB {args.series} on {args.date} arch {args.architecture}"
- )
+ print(f"No errors found for IB {args.series} on {args.date} arch {args.architecture}")
return
- print(
- f"Extracting build errors for {args.series} on {args.date} arch {args.architecture}"
- )
+ print(f"Extracting build errors for {args.series} on {args.date} arch {args.architecture}")
_, errors = libib.check_ib(comp, True)
errors = errors[args.architecture]["build"]
seen_errors = set()
diff --git a/show-ibs-schedule.py b/show-ibs-schedule.py
index 8478b85dc6e1..b29df70e8160 100755
--- a/show-ibs-schedule.py
+++ b/show-ibs-schedule.py
@@ -4,51 +4,58 @@
specs = get_config_map_properties({"DISABLED": "1"})
data = {}
days = range(7)
-hours = (0,11,23)
+hours = (0, 11, 23)
for day in days:
- data[day] = {}
- for hour in hours[1:]:
- data[day][hour] = []
-data[0]={0:[], 23:[]}
-data[6]={11:[]}
+ data[day] = {}
+ for hour in hours[1:]:
+ data[day][hour] = []
+data[0] = {0: [], 23: []}
+data[6] = {11: []}
dev_rel = []
for spec in specs:
- rel = "_".join(spec['CMSDIST_TAG'].split("/")[1].split("_")[:3])
- if ('BUILD_PATCH_RELEASE' in spec): dev_rel.append(rel)
- sel_days = days[:]
- sel_hours = hours[:]
- if 'BUILD_DAY' in spec:
- sel_days=[]
- for day in spec['BUILD_DAY'].split(","):
- try:
- day = int(day.strip())
- if not day in data: continue
- sel_days.append(day)
- except: pass
- if 'BUILD_HOUR' in spec:
- sel_hours = []
- for hour in spec['BUILD_HOUR'].split(","):
- try:
- hour = int(hour.strip())
- if not hour in hours: continue
- sel_hours.append(hour)
- except: pass
- for day in data.keys():
- if not day in sel_days: continue
- for hour in data[day].keys():
- if not hour in sel_hours: continue
- if (rel in dev_rel) or ((day==0) and (hour==0)):
- data[day][hour].append(spec)
- elif (not 0 in sel_days) or (not not 0 in sel_hours):
- data[day][hour].append(spec)
+ rel = "_".join(spec["CMSDIST_TAG"].split("/")[1].split("_")[:3])
+ if "BUILD_PATCH_RELEASE" in spec:
+ dev_rel.append(rel)
+ sel_days = days[:]
+ sel_hours = hours[:]
+ if "BUILD_DAY" in spec:
+ sel_days = []
+ for day in spec["BUILD_DAY"].split(","):
+ try:
+ day = int(day.strip())
+ if not day in data:
+ continue
+ sel_days.append(day)
+ except:
+ pass
+ if "BUILD_HOUR" in spec:
+ sel_hours = []
+ for hour in spec["BUILD_HOUR"].split(","):
+ try:
+ hour = int(hour.strip())
+ if not hour in hours:
+ continue
+ sel_hours.append(hour)
+ except:
+ pass
+ for day in data.keys():
+ if not day in sel_days:
+ continue
+ for hour in data[day].keys():
+ if not hour in sel_hours:
+ continue
+ if (rel in dev_rel) or ((day == 0) and (hour == 0)):
+ data[day][hour].append(spec)
+ elif (not 0 in sel_days) or (not not 0 in sel_hours):
+ data[day][hour].append(spec)
print("Day\tHour\tx86_64\tppc64le\taarch64")
for day in data.keys():
- for hour in data[day].keys():
- str = "%s\t%s\t" % (day, hour)
- cnt = {"amd64":0, "ppc64le":0, "aarch64":0}
- for spec in data[day][hour]:
- arch = spec['SCRAM_ARCH'].split("_")[1]
- cnt[arch]+=1
- str += "%s\t%s\t%s" % (cnt["amd64"], cnt["ppc64le"], cnt["aarch64"])
- print (str)
+ for hour in data[day].keys():
+ str = "%s\t%s\t" % (day, hour)
+ cnt = {"amd64": 0, "ppc64le": 0, "aarch64": 0}
+ for spec in data[day][hour]:
+ arch = spec["SCRAM_ARCH"].split("_")[1]
+ cnt[arch] += 1
+ str += "%s\t%s\t%s" % (cnt["amd64"], cnt["ppc64le"], cnt["aarch64"])
+ print(str)
diff --git a/splitDepViolationLog.py b/splitDepViolationLog.py
index a136ca230a34..8d96eb076e68 100755
--- a/splitDepViolationLog.py
+++ b/splitDepViolationLog.py
@@ -10,11 +10,10 @@
class DepViolSplitter(object):
def __init__(self, outFileIn=None, verbIn=False):
-
self.outFile = sys.stdout
if outFileIn:
print("Summary file:", outFileIn)
- self.outFile = open(outFileIn, 'w')
+ self.outFile = open(outFileIn, "w")
self.verbose = verbIn
@@ -31,20 +30,19 @@ def setVerbose(self, verbIn=False):
# --------------------------------------------------------------------------------
def split(self, logFile):
+ self.outFile.write("going to check " + logFile + "\n")
- self.outFile.write("going to check " + logFile + '\n')
-
- pkgStartRe = re.compile(r'^>> Checking dependency for (.*)\s*$')
- pkgEndRe = re.compile(r'^>> Done Checking dependency for (.*)\s*$')
+ pkgStartRe = re.compile(r"^>> Checking dependency for (.*)\s*$")
+ pkgEndRe = re.compile(r"^>> Done Checking dependency for (.*)\s*$")
- depViolRe = re.compile(r'\s*\*+ERROR: Dependency violation')
+ depViolRe = re.compile(r"\s*\*+ERROR: Dependency violation")
- logDirs = os.path.join(os.path.split(logFile)[0], 'depViolationLogs')
+ logDirs = os.path.join(os.path.split(logFile)[0], "depViolationLogs")
print("logDirs ", logDirs)
if not os.path.exists(logDirs):
os.makedirs(logDirs)
- lf = open(logFile, 'r')
+ lf = open(logFile, "r")
lines = lf
startTime = time.time()
@@ -58,7 +56,6 @@ def split(self, logFile):
actLogLines = []
startFound = False
for line in lines:
-
# write out log to individual log file ...
if startFound and ">> Done Checking dependency " not in line:
actLogLines.append(line)
@@ -78,7 +75,13 @@ def split(self, logFile):
if pkgEndMatch:
pkg = pkgEndMatch.group(1)
if actPkg != pkg:
- self.outFile.write("pkgEndMatch> package mismatch: pkg found " + pkg + ' actPkg=' + actPkg + '\n')
+ self.outFile.write(
+ "pkgEndMatch> package mismatch: pkg found "
+ + pkg
+ + " actPkg="
+ + actPkg
+ + "\n"
+ )
if len(actLogLines) > 2:
pkgViol[pkg] = len(depViolRe.findall("".join(actLogLines)))
@@ -88,7 +91,7 @@ def split(self, logFile):
os.makedirs(actLogDir)
# os.makedirs(actLogDir)
###############################################
- actLogFile = open(os.path.join(actLogDir, 'depViolation.log'), 'w')
+ actLogFile = open(os.path.join(actLogDir, "depViolation.log"), "w")
actLogFile.write("".join(actLogLines))
actLogFile.close()
actLogLines = []
@@ -97,17 +100,21 @@ def split(self, logFile):
stopTime = time.time()
lf.close()
- self.outFile.write("found a total of " + str(nLines) + ' lines in logfile.\n')
- self.outFile.write("analysis took " + str(stopTime - startTime) + ' sec.\n')
+ self.outFile.write("found a total of " + str(nLines) + " lines in logfile.\n")
+ self.outFile.write("analysis took " + str(stopTime - startTime) + " sec.\n")
- self.outFile.write("total number of packages with violations: " + str(len(list(pkgViol.keys()))) + '\n')
+ self.outFile.write(
+ "total number of packages with violations: " + str(len(list(pkgViol.keys()))) + "\n"
+ )
import pprint
+
pprint.pprint(pkgViol)
try:
from pickle import Pickler
- resFile = open('depViolationSummary.pkl', 'wb')
+
+ resFile = open("depViolationSummary.pkl", "wb")
pklr = Pickler(resFile, protocol=2)
pklr.dump(pkgViol)
resFile.close()
@@ -126,9 +133,9 @@ def main():
import archived_argparse as argparse
parser = argparse.ArgumentParser()
- parser.add_argument('-l', '--logFile', default=None, required=True)
- parser.add_argument('-v', '--verbose', action='store_true', default=False)
- parser.add_argument('-s', '--outFile', default=None)
+ parser.add_argument("-l", "--logFile", default=None, required=True)
+ parser.add_argument("-v", "--verbose", action="store_true", default=False)
+ parser.add_argument("-s", "--outFile", default=None)
args = parser.parse_args()
logFile = args.logFile
diff --git a/splitUnitTestLog.py b/splitUnitTestLog.py
index 09fe1779ce2c..34b7990efc24 100755
--- a/splitUnitTestLog.py
+++ b/splitUnitTestLog.py
@@ -10,11 +10,10 @@
class LogSplitter(object):
def __init__(self, outFileIn=None, verbIn=False):
-
self.outFile = sys.stdout
if outFileIn:
print("Summary file:", outFileIn)
- self.outFile = open(outFileIn, 'w')
+ self.outFile = open(outFileIn, "w")
self.verbose = verbIn
@@ -31,14 +30,13 @@ def setVerbose(self, verbIn=False):
# --------------------------------------------------------------------------------
def split(self, logFile):
+ self.outFile.write("going to check " + logFile + "\n")
- self.outFile.write("going to check " + logFile + '\n')
-
- subsysRe = re.compile('^>> Tests for package ([A-Z].*/[A-Z].*) ran.')
+ subsysRe = re.compile("^>> Tests for package ([A-Z].*/[A-Z].*) ran.")
- pkgTestStartRe = re.compile('^===== Test \"(.*)\" ====')
- pkgTestEndRe = re.compile(r'^\^\^\^\^ End Test (.*) \^\^\^\^')
- pkgTestResultRe = re.compile('.*---> test ([^ ]+) (had ERRORS|succeeded)')
+ pkgTestStartRe = re.compile('^===== Test "(.*)" ====')
+ pkgTestEndRe = re.compile(r"^\^\^\^\^ End Test (.*) \^\^\^\^")
+ pkgTestResultRe = re.compile(".*---> test ([^ ]+) (had ERRORS|succeeded)")
pkgStartRe = re.compile("^>> Entering Package (.*)")
# pkgEndRe = re.compile("^>> Leaving Package (.*)")
@@ -48,7 +46,7 @@ def split(self, logFile):
subsysPkgMap = {}
baseDir = os.path.split(logFile)[0]
- logDirs = os.path.join(baseDir, 'unitTestLogs')
+ logDirs = os.path.join(baseDir, "unitTestLogs")
print("logDirs ", logDirs)
if not os.path.exists(logDirs):
os.makedirs(logDirs)
@@ -85,7 +83,7 @@ def split(self, logFile):
actPkgLines += 1
subsysMatch = subsysRe.match(line)
if subsysMatch:
- subsys, pkg = subsysMatch.group(1).split('/')
+ subsys, pkg = subsysMatch.group(1).split("/")
if pkg not in pkgSubsysMap:
pkgSubsysMap[pkg] = subsys
if subsys in subsysPkgMap:
@@ -105,13 +103,19 @@ def split(self, logFile):
if pkgEndMatch:
pkg = pkgEndMatch.group(1)
if actPkg != pkg:
- self.outFile.write("pkgEndMatch> package mismatch: pkg found " + pkg + ' actPkg=' + actPkg + '\n')
+ self.outFile.write(
+ "pkgEndMatch> package mismatch: pkg found "
+ + pkg
+ + " actPkg="
+ + actPkg
+ + "\n"
+ )
pkgLines[pkg] = actPkgLines
if len(actLogLines) > 2:
actLogDir = os.path.join(logDirs, pkg)
os.makedirs(actLogDir)
- actLogFile = open(os.path.join(actLogDir, 'unitTest.log'), 'w')
+ actLogFile = open(os.path.join(actLogDir, "unitTest.log"), "w")
actLogFile.write("".join(actLogLines))
actLogFile.close()
actLogLines = []
@@ -140,26 +144,31 @@ def split(self, logFile):
tst = pkgTestEndMatch.group(1)
if actTest != tst:
self.outFile.write(
- "pkgTestEndMatch> test mismatch: tst found " + tst + ' actTest=' + actTest + '\n')
+ "pkgTestEndMatch> test mismatch: tst found "
+ + tst
+ + " actTest="
+ + actTest
+ + "\n"
+ )
testLines[tst] = actTstLines
stopTime = time.time()
lf.close()
- self.outFile.write("found a total of " + str(nLines) + ' lines in logfile.\n')
- self.outFile.write("analysis took " + str(stopTime - startTime) + ' sec.\n')
+ self.outFile.write("found a total of " + str(nLines) + " lines in logfile.\n")
+ self.outFile.write("analysis took " + str(stopTime - startTime) + " sec.\n")
- self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + '\n')
+ self.outFile.write("total number of tests: " + str(len(list(results.keys()))) + "\n")
nMax = 1000
self.outFile.write("tests with more than " + str(nMax) + " lines of logs:\n")
for pkg, lines in list(testLines.items()):
if lines > nMax:
- self.outFile.write(" " + pkg + ' : ' + str(lines) + '\n')
+ self.outFile.write(" " + pkg + " : " + str(lines) + "\n")
self.outFile.write("Number of tests for packages: \n")
noTests = 0
nrTests = 0
- indent = ' '
+ indent = " "
totalOK = 0
totalFail = 0
unitTestResults = {}
@@ -169,35 +178,59 @@ def split(self, logFile):
else:
nrTests += 1
if self.verbose:
- self.outFile.write('-' * 80 + '\n')
- self.outFile.write(indent + pkg + ' : ')
+ self.outFile.write("-" * 80 + "\n")
+ self.outFile.write(indent + pkg + " : ")
nOK = 0
if self.verbose:
self.outFile.write("\n")
for tNam in testNames[pkg]:
- if results[tNam] == 'succeeded':
+ if results[tNam] == "succeeded":
nOK += 1
totalOK += 1
else:
totalFail += 1
if self.verbose:
- self.outFile.write(indent * 2 + tNam + ' ' + results[tNam] + '\n')
+ self.outFile.write(indent * 2 + tNam + " " + results[tNam] + "\n")
if self.verbose:
self.outFile.write(indent + pkg + " : ")
self.outFile.write(
- indent + str(len(testNames[pkg])) + ' tests in total, OK:' + str(nOK) + ' fail:' + str(
- len(testNames[pkg]) - nOK) + '\n')
+ indent
+ + str(len(testNames[pkg]))
+ + " tests in total, OK:"
+ + str(nOK)
+ + " fail:"
+ + str(len(testNames[pkg]) - nOK)
+ + "\n"
+ )
unitTestResults[pkg] = [testNames[pkg], nOK, len(testNames[pkg]) - nOK]
- self.outFile.write(indent + str(nrTests) + " packages with tests (" + str(
- float(nrTests) / float(len(list(pkgTests.keys())))) + ")\n")
- self.outFile.write(indent + str(noTests) + " packages without tests (" + str(
- float(noTests) / float(len(list(pkgTests.keys())))) + ")\n")
- self.outFile.write(indent + "in total: tests OK : " + str(totalOK) + ' tests FAIL : ' + str(totalFail) + '\n')
+ self.outFile.write(
+ indent
+ + str(nrTests)
+ + " packages with tests ("
+ + str(float(nrTests) / float(len(list(pkgTests.keys()))))
+ + ")\n"
+ )
+ self.outFile.write(
+ indent
+ + str(noTests)
+ + " packages without tests ("
+ + str(float(noTests) / float(len(list(pkgTests.keys()))))
+ + ")\n"
+ )
+ self.outFile.write(
+ indent
+ + "in total: tests OK : "
+ + str(totalOK)
+ + " tests FAIL : "
+ + str(totalFail)
+ + "\n"
+ )
try:
from pickle import Pickler
- resFile = open(baseDir + '/unitTestResults.pkl', 'wb')
+
+ resFile = open(baseDir + "/unitTestResults.pkl", "wb")
pklr = Pickler(resFile, protocol=2)
pklr.dump(unitTestResults)
pklr.dump(results)
@@ -211,6 +244,7 @@ def split(self, logFile):
# ================================================================================
+
def main():
try:
import argparse
@@ -218,9 +252,9 @@ def main():
import archived_argparse as argparse
parser = argparse.ArgumentParser()
- parser.add_argument('-l', '--logFile', dest='logFile', required=True)
- parser.add_argument('-v', '--verbose', default=False, action='store_true')
- parser.add_argument('-s', '--outFile', dest='outFile')
+ parser.add_argument("-l", "--logFile", dest="logFile", required=True)
+ parser.add_argument("-v", "--verbose", default=False, action="store_true")
+ parser.add_argument("-s", "--outFile", dest="outFile")
args = parser.parse_args()
logFile = args.logFile
diff --git a/tests/test_config-map.py b/tests/test_config-map.py
index c80aa74908d9..a8dd47628ef5 100755
--- a/tests/test_config-map.py
+++ b/tests/test_config-map.py
@@ -2,14 +2,14 @@
import re
-KEYS_RE="(CMS_BOT_BRANCH|CVMFS_INSTALL_IMAGE|DEBUG_EXTERNALS|SKIP_TESTS|REQUIRED_TEST|FORCE_FULL_IB|SLAVE_LABELS|SINGULARITY|IB_ONLY|BUILD_DAY|NO_IB|SCRAM_ARCH|RELEASE_QUEUE|BUILD_PATCH_RELEASE|PKGTOOLS_TAG|CMSDIST_TAG|RELEASE_BRANCH|ADDITIONAL_TESTS|PR_TESTS|DISABLED|ALWAYS_TAG_CMSSW|DO_STATIC_CHECKS|PROD_ARCH|ENABLE_DEBUG|PRS_TEST_CLANG|MESOS_QUEUE|DO_NOT_INSTALL|BUILD_HOUR|IB_WEB_PAGE|DOCKER_IMG|SPACK)"
+KEYS_RE = "(CMS_BOT_BRANCH|CVMFS_INSTALL_IMAGE|DEBUG_EXTERNALS|SKIP_TESTS|REQUIRED_TEST|FORCE_FULL_IB|SLAVE_LABELS|SINGULARITY|IB_ONLY|BUILD_DAY|NO_IB|SCRAM_ARCH|RELEASE_QUEUE|BUILD_PATCH_RELEASE|PKGTOOLS_TAG|CMSDIST_TAG|RELEASE_BRANCH|ADDITIONAL_TESTS|PR_TESTS|DISABLED|ALWAYS_TAG_CMSSW|DO_STATIC_CHECKS|PROD_ARCH|ENABLE_DEBUG|PRS_TEST_CLANG|MESOS_QUEUE|DO_NOT_INSTALL|BUILD_HOUR|IB_WEB_PAGE|DOCKER_IMG|SPACK)"
if __name__ == "__main__":
- for l in open("config.map").read().split("\n"):
- if not l:
- continue
- l = l.strip(";")
- for p in l.split(";"):
- assert("=" in p)
- (key, value) = p.split("=")
- assert(re.match(KEYS_RE, key))
+ for l in open("config.map").read().split("\n"):
+ if not l:
+ continue
+ l = l.strip(";")
+ for p in l.split(";"):
+ assert "=" in p
+ (key, value) = p.split("=")
+ assert re.match(KEYS_RE, key)
diff --git a/tests/test_logreaderUtils.py b/tests/test_logreaderUtils.py
index 34e54cfe330e..ef527ffaebb1 100644
--- a/tests/test_logreaderUtils.py
+++ b/tests/test_logreaderUtils.py
@@ -114,12 +114,19 @@
class TestSequenceFunctions(unittest.TestCase):
-
def test_unittestlogs(self):
config_list = []
custom_rule_set = [
- {"str_to_match": "test (.*) had ERRORS", "name": "{0}{1}{2} failed", "control_type": ResultTypeEnum.ISSUE},
- {"str_to_match": '===== Test "([^\s]+)" ====', "name": "{0}", "control_type": ResultTypeEnum.TEST}
+ {
+ "str_to_match": "test (.*) had ERRORS",
+ "name": "{0}{1}{2} failed",
+ "control_type": ResultTypeEnum.ISSUE,
+ },
+ {
+ "str_to_match": '===== Test "([^\s]+)" ====',
+ "name": "{0}",
+ "control_type": ResultTypeEnum.TEST,
+ },
]
for index, l in enumerate(unittestlog.split("\n")):
config_list = add_exception_to_config(l, index, config_list, custom_rule_set)
@@ -127,5 +134,5 @@ def test_unittestlogs(self):
print("Example config file in %s" % ("/tmp/unittestlogs.log-read_config"))
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/tests/test_watchers.py b/tests/test_watchers.py
index 965a4b0fce8c..3bcb87f2927c 100755
--- a/tests/test_watchers.py
+++ b/tests/test_watchers.py
@@ -2,55 +2,59 @@
from __future__ import print_function
import os
import sys
+
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from releases import *
from categories import *
import yaml
+
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import re
+
# Validate the schema of watchers.
KEY_RE = "^[^@]+"
VALUE_RE = "[A-Za-z0-0.*+]"
w = yaml.load(open("watchers.yaml", "r"), Loader=Loader)
-assert(type(w) == dict)
-for (key, value) in w.items():
- assert(type(key) == str)
- assert(re.match(KEY_RE, key))
- assert(type(value) == list)
- for x in value:
- assert(type(x) == str)
- assert(re.match(VALUE_RE, x))
+assert type(w) == dict
+for key, value in w.items():
+ assert type(key) == str
+ assert re.match(KEY_RE, key)
+ assert type(value) == list
+ for x in value:
+ assert type(x) == str
+ assert re.match(VALUE_RE, x)
-assert(CMSSW_CATEGORIES)
-assert(type(CMSSW_CATEGORIES) == dict)
+assert CMSSW_CATEGORIES
+assert type(CMSSW_CATEGORIES) == dict
PACKAGE_RE = "^([A-Z][0-9A-Za-z]*(/[a-zA-Z][0-9A-Za-z]*|)|.gitignore|pull_request_template.md|.clang-[^/]+)$"
-for (key, value) in CMSSW_CATEGORIES.items():
- assert(type(key) == str)
- assert(type(value) == list)
- if len(value)==0:continue
- if key == "externals":
- assert(len(value)>0)
- continue
- for p in value:
- print("checking", p)
- assert(type(p) == str)
- assert(re.match(PACKAGE_RE, p))
+for key, value in CMSSW_CATEGORIES.items():
+ assert type(key) == str
+ assert type(value) == list
+ if len(value) == 0:
+ continue
+ if key == "externals":
+ assert len(value) > 0
+ continue
+ for p in value:
+ print("checking", p)
+ assert type(p) == str
+ assert re.match(PACKAGE_RE, p)
if os.path.exists("super-users.yaml"):
- w = yaml.load(open("super-users.yaml", "r"), Loader=Loader)
- assert(type(w) == list)
- for p in w:
- assert(type(p) == str)
- assert(re.match(KEY_RE, p))
+ w = yaml.load(open("super-users.yaml", "r"), Loader=Loader)
+ assert type(w) == list
+ for p in w:
+ assert type(p) == str
+ assert re.match(KEY_RE, p)
print("Finished with success")
diff --git a/trigger_jenkins_job.py b/trigger_jenkins_job.py
index ee459abd5c94..ed32a39f84fb 100755
--- a/trigger_jenkins_job.py
+++ b/trigger_jenkins_job.py
@@ -3,21 +3,41 @@
from jenkins_callback import build_jobs
import json
+
def process(opts):
- xparam = []
- for param in opts.params:
- p,v=param.split("=",1)
- xparam.append({"name":p,"value":v})
- build_jobs(opts.server, [(json.dumps({"parameter":xparam}),opts.job)], headers={}, user=opts.user)
+ xparam = []
+ for param in opts.params:
+ p, v = param.split("=", 1)
+ xparam.append({"name": p, "value": v})
+ build_jobs(
+ opts.server, [(json.dumps({"parameter": xparam}), opts.job)], headers={}, user=opts.user
+ )
+
if __name__ == "__main__":
- parser = OptionParser(usage="%prog")
- parser.add_option("-j", "--job", dest="job", help="Jenkins jobs to trigger", default=None)
- parser.add_option("-s", "--server", dest="server", help="Jenkins server URL e.g. https://cmssdt.cern.ch/cms-jenkins", default=None)
- parser.add_option("-u", "--user", dest="user", help="Jenkins user name to trigger the job", default="cmssdt")
- parser.add_option('-p', '--parameter', dest='params', help="Job parameter e.g. -p Param=Value. One can use this multiple times.",
- action="append", type="string", metavar="PARAMETERS")
- opts, args = parser.parse_args()
+ parser = OptionParser(usage="%prog")
+ parser.add_option("-j", "--job", dest="job", help="Jenkins jobs to trigger", default=None)
+ parser.add_option(
+ "-s",
+ "--server",
+ dest="server",
+ help="Jenkins server URL e.g. https://cmssdt.cern.ch/cms-jenkins",
+ default=None,
+ )
+ parser.add_option(
+ "-u", "--user", dest="user", help="Jenkins user name to trigger the job", default="cmssdt"
+ )
+ parser.add_option(
+ "-p",
+ "--parameter",
+ dest="params",
+ help="Job parameter e.g. -p Param=Value. One can use this multiple times.",
+ action="append",
+ type="string",
+ metavar="PARAMETERS",
+ )
+ opts, args = parser.parse_args()
- if (not opts.job) or (not opts.server): parser.error("Missing job/server parameter.")
- process(opts)
+ if (not opts.job) or (not opts.server):
+ parser.error("Missing job/server parameter.")
+ process(opts)
diff --git a/utils/cmsdist_pip_pkgs_update.py b/utils/cmsdist_pip_pkgs_update.py
index c5eff938f665..6a33aa35e776 100755
--- a/utils/cmsdist_pip_pkgs_update.py
+++ b/utils/cmsdist_pip_pkgs_update.py
@@ -1,159 +1,232 @@
#!/usr/bin/env python
from __future__ import print_function
-import sys,re, json, os
+import sys, re, json, os
import subprocess
from os.path import exists, join
+
def check_python_require(py_str, condition):
- if not condition: return True
- if 'py3' in condition: return py_str.startswith('3.')
- py_version = list(map(int,py_str.split('.')))
- for cond in condition.split(","):
- m = re.match('^(.*?)([0-9].*)', cond.replace(" ",""))
- if m:
- op = m.group(1)
- regex = False
- req = m.group(2).split('.')
- if op=='':
- op = '=='
- req.append('*')
- while req[-1]=="*":
- req.pop()
- regex = True
- if regex:
- req_str = '^'+'.'.join(req)+'\..+$'
- if op == '==':
- if not re.match(req_str, py_str): return False
- elif op == '!=':
- if re.match(req_str, py_str): return False
- try:
- req = list(map(int,req))
- except:
- if "'" in req: continue
- #print(py_str,"A", condition,"B",req)
- #raise
- if op == '>':
- if py_version<=req: return False
- elif op == '>=':
- if py_version<req: return False
- elif op == '<':
- if py_version>=req: return False
- elif op == '<=':
- if py_version>req: return False
- return True
+ if not condition:
+ return True
+ if "py3" in condition:
+ return py_str.startswith("3.")
+ py_version = list(map(int, py_str.split(".")))
+ for cond in condition.split(","):
+ m = re.match("^(.*?)([0-9].*)", cond.replace(" ", ""))
+ if m:
+ op = m.group(1)
+ regex = False
+ req = m.group(2).split(".")
+ if op == "":
+ op = "=="
+ req.append("*")
+ while req[-1] == "*":
+ req.pop()
+ regex = True
+ if regex:
+ req_str = "^" + ".".join(req) + "\..+$"
+ if op == "==":
+ if not re.match(req_str, py_str):
+ return False
+ elif op == "!=":
+ if re.match(req_str, py_str):
+ return False
+ try:
+ req = list(map(int, req))
+ except:
+ if "'" in req:
+ continue
+ # print(py_str,"A", condition,"B",req)
+ # raise
+ if op == ">":
+ if py_version <= req:
+ return False
+ elif op == ">=":
+ if py_version < req:
+ return False
+ elif op == "<":
+ if py_version >= req:
+ return False
+ elif op == "<=":
+ if py_version > req:
+ return False
+ return True
+
def requirements_file(cmsdist):
- return join(cmsdist, "pip", "requirements.txt")
+ return join(cmsdist, "pip", "requirements.txt")
+
def read_requirements(cmsdist):
- print("Reading requirements ...")
- req_file = requirements_file(cmsdist)
- req_data = []
- proc = subprocess.Popen('grep "^### RPM" %s/python3.spec | sed "s|^.* python3 *||"' % (cmsdist), stdout=subprocess.PIPE,shell=True, universal_newlines=True)
- py3_version = proc.stdout.read().strip()
- print(" Python3:", py3_version)
- if exists(req_file):
- with open(req_file) as ref:
- for line in ref.readlines():
- req_data.append({'line': line.strip(), 'data': {}})
- line=line.strip().replace(" ","")
- if line.startswith("#"): continue
- if "==" in line:
- p,v = line.split("==",1)
- req_data[-1]['data']['name'] = p
- req_data[-1]['data']['pip_name'] = p
- req_data[-1]['data']['version'] = v
- req_data[-1]['data']['python']=py3_version
- exfile = join(cmsdist, "pip",p+".file")
- if exists (exfile):
- with open(exfile) as xref:
- for xline in xref.readlines():
- m = re.match("^%define\s+pip_name\s+([^\s]+)\s*$",xline.strip())
- if m:
- req_data[-1]['data']['pip_name'] = m.group(1)
- break
- return req_data
+ print("Reading requirements ...")
+ req_file = requirements_file(cmsdist)
+ req_data = []
+ proc = subprocess.Popen(
+ 'grep "^### RPM" %s/python3.spec | sed "s|^.* python3 *||"' % (cmsdist),
+ stdout=subprocess.PIPE,
+ shell=True,
+ universal_newlines=True,
+ )
+ py3_version = proc.stdout.read().strip()
+ print(" Python3:", py3_version)
+ if exists(req_file):
+ with open(req_file) as ref:
+ for line in ref.readlines():
+ req_data.append({"line": line.strip(), "data": {}})
+ line = line.strip().replace(" ", "")
+ if line.startswith("#"):
+ continue
+ if "==" in line:
+ p, v = line.split("==", 1)
+ req_data[-1]["data"]["name"] = p
+ req_data[-1]["data"]["pip_name"] = p
+ req_data[-1]["data"]["version"] = v
+ req_data[-1]["data"]["python"] = py3_version
+ exfile = join(cmsdist, "pip", p + ".file")
+ if exists(exfile):
+ with open(exfile) as xref:
+ for xline in xref.readlines():
+ m = re.match("^%define\s+pip_name\s+([^\s]+)\s*$", xline.strip())
+ if m:
+ req_data[-1]["data"]["pip_name"] = m.group(1)
+ break
+ return req_data
+
def check_updates(req_data):
- from datetime import datetime
- epoch = datetime.utcfromtimestamp(0)
- ignore_line = []
- ignored = []
- ignore_count = 0
- if not exists('cache'): os.system("mkdir -p cache")
- print("Checking for updates ...")
- for data in req_data:
- xline = data['line'].replace(" ","")
- if xline=="": continue
- if xline.startswith('#'):
- m = re.match('#NO_AUTO_UPDATE:((\d+):|).*', xline)
- if m:
- try: ignore_count = int(m.group(2))
- except: ignore_count = 1
- ignore_line = [data['line']]
- elif ignore_count: ignore_line.append(" "+data['line'])
- continue
- p = data['data']['name']
- op = data['data']['pip_name']
- ov = data['data']['version']
- if exists("cache/%s.json" % p):
- jdata = json.load(open("cache/%s.json" % p))
- else:
- o = subprocess.Popen('curl -s -k -L https://pypi.python.org/pypi/%s/json' %(op), stdout=subprocess.PIPE,shell=True, universal_newlines=True)
- jdata = json.loads(o.stdout.read())
- json.dump(jdata, open("cache/%s.json" % p, 'w'), sort_keys=True, indent=2)
- if True:
- v = jdata['info']['version']
- if ignore_count:
- ignore_count-=1
- if ov!=v:
- ignored.append("*** WARNING: %s: Newer version %s found (existing: %s) but not updating due to following comment in requitements.txt." % (p, v, ov))
- if ignore_line: ignored.append(" %s" % ("\n".join(ignore_line)))
- ignore_line= []
- continue
- if 'python' in data['data']:
- py_ver = data['data']['python']
- #FIXME: Ignore python version check
- if False and not check_python_require(py_ver, jdata['info']['requires_python']):
- releases = []
- msg = []
- for i in jdata['releases']:
- for d in jdata['releases'][i]:
- if d['python_version']!='source': continue
- if not check_python_require(py_ver, d['requires_python']):
- msg.append(" INFO: %s: Ignoring version %s due to python requirement: %s%s" % (p,i,py_ver,d['requires_python']))
+ from datetime import datetime
+
+ epoch = datetime.utcfromtimestamp(0)
+ ignore_line = []
+ ignored = []
+ ignore_count = 0
+ if not exists("cache"):
+ os.system("mkdir -p cache")
+ print("Checking for updates ...")
+ for data in req_data:
+ xline = data["line"].replace(" ", "")
+ if xline == "":
+ continue
+ if xline.startswith("#"):
+ m = re.match("#NO_AUTO_UPDATE:((\d+):|).*", xline)
+ if m:
+ try:
+ ignore_count = int(m.group(2))
+ except:
+ ignore_count = 1
+ ignore_line = [data["line"]]
+ elif ignore_count:
+ ignore_line.append(" " + data["line"])
+ continue
+ p = data["data"]["name"]
+ op = data["data"]["pip_name"]
+ ov = data["data"]["version"]
+ if exists("cache/%s.json" % p):
+ jdata = json.load(open("cache/%s.json" % p))
+ else:
+ o = subprocess.Popen(
+ "curl -s -k -L https://pypi.python.org/pypi/%s/json" % (op),
+ stdout=subprocess.PIPE,
+ shell=True,
+ universal_newlines=True,
+ )
+ jdata = json.loads(o.stdout.read())
+ json.dump(jdata, open("cache/%s.json" % p, "w"), sort_keys=True, indent=2)
+ if True:
+ v = jdata["info"]["version"]
+ if ignore_count:
+ ignore_count -= 1
+ if ov != v:
+ ignored.append(
+ "*** WARNING: %s: Newer version %s found (existing: %s) but not updating due to following comment in requitements.txt."
+ % (p, v, ov)
+ )
+ if ignore_line:
+ ignored.append(" %s" % ("\n".join(ignore_line)))
+ ignore_line = []
+ continue
+ if "python" in data["data"]:
+ py_ver = data["data"]["python"]
+ # FIXME: Ignore python version check
+ if False and not check_python_require(py_ver, jdata["info"]["requires_python"]):
+ releases = []
+ msg = []
+ for i in jdata["releases"]:
+ for d in jdata["releases"][i]:
+ if d["python_version"] != "source":
+ continue
+ if not check_python_require(py_ver, d["requires_python"]):
+ msg.append(
+ " INFO: %s: Ignoring version %s due to python requirement: %s%s"
+ % (p, i, py_ver, d["requires_python"])
+ )
+ continue
+ uptime = (
+ datetime.strptime(d["upload_time"], "%Y-%m-%dT%H:%M:%S") - epoch
+ ).total_seconds()
+ releases.append(
+ {
+ "version": i,
+ "upload": uptime,
+ "requires_python": d["requires_python"],
+ }
+ )
+ msg.append(
+ " INFO: %s: Matched version %s due to python requirement: %s %s"
+ % (p, i, py_ver, d["requires_python"])
+ )
+ newlist = sorted(releases, key=lambda k: k["upload"])
+ if newlist:
+ v = newlist[-1]["version"]
+ if ov != v:
+ for m in msg:
+ print(m)
+ if ov == v:
continue
- uptime = (datetime.strptime(d['upload_time'],'%Y-%m-%dT%H:%M:%S')-epoch).total_seconds()
- releases.append({'version':i, 'upload': uptime, 'requires_python': d['requires_python']})
- msg.append(" INFO: %s: Matched version %s due to python requirement: %s %s" % (p,i,py_ver,d['requires_python']))
- newlist = sorted(releases, key=lambda k: k['upload'])
- if newlist: v = newlist[-1]['version']
- if ov != v:
- for m in msg: print(m)
- if ov==v: continue
- m = re.match('^\s*%s\s*==\s*%s(\s*;.+|)$' % (p,ov),data['line'])
- try:
- data['line'] = '%s==%s%s' % (p,v,m.group(1))
- print("NEW:",p,ov,v)
- except:
- print("Wrong data:",p,ov,v)
- for i in ignored: print (i)
+ m = re.match("^\s*%s\s*==\s*%s(\s*;.+|)$" % (p, ov), data["line"])
+ try:
+ data["line"] = "%s==%s%s" % (p, v, m.group(1))
+ print("NEW:", p, ov, v)
+ except:
+ print("Wrong data:", p, ov, v)
+ for i in ignored:
+ print(i)
+
def rewrite_requiremets(red_data, cmsdist):
- req_file = requirements_file(cmsdist)
- with open(req_file, "w") as ref:
- for d in req_data:
- ref.write(d['line']+"\n")
+ req_file = requirements_file(cmsdist)
+ with open(req_file, "w") as ref:
+ for d in req_data:
+ ref.write(d["line"] + "\n")
+
if __name__ == "__main__":
- from optparse import OptionParser
- parser = OptionParser(usage="%prog")
- parser.add_option("-C", "--clean-cache",dest="clean_cache",action="store_true", help="Cleanup cache directory and re-check PyPI for updates.", default=False)
- parser.add_option("-u", "--update", dest="update", action="store_true", help="Update requirements.txt", default=False)
- parser.add_option("-c", "--cmsdist", dest="cmsdist", help="cmsdist directory", type=str, default="cmsdist")
- opts, args = parser.parse_args()
- if opts.clean_cache: os.system("rm -rf cache")
- req_data = read_requirements(opts.cmsdist)
- check_updates(req_data)
- if opts.update: rewrite_requiremets(req_data, opts.cmsdist)
+ from optparse import OptionParser
+ parser = OptionParser(usage="%prog")
+ parser.add_option(
+ "-C",
+ "--clean-cache",
+ dest="clean_cache",
+ action="store_true",
+ help="Cleanup cache directory and re-check PyPI for updates.",
+ default=False,
+ )
+ parser.add_option(
+ "-u",
+ "--update",
+ dest="update",
+ action="store_true",
+ help="Update requirements.txt",
+ default=False,
+ )
+ parser.add_option(
+ "-c", "--cmsdist", dest="cmsdist", help="cmsdist directory", type=str, default="cmsdist"
+ )
+ opts, args = parser.parse_args()
+ if opts.clean_cache:
+ os.system("rm -rf cache")
+ req_data = read_requirements(opts.cmsdist)
+ check_updates(req_data)
+ if opts.update:
+ rewrite_requiremets(req_data, opts.cmsdist)