diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 4dedda6f..d7adca11 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [2.7, 3.8]
+        python-version: [3.8]
 
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 63c310a5..f459663f 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -58,6 +58,7 @@ install(TARGETS xpedite-pic DESTINATION "lib" COMPONENT libraries)
 
 ######################### Python bindings #############################
 
+set(PYBIND11_PYTHON_VERSION 3.8 CACHE STRING "")
 find_package(pybind11 REQUIRED)
 pybind11_add_module(xpediteBindings lib/xpedite/pybind/Bindings.cpp lib/xpedite/framework/SamplesLoader.C)
 install(TARGETS xpediteBindings DESTINATION "lib" COMPONENT libraries)
diff --git a/build.sh b/build.sh
index 2fb548f9..66d2c3a2 100755
--- a/build.sh
+++ b/build.sh
@@ -33,7 +33,7 @@ BUILD_TYPE=Release
 BUILD_VIVIFY=0
 BUILD_JAVA=0
 VERBOSE=0
-PYTHON_VERSION=2.7
+PYTHON_VERSION=3.8
 
 while true ; do
   case "$1" in
diff --git a/demo/Demo.H b/demo/Demo.H
index 1fa80a43..72f8f383 100644
--- a/demo/Demo.H
+++ b/demo/Demo.H
@@ -30,9 +30,6 @@ namespace xpedite { namespace demo {
   using Indices = std::array;
 
   inline void initialize(size_t core_ = 0) {
-    if(mlockall(MCL_CURRENT | MCL_FUTURE)) {
-      std::cerr << "failed to mlock all pages" << std::endl;
-    }
     using namespace xpedite::framework;
     if(!xpedite::framework::initialize("xpedite-appinfo.txt", {AWAIT_PROFILE_BEGIN})) {
       throw std::runtime_error {"failed to init xpedite"};
diff --git a/include/xpedite/probes/CallSite.H b/include/xpedite/probes/CallSite.H
index 89547310..1e18896e 100644
--- a/include/xpedite/probes/CallSite.H
+++ b/include/xpedite/probes/CallSite.H
@@ -12,6 +12,7 @@
 #pragma once
 
 #include
+#include
 
 namespace xpedite { namespace probes {
diff --git a/include/xpedite/probes/ProbeKey.H b/include/xpedite/probes/ProbeKey.H
index ff24d58a..357d3062 100644
--- a/include/xpedite/probes/ProbeKey.H
+++ b/include/xpedite/probes/ProbeKey.H
@@ -8,6 +8,7 @@
 #pragma once
 
 #include
+#include
 
 namespace xpedite { namespace probes {
diff --git a/install.sh b/install.sh
index 60485a52..25e6cd71 100755
--- a/install.sh
+++ b/install.sh
@@ -31,7 +31,7 @@ fi
 eval set -- "$ARGS"
 ENABLE_PMU=0
 VERBOSE=0
-PYTHON_VERSION=2
+PYTHON_VERSION=3
 
 while true ; do
   case "$1" in
diff --git a/jni/build.gradle b/jni/build.gradle
index 76de4d83..e23d8723 100644
--- a/jni/build.gradle
+++ b/jni/build.gradle
@@ -25,7 +25,7 @@ sourceSets {
 }
 
 task xpediteJar(type: Jar) {
-  baseName = 'xpedite'
+  archiveBaseName = 'xpedite'
   exclude(['**/demo/**'])
   manifest {
     attributes(
@@ -42,7 +42,7 @@ task xpediteJar(type: Jar) {
 task demoJar(type: Jar) {
   include(['**/demo/**'])
   exclude(['**/com/xpedite/probes'])
-  baseName = 'xpediteDemo'
+  archiveBaseName = 'xpediteDemo'
   manifest {
     attributes(
       'Implementation-Title': 'Xpedite Demo Jar',
diff --git a/lib/xpedite/framework/StorageMgr.H b/lib/xpedite/framework/StorageMgr.H
index e940d811..8288535c 100644
--- a/lib/xpedite/framework/StorageMgr.H
+++ b/lib/xpedite/framework/StorageMgr.H
@@ -11,6 +11,7 @@
 #pragma once
 
 #include
+#include
 
 namespace xpedite { namespace framework {
diff --git a/scripts/.pylintrc b/scripts/.pylintrc
old mode 100755
new mode 100644
index e46b3cc0..996c4f7f
--- a/scripts/.pylintrc
+++ b/scripts/.pylintrc
@@ -1,4 +1,4 @@
-[MASTER]
+[MAIN]
 
 ############## xpedite customizations ##############
 
@@ -32,349
+32,730 @@ ignored-modules=datetime,jnius,cStringIO,itertools,gevent,select,numpy,pygments. # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). This supports can work # with qualified names. -ignored-classes=Connection,_socketobject +ignored-classes=Connection,_socketobject,optparse.Values,thread._local,_thread._local,argparse.Namespace #################################################### -# Specify a configuration file. -#rcfile= +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +#ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +#ignored-modules= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= -# Pickle collected data for later comparisons. -persistent=yes +# Use multiple processes to speed up Pylint. 
Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 -# List of plugins (as comma separated values of python modules names) to load, +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, # usually to register additional checkers. load-plugins= -# Use multiple processes to speed up Pylint. -jobs=1 +# Pickle collected data for later comparisons. +persistent=yes + +# Resolve imports to .pyi stubs if available. May reduce no-member messages and +# increase not-an-iterable messages. +prefer-stubs=no + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.12 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no +[BASIC] -[MESSAGES CONTROL] +# Naming style matching correct argument names. +argument-naming-style=snake_case -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. -#enable= +# Naming style matching correct attribute names. +attr-naming-style=snake_case -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. 
For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -# Python 3 specific disable - all disables following 'bad-option-value' -# are disabled to keep code compatible with both python2 and python3 -disable=import-star-module-level,old-octal-literal,oct-method,no-member,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,duplicate-code,range-builtin-not-iterating,bad-continuation,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,broad-except,wrong-import-order,hex-method,nonzero-method,ungrouped-imports,map-builtin-not-iterating,chained-comparison,bad-option-value,useless-object-inheritance,import-outside-toplevel,invalid-name,raise-missing-from,deprecated-method,consider-using-dict-items,super-with-arguments,unspecified-encoding,consider-using-f-string +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata -[REPORTS] +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=colorized +# Naming style matching correct class attribute names. +class-attribute-naming-style=any -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= -# Tells whether to display a full report or only the messages -reports=yes +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. 
+#class-const-rgx= -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= +# Naming style matching correct class names. +class-naming-style=PascalCase +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= -[BASIC] +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,input +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,30}$ +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. 
+#typealias-rgx= -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ +# Naming style matching correct variable names. +variable-naming-style=snake_case -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,30}$ +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,30}$ -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ +[CLASSES] -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,30}$ -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ +[DESIGN] -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= -[ELIF] +# Maximum number of arguments for function / method. +max-args=5 -# Maximum number of nested blocks for function / method body -max-nested-blocks=7 +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. 
+overgeneral-exceptions=builtins.BaseException,builtins.Exception [FORMAT] -# Maximum number of characters on a single line. -max-line-length=120 +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )??$ +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=2 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=" " + +# Maximum number of characters on a single line. +max-line-length=160 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator -# Maximum number of lines in a module -max-module-lines=2000 +[IMPORTS] -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=2 +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= [LOGGING] +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + # Logging modules to check that the string format arguments are in logging -# function parameter format +# function parameter format. logging-modules=logging +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). 
You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + import-star-module-level, + old-octal-literal, + oct-method, + no-member, + print-statement, + unpacking-in-except, + parameter-unpacking, + backtick, + old-raise-syntax, + old-ne-operator, + long-suffix, + dict-view-method, + dict-iter-method, + metaclass-assignment, + next-method-called, + raising-string, + indexing-exception, + raw_input-builtin, + long-builtin, + file-builtin, + execfile-builtin, + coerce-builtin, + cmp-builtin, + buffer-builtin, + basestring-builtin, + apply-builtin, + filter-builtin-not-iterating, + using-cmp-argument, + useless-suppression, + duplicate-code, + range-builtin-not-iterating, + bad-continuation, + suppressed-message, + no-absolute-import, + old-division, + cmp-method, + reload-builtin, + zip-builtin-not-iterating, + intern-builtin, + unichr-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + input-builtin, + round-builtin, + broad-except, + wrong-import-order, + hex-method, + nonzero-method, + ungrouped-imports, + map-builtin-not-iterating, + chained-comparison, + bad-option-value, + useless-object-inheritance, + import-outside-toplevel, + invalid-name, + raise-missing-from, + deprecated-method, + consider-using-dict-items, + super-with-arguments, + unspecified-encoding, + consider-using-f-string, + broad-exception-raised, + too-few-public-methods, + too-many-instance-attributes, + too-many-arguments, + too-many-locals, + too-many-branches, + too-many-statements, + too-many-nested-blocks, + too-many-public-methods, + consider-using-generator, + superfluous-parens, + unnecessary-list-index-lookup, + use-yield-from, + deprecated-module + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO +notes=FIXME, + XXX, + TODO +# Regular expression of note tags to take in consideration. +notes-rgx= -[SIMILARITIES] -# Minimum lines number of a similarity. 
-min-similarity-lines=50 +[REFACTORING] -# Ignore comments when computing similarities. -ignore-comments=yes +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 -# Ignore docstrings when computing similarities. -ignore-docstrings=yes +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error -# Ignore imports when computing similarities. -ignore-imports=no +# Let 'consider-using-join' be raised when the separator to join on would be +# non-empty (resulting in expected fixes of the type: ``"- " + " - +# ".join(items)``) +suggest-join-with-non-empty-separator=yes -[SPELLING] +[REPORTS] -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) -# List of comma separated words that should not be checked. -spelling-ignore-words= +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no +# Tells whether to display a full report or only the messages. +reports=no +# Activate the evaluation score. +score=yes -[TYPECHECK] -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes +[SIMILARITIES] -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= +# Comments are removed from the similarity computation +ignore-comments=yes +# Docstrings are removed from the similarity computation +ignore-docstrings=yes -[VARIABLES] +# Imports are removed from the similarity computation +ignore-imports=yes -# Tells whether we should check for unused import in __init__ files. -init-import=no +# Signatures are removed from the similarity computation +ignore-signatures=yes -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy +# Minimum lines number of a similarity. +min-similarity-lines=4 -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. 
-additional-builtins= -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb +[SPELLING] +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 -[CLASSES] +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls +# List of comma separated words that should not be checked. +spelling-ignore-words= -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no -[DESIGN] +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no -# Maximum number of arguments for function / method -max-args=25 +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* -# Maximum number of locals for function / method body -max-locals=25 +[TYPECHECK] -# Maximum number of return / yield for function / method body -max-returns=28 +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager -# Maximum number of branch for function / method body -max-branches=36 +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= -# Maximum number of statements in function / method body -max-statements=104 +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes -# Maximum number of parents for a class (see R0901). -max-parents=7 +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. 
+ignore-on-opaque-inference=yes -# Maximum number of attributes for a class (see R0902). -max-attributes=50 +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init -# Minimum number of public methods for a class (see R0903). -min-public-methods=0 +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace -# Maximum number of public methods for a class (see R0904). -max-public-methods=38 +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 -[IMPORTS] +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=regsub,TERMIOS,Bastion,rexec +# List of decorators that change the signature of a decorated function. +signature-mutators= -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= +[VARIABLES] -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes -[EXCEPTIONS] +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception +# List of qualified module names which can have objects that can redefine +# builtins. 
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/scripts/bin/xpedite b/scripts/bin/xpedite index 30ade75c..97e6a5e9 100755 --- a/scripts/bin/xpedite +++ b/scripts/bin/xpedite @@ -26,8 +26,8 @@ logger.init() from xpedite.dependencies import Package, DEPENDENCY_LOADER DEPENDENCY_LOADER.load(Package.Cement) -from cement.core.foundation import CementApp -from cement.ext.ext_argparse import ArgparseController, expose +from cement import App, CaughtSignal, Controller, ex + import logging import xpedite @@ -54,15 +54,16 @@ def _loadDriver(name = None): return Driver() return driver -class AppController(ArgparseController): + +class AppController(Controller): class Meta: label = 'base' - @expose(hide=True) + @ex(hide=True) def default(self): self.probes() - @expose( + @ex( arguments=[ (['-p', '--profileInfo'], dict(action='store', required=True, help=profileInfoHelp)), (['-b', '--createBenchmark'], dict(action='store', help='store Live profile report at the given path, for future benchmarking')), @@ -94,7 +95,7 @@ class AppController(ArgparseController): driver = _loadDriver(self.app.pargs.driver) driver.render(profileInfo, report, leanReports = self.app.pargs.lean, cprofile = cprofile) - @expose( + @ex( arguments=[ (['-p', '--profileInfo'], dict(action='store', required=True, help=profileInfoHelp)), ], @@ -113,7 +114,7 @@ class AppController(ArgparseController): LOGGER.error('failed to locate probes in app %s. Have you instrumented any ?', profileInfo.appName) LOGGER.info('') - @expose( + @ex( arguments=[ (['-H', '--hostname'], dict(action='store', default='127.0.0.1', help='host where the app runs')), (['-a', '--appInfo'], dict(action='store', required=True, help='path to appInfo file, used in xpedite framework initilization')), @@ -124,7 +125,7 @@ class AppController(ArgparseController): from xpedite.profiler import Profiler Profiler.generate(self.app.pargs.appInfo, hostname=self.app.pargs.hostname) - @expose( + @ex( arguments=[ (['-H', '--home'], dict(action='store', required=False, help='Home dir for xpedite shell')), (['-z', '--zip'], dict(action='store', required=False, help='Archive a xpedite report. 
Takes path of a xpedite notebook as argument')), @@ -141,7 +142,7 @@ class AppController(ArgparseController): return home = self.app.pargs.home if not home: - LOGGER.warn('No home directory specified for shell - using current working directory') + LOGGER.warning('No home directory specified for shell - using current working directory') home = os.getcwd() if self.app.pargs.unzip: @@ -149,13 +150,13 @@ class AppController(ArgparseController): deflator.deflate(home) launchJupyter(home) -class FrameworkController(ArgparseController): +class FrameworkController(Controller): class Meta: label = 'FrameworkController' stacked_on = 'base' stacked_type = 'embedded' - @expose( + @ex( arguments=[ (['-p', '--profileInfo'], dict(action='store', required=True, help=profileInfoHelp)), (['-b', '--createBenchmark'], dict(action='store', help='store report at the given path, for future benchmarking')), @@ -191,7 +192,7 @@ class FrameworkController(ArgparseController): driver = _loadDriver(self.app.pargs.driver) driver.render(profileInfo, report, leanReports = self.app.pargs.lean, cprofile = cprofile) - @expose( + @ex( arguments=[ ], help='Print the CPU info for the current machine' @@ -201,7 +202,7 @@ class FrameworkController(ArgparseController): cpuId = getCpuId() LOGGER.info('%s\n', cpuId) - @expose( + @ex( arguments=[ (['-c', '--cpuId'], dict(action='store', required=False, help='list pmu counters for the give cpu identifier')), ], @@ -213,7 +214,7 @@ class FrameworkController(ArgparseController): LOGGER.info(e) LOGGER.info('') - @expose( + @ex( arguments=[ (['-n', '--node'], dict(action='store', help='list pmu events for node - enter "all" to list events for all nodes')), (['-c', '--cpuId'], dict(action='store', required=False, help='list pmu counters for the give cpu identifier')), @@ -227,7 +228,7 @@ class FrameworkController(ArgparseController): LOGGER.info('%s', topdown.metricsToString(n)) LOGGER.info('') - @expose( + @ex( arguments=[ (['-n', '--node'], dict(action='store', help='list pmu events for node - enter "all" to list events for all nodes')), (['-c', '--cpuId'], dict(action='store', required=False, help='list pmu counters for the give cpu identifier')), @@ -244,7 +245,7 @@ class FrameworkController(ArgparseController): return LOGGER.info('%s\n', topdown.hierarchy) - @expose( + @ex( arguments=[ (['-e', '--enable'], dict(action='store_true', help='load xpedite kernel module to ENABLE hardware performance counters')), (['-d', '--disable'], dict(action='store_true', help='unload xpedite kernel module to DISABLE hardware performance counters')), @@ -264,7 +265,7 @@ class FrameworkController(ArgparseController): extractor = subprocess.call(cmdList) LOGGER.info('pmc status - %s\n', 'enabled' if isDriverLoaded() else 'disabled') -class XpediteClient(CementApp): +class XpediteClient(App): class Meta: label = 'Xpedite' base_controller = 'base' diff --git a/scripts/lib/setup.py b/scripts/lib/setup.py index a5e57422..22912e00 100644 --- a/scripts/lib/setup.py +++ b/scripts/lib/setup.py @@ -65,5 +65,7 @@ 'py-cpuinfo', 'jupyter', 'six', + 'ipynbname', + 'iinit', ], zip_safe=False) diff --git a/scripts/lib/xpedite/analytics/__init__.py b/scripts/lib/xpedite/analytics/__init__.py index a2edeeed..8795e723 100644 --- a/scripts/lib/xpedite/analytics/__init__.py +++ b/scripts/lib/xpedite/analytics/__init__.py @@ -60,7 +60,7 @@ def buildElapsedTimeBundles(txnCollections, classifier): else: if category not in categorySet: categorySet.add(category) - LOGGER.warn('current run missing trasactions for category "%s"', 
category) + LOGGER.warning('current run missing trasactions for category "%s"', category) else: scopeList = ', '.join([probe.getCanonicalName() for probe in probes if not probe.isAnonymous]) errMsg = ( diff --git a/scripts/lib/xpedite/benchmark/__init__.py b/scripts/lib/xpedite/benchmark/__init__.py index d78b8151..e004749e 100644 --- a/scripts/lib/xpedite/benchmark/__init__.py +++ b/scripts/lib/xpedite/benchmark/__init__.py @@ -102,14 +102,14 @@ def gatherBenchmarks(self, count): benchmark.dataSource = dataSource benchmarks.append(benchmark) else: - LOGGER.warn('skip processing benchmark %s. failed to load benchmark info', path) + LOGGER.warning('skip processing benchmark %s. failed to load benchmark info', path) if len(benchmarks) >= count: if i + 1 < len(self.benchmarkPaths): LOGGER.debug('skip processing %s benchmarks. limit reached.', self.benchmarkPaths[i+1:]) break else: - LOGGER.warn('skip processing benchmark %s. failed to locate benchmark files', path) + LOGGER.warning('skip processing benchmark %s. failed to locate benchmark files', path) return benchmarks @staticmethod diff --git a/scripts/lib/xpedite/benchmark/info.py b/scripts/lib/xpedite/benchmark/info.py index 12401dfe..ebd638e4 100644 --- a/scripts/lib/xpedite/benchmark/info.py +++ b/scripts/lib/xpedite/benchmark/info.py @@ -73,7 +73,7 @@ def loadBenchmarkInfo(path): legend = configParser.get(BENCHMARK_SECTION, BENCHMARK_LEGEND_KEY) if not configParser.has_section(BENCHMARK_CPU_INFO_SECTION): - LOGGER.warn('failed to load benchmark %s - cpu info missing', benchmarkName) + LOGGER.warning('failed to load benchmark %s - cpu info missing', benchmarkName) return None cpuId = configParser.get(BENCHMARK_CPU_INFO_SECTION, BENCHMARK_CPU_ID_KEY) cpuFrequency = configParser.get(BENCHMARK_CPU_INFO_SECTION, BENCHMARK_CPU_FREQUENCY_KEY) diff --git a/scripts/lib/xpedite/jupyter/archive.py b/scripts/lib/xpedite/jupyter/archive.py index 9adc5e11..552da0db 100644 --- a/scripts/lib/xpedite/jupyter/archive.py +++ b/scripts/lib/xpedite/jupyter/archive.py @@ -93,7 +93,7 @@ def deflate(self, extractPath): """ if not extractPath: import tempfile - LOGGER.warn('No directory specified for extracting files, setting /tmp as notebook directory') + LOGGER.warning('No directory specified for extracting files, setting /tmp as notebook directory') extractPath = tempfile.mkdtemp(prefix=EXPORT_PREFIX, dir='/tmp') self.archive.extractall(path=extractPath) return extractPath diff --git a/scripts/lib/xpedite/jupyter/commands.py b/scripts/lib/xpedite/jupyter/commands.py index a21032e9..22162ef9 100644 --- a/scripts/lib/xpedite/jupyter/commands.py +++ b/scripts/lib/xpedite/jupyter/commands.py @@ -14,7 +14,7 @@ """ import logging -from IPython.core.display import display, HTML +from IPython.display import display, HTML from xpedite.jupyter.context import context from xpedite.report.markup import ERROR_TEXT @@ -241,7 +241,7 @@ def __repr__(self): ) if self.profile.current.isEventsEnabled() else '' threshold = 1000 if len(self.profile.current) > threshold: - LOGGER.warn('too many transaction - showing only %d out of %d', threshold, len(self.profile.current)) + LOGGER.warning('too many transaction - showing only %d out of %d', threshold, len(self.profile.current)) strRepr += str(ReportBuilder().buildTimelineTable( self.profile.current, self.profile.probes, ResultOrder.Chronological, threshold, uid )) diff --git a/scripts/lib/xpedite/jupyter/data/config/custom/custom.css b/scripts/lib/xpedite/jupyter/data/config/custom/custom.css index b94cc5ed..61ce2a7f 100644 --- 
a/scripts/lib/xpedite/jupyter/data/config/custom/custom.css
+++ b/scripts/lib/xpedite/jupyter/data/config/custom/custom.css
@@ -6,12 +6,12 @@
 **
 *******************************************************************************************/
-@import 'flot.css';
-@import 'notebook.css';
-@import 'sunburst.css';
-@import 'bipartite.css';
-@import 'xpedite.css';
-@import 'darkTheme.css';
+@import '/static/flot.css';
+@import '/static/notebook.css';
+@import '/static/sunburst.css';
+@import '/static/bipartite.css';
+@import '/static/xpedite.css';
+@import '/static/darkTheme.css';
 
 body.notebook_app, body.notebook_app.command_mode {
   font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
diff --git a/scripts/lib/xpedite/jupyter/data/config/jupyter_server_config.py b/scripts/lib/xpedite/jupyter/data/config/jupyter_server_config.py
new file mode 100644
index 00000000..a3bc8e0d
--- /dev/null
+++ b/scripts/lib/xpedite/jupyter/data/config/jupyter_server_config.py
@@ -0,0 +1,18 @@
+# Configuration file for jupyter-notebook.
+
+# Add serverextension to sys.path for jupyter to load tornadoExtension module on startup
+import os
+import sys
+
+currDir = os.path.dirname(__file__)
+packagePath = os.path.join(currDir, '../config/serverextensions')
+sys.path.insert(0, packagePath)
+c = get_config()
+c.ServerApp.jpserver_extensions = {
+  'tornadoExtension' : True,
+}
+
+staticPath = os.path.join(currDir, '../config/custom')
+c.ServerApp.extra_static_paths = [os.path.join(currDir, '../js'), staticPath]
+c.ServerApp.allow_origin = '*' #allow all origins
+c.ServerApp.ip = '0.0.0.0' # listen on all IPs
diff --git a/scripts/lib/xpedite/jupyter/data/config/serverextensions/__init__.py b/scripts/lib/xpedite/jupyter/data/config/serverextensions/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension.py b/scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension/__init__.py
similarity index 71%
rename from scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension.py
rename to scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension/__init__.py
index 902d6107..1b77e6e6 100644
--- a/scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension.py
+++ b/scripts/lib/xpedite/jupyter/data/config/serverextensions/tornadoExtension/__init__.py
@@ -5,8 +5,10 @@
 Author: Dhruv Shekhawat, Morgan Stanley
 """
 
-from notebook.utils import url_path_join
-from notebook.base.handlers import IPythonHandler
+#from notebook.utils import url_path_join
+import jupyter_server
+from jupyter_server.base.handlers import JupyterHandler
+import tornado
 from tornado import template
 import tornado.web
 import json
@@ -15,13 +17,14 @@
 import os
 import sys
 
-class HtmlReportHandler(tornado.web.RequestHandler):
+class HtmlReportHandler(JupyterHandler):
   """Class to serve html reports through links with query params
      as notebook path and (cellId, reportId) as indices to read
      metadata from notebook
   """
+
+  @tornado.web.authenticated
   def get(self):
-    xpeditePath = os.path.normpath(os.path.join(__file__, '../../../../../..'))
+    xpeditePath = os.path.normpath(os.path.join(__file__, '../../../../../../..'))
     sys.path.append(xpeditePath)
 
     from xpedite.jupyter.xpediteData import XpediteDataReader
@@ -57,11 +60,16 @@ def get_init_cell(jsonReport):
   if(('isInit' in metadata) and (metadata['isInit'] == '0xFFFFFFFFA5A55A5DUL')):
     return cell
 
-def load_jupyter_server_extension(nb_server_app):
-  """Method called first to load the
-     server extension on jupyter startup
-  """
-  web_app = nb_server_app.web_app
-  host_pattern = '.*$'
-  route_pattern = url_path_join(web_app.settings['base_url'], '/xpedite')
-  web_app.add_handlers(host_pattern, [(route_pattern, HtmlReportHandler)])
+def _load_jupyter_server_extension(serverapp: jupyter_server.serverapp.ServerApp):
+  """
+  This function is called when the extension is loaded.
+  """
+  handlers = [('/xpedite', HtmlReportHandler)]
+  serverapp.web_app.add_handlers(".*$", handlers)
+
+def _jupyter_server_extension_points():
+  """
+  Returns a list of dictionaries with metadata describing
+  where to find the `_load_jupyter_server_extension` function.
+  """
+  return [{"module": "tornadoExtension"}]
diff --git a/scripts/lib/xpedite/jupyter/driver.py b/scripts/lib/xpedite/jupyter/driver.py
index 4a22adb1..c5164eaa 100644
--- a/scripts/lib/xpedite/jupyter/driver.py
+++ b/scripts/lib/xpedite/jupyter/driver.py
@@ -138,7 +138,7 @@ def buildReportCells(nb, result, dataFilePath):
 
     nb['cells'].append(
       nbf.new_code_cell(source=cellCode, metadata={
-        'init_cell': True, 'hide_input': True, 'editable': False, 'deletable': True
+        'init_cell': True, 'iinit' : True, 'hide_input': True, 'editable': True, 'deletable': True
       })
     )
@@ -163,7 +163,7 @@ def buildInitCell(nb, numOfCategories, d3Flots, appName, runId):
     LOGGER.exception(typeErr)
     raise InvariantViloation(typeErr)
 
-  nb['cells'] = [nbf.new_code_cell(source=initCode, metadata={'init_cell': True, 'isInit': '0xFFFFFFFFA5A55A5DUL',\
+  nb['cells'] = [nbf.new_code_cell(source=initCode, metadata={'init_cell': True, 'iinit' : True, 'isInit': '0xFFFFFFFFA5A55A5DUL',\
     'hide_input': True, 'editable': False, 'deletable': False,\
     'd3Flots': d3Flots})] + nb['cells']
@@ -217,7 +217,7 @@ def validatePath(homeDir, reportName):
   from xpedite.jupyter import DATA_DIR, DATA_FILE_EXT, TEMP_PREFIX, NOTEBOOK_EXT
   if homeDir is None:
     homeDir = tempfile.mkdtemp(prefix=TEMP_PREFIX, dir='/tmp')
-    LOGGER.warn('Xpedite home directory not found in profileInfo (using temp dir).\n'
+    LOGGER.warning('Xpedite home directory not found in profileInfo (using temp dir).\n'
       'To keep all reports in one place, set variable homeDir in profileInfo to a valid path.')
 
   dataDir = os.path.join(homeDir, DATA_DIR)
diff --git a/scripts/lib/xpedite/jupyter/templates/initCell.fmt b/scripts/lib/xpedite/jupyter/templates/initCell.fmt
index c45522b7..916baa48 100644
--- a/scripts/lib/xpedite/jupyter/templates/initCell.fmt
+++ b/scripts/lib/xpedite/jupyter/templates/initCell.fmt
@@ -2,13 +2,13 @@ import os, sys
 from IPython.display import display, HTML
 sys.path.append(os.environ['XPEDITE_PATH'])
 import xpedite
+import ipynbname
 from xpedite.jupyter.commands import routes, txns, plot, stat, filter, diff
 from xpedite.analytics.timelineTree import buildTimelineTree
 from xpedite.jupyter.templates.initCell import INTRO_FRMT
 from xpedite.jupyter.context import Context, context
 
-thismodule = sys.modules[__name__]
-notebookPath = getattr(thismodule, Context.notebookPathKey)
+notebookPath = ipynbname.path()
 result = 'Failed to resolve jupyter notebook path'
 if notebookPath:
   context.initialize(notebookPath)
diff --git a/scripts/lib/xpedite/pmu/hierarchy.py b/scripts/lib/xpedite/pmu/hierarchy.py
index 938c107b..aad442d0 100644
--- a/scripts/lib/xpedite/pmu/hierarchy.py
+++ b/scripts/lib/xpedite/pmu/hierarchy.py
@@ -94,8 +94,7 @@ def run(self, node):
     """
     node.thresh = False
 
-    if node.level > self.maxLevel:
-      self.maxLevel = node.level
+    self.maxLevel = max(node.level, self.maxLevel)
 
     node.children = []
node.name = self.formatName(node.name) self.nodes.update({node.name: node}) diff --git a/scripts/lib/xpedite/profiler/app.py b/scripts/lib/xpedite/profiler/app.py index 5d6a1da9..bdd674e1 100644 --- a/scripts/lib/xpedite/profiler/app.py +++ b/scripts/lib/xpedite/profiler/app.py @@ -123,7 +123,7 @@ def start(self): Environment(self.ip, self._appInfoPath, self.dryRun, self.workspace) if isLocal else RemoteEnvironment(self.ip, self._appInfoPath, self.dryRun, self.workspace) ) - self.env.__enter__() + self.env.__enter__() # pylint: disable=unnecessary-dunder-call def stop(self): """Disconnects and detaches from target application""" @@ -165,7 +165,7 @@ def pingApp(app): except socket.error as socketError: errMsg = 'encounter a socket error: {}'.format(str(socketError)) - LOGGER.warn('restarting xpedite client - application is not responding to ping - %s', errMsg) + LOGGER.warning('restarting xpedite client - application is not responding to ping - %s', errMsg) app.restart() try: @@ -189,7 +189,7 @@ def __init__(self, name, ip, appInfoPath, runId=None, dataSourcePath=None, works """Constructs an instance of XpediteDormantApp""" dataSource = CsvDataSourceFactory().gather(dataSourcePath) if dataSourcePath else None if dataSource: - LOGGER.warn('Data source detected. overriding appinfo to %s', dataSource.appInfoPath) + LOGGER.warning('Data source detected. overriding appinfo to %s', dataSource.appInfoPath) appInfoPath = dataSource.appInfoPath XpediteApp.__init__(self, name, ip, appInfoPath, dryRun=True, workspace=workspace) self.dataSource = dataSource diff --git a/scripts/lib/xpedite/profiler/environment.py b/scripts/lib/xpedite/profiler/environment.py index e265696c..e533b6ea 100644 --- a/scripts/lib/xpedite/profiler/environment.py +++ b/scripts/lib/xpedite/profiler/environment.py @@ -97,13 +97,13 @@ def enablePMU(self, eventsDb, cpuSet, events): if not cpuSet or len(cpuSet) <= 0: raise Exception('Invalid argument - cpu set missing. 
need explicit cpu set to enable pmu') self.pmuCtrl = PMUCtrl(eventsDb) - self.pmuCtrl.__enter__() + self.pmuCtrl.__enter__() # pylint: disable=unnecessary-dunder-call return self.pmuCtrl.enable(cpuSet, events) def disablePMU(self): """Disables user space pmc collection and restores cpu core to original state""" if self.pmuCtrl: - self.pmuCtrl.__exit__() + self.pmuCtrl.__exit__() # pylint: disable=unnecessary-dunder-call @staticmethod def getVmStats(pid): @@ -229,7 +229,7 @@ def enablePMU(self, eventsDb, cpuSet, events): if not self.isDriverLoaded(): (eventSet, request) = PMUCtrl.buildPerfEventsRequest(eventsDb, events) if eventSet and request: - LOGGER.warn('xpedite device driver not loaded - falling back to perf events api') + LOGGER.warning('xpedite device driver not loaded - falling back to perf events api') LOGGER.debug('sending request (%d bytes) to xpedite [%s]', len(request), request) rc = self.admin('ActivatePerfEvents --data {}'.format(request)) if rc: diff --git a/scripts/lib/xpedite/profiler/profileInfo.py b/scripts/lib/xpedite/profiler/profileInfo.py index 3c21d2e6..a97404de 100644 --- a/scripts/lib/xpedite/profiler/profileInfo.py +++ b/scripts/lib/xpedite/profiler/profileInfo.py @@ -9,7 +9,7 @@ import os import sys -import imp #pylint: disable=deprecated-module +import importlib.util import logging LOGGER = logging.getLogger(__name__) @@ -74,7 +74,9 @@ def loadProfileInfo(profilePath): path = os.path.abspath(profilePath) fileName = os.path.split(profilePath)[1] moduleName = str.split(fileName, '.')[0] - profileInfo = imp.load_source(moduleName, path) + spec = importlib.util.spec_from_file_location(moduleName, path) + profileInfo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(profileInfo) benchmarkPaths = getattr(profileInfo, 'benchmarkPaths', None) pmc = getattr(profileInfo, 'pmc', None) cpuSet = getattr(profileInfo, 'cpuSet', None) diff --git a/scripts/lib/xpedite/profiler/runtime.py b/scripts/lib/xpedite/profiler/runtime.py index 06a6819f..58727f9f 100644 --- a/scripts/lib/xpedite/profiler/runtime.py +++ b/scripts/lib/xpedite/profiler/runtime.py @@ -82,7 +82,7 @@ def enableProbes(self, probes): LOGGER.error(msg) raise Exception(msg) else: - LOGGER.warn('failed to enable probes - Invalid or empty probes argument') + LOGGER.warning('failed to enable probes - Invalid or empty probes argument') def resolveProbes(self, probes): """ @@ -224,7 +224,7 @@ def __init__(self, app, probes, pmc=None, cpuSet=None, pollInterval=4, samplesFi else: if pmc: self.eventSet = self.resolveEvents(eventsDb, cpuSet, pmc) - LOGGER.warn('DRY Run selected - xpedite won\'t enable probes') + LOGGER.warning('DRY Run selected - xpedite won\'t enable probes') except Exception as ex: LOGGER.exception('failed to start profiling') raise ex @@ -263,7 +263,7 @@ def report(self, reportName=None, benchmarkPaths=None, classifier=DefaultClassif try: self.app.endProfile() except Exception as ex: - LOGGER.warn('Detected unclean profile termination - %s', ex) + LOGGER.warning('Detected unclean profile termination - %s', ex) if self.eventSet: self.app.disablePMU() diff --git a/scripts/lib/xpedite/report/env.py b/scripts/lib/xpedite/report/env.py index 83baea4e..8e54e781 100644 --- a/scripts/lib/xpedite/report/env.py +++ b/scripts/lib/xpedite/report/env.py @@ -109,5 +109,5 @@ def buildEnvironmentReportFile(self, app, repo, resultOrder, classifier, txnFilt tabBody = TAB_BODY_FMT.format('hostInfo', envBodyClass + tabContentState(True), hostReport) tabBody += TAB_BODY_FMT.format('profileInfo', 
envBodyClass + tabContentState(False), profileReport) tabBody = TAB_BODY_PREFIX + tabBody + TAB_BODY_SUFFIX - report = (HTML_BEGIN + TAB_CONTAINER_FMT.format(tabHeader, tabBody) + TAB_JS + HTML_END) + report = HTML_BEGIN + TAB_CONTAINER_FMT.format(tabHeader, tabBody) + TAB_JS + HTML_END return report diff --git a/scripts/lib/xpedite/requirements.txt b/scripts/lib/xpedite/requirements.txt index 3f313fd7..6107fd25 100644 --- a/scripts/lib/xpedite/requirements.txt +++ b/scripts/lib/xpedite/requirements.txt @@ -5,8 +5,11 @@ netifaces>=0.10.4 numpy>=1.15.2 pygments>=2.0.2 rpyc>=4.0.1 -cement==2.8.2 +cement>=3.0.10 termcolor>=1.1.0 py-cpuinfo>=0.1.2 jupyter>=1.0.0 six>=1.12.0 +ipynbname +setuptools +iinit diff --git a/scripts/lib/xpedite/txn/extractor.py b/scripts/lib/xpedite/txn/extractor.py index 3a6bdbb9..47fa6449 100644 --- a/scripts/lib/xpedite/txn/extractor.py +++ b/scripts/lib/xpedite/txn/extractor.py @@ -61,10 +61,10 @@ def gatherCounters(self, app, loader): elapsed = time.time() - begin self.logCounterFilterReport() if self.orphanedSamplesCount: - LOGGER.warn('detected mismatch in binary vs app info - %d counters ignored', self.orphanedSamplesCount) + LOGGER.warning('detected mismatch in binary vs app info - %d counters ignored', self.orphanedSamplesCount) LOGGER.completed('%d records | %d txns loaded in %0.2f sec.', recordCount-1, loader.getCount(), elapsed) if loader.isCompromised() or loader.getTxnCount() <= 0: - LOGGER.warn(loader.report()) + LOGGER.warning(loader.report()) elif loader.isNotAccounted(): LOGGER.debug(loader.report()) loader.endCollection() diff --git a/scripts/lib/xpedite/txn/repo.py b/scripts/lib/xpedite/txn/repo.py index 840359a2..d2d4cad2 100644 --- a/scripts/lib/xpedite/txn/repo.py +++ b/scripts/lib/xpedite/txn/repo.py @@ -96,7 +96,7 @@ def loaderFactory(loaderType, benchmark, probes, benchmarkProbes, topdownCache, benchmarkTopdownMetrics = None if benchmarkProbes and benchmark.name in benchmarkProbes: loaderProbes = benchmarkProbes[benchmark.name] - LOGGER.warn('overriding probes for benchmark run \'%s\'', benchmark.name) + LOGGER.warning('overriding probes for benchmark run \'%s\'', benchmark.name) if benchmark.events and topdownMetrics: from xpedite.pmu.event import TopdownMetrics benchmarkTopdownMetrics = TopdownMetrics() diff --git a/scripts/lib/xpedite/util/__init__.py b/scripts/lib/xpedite/util/__init__.py index dbd99234..3e006cae 100644 --- a/scripts/lib/xpedite/util/__init__.py +++ b/scripts/lib/xpedite/util/__init__.py @@ -40,7 +40,7 @@ def timeAction(action, delegate): retVal = delegate() elapsed = time.time() - begin if elapsed > 10: - LOGGER.warn('timed action exceeded threshold %s completed in %s.1f seconds', action, elapsed) + LOGGER.warning('timed action exceeded threshold %s completed in %s.1f seconds', action, elapsed) return retVal def shell(cmd, cwd=None, closeFds=True): diff --git a/test/pytest/test_xpedite/test_profiler/profile.py b/test/pytest/test_xpedite/test_profiler/profile.py index e9e0eafe..3bfdc8fe 100644 --- a/test/pytest/test_xpedite/test_profiler/profile.py +++ b/test/pytest/test_xpedite/test_profiler/profile.py @@ -35,13 +35,13 @@ def generateProfiles(app, scenario, context): assert len(profile.current) == context.txnCount return report -def runXpediteReport(runId, context, scenario, sampleFilePath=None, cpuInfoOverride=False): +def runXpediteReport(runId, context, scenario, sampleFilePath=None, cpuInfoOverride=None): """ Run xpedite report """ with scenario.makeXpediteDormantApp(runId, context.workspace, sampleFilePath) as 
xpediteApp: if cpuInfoOverride: - xpediteApp.env.proxy.fullCpuInfo = scenario.fullCpuInfo + xpediteApp.env.proxy.fullCpuInfo = cpuInfoOverride xpediteApp.appInfoPath = os.path.join(scenario.dataDir, XPEDITE_APP_INFO_PARAMETER_PATH) return generateProfiles(xpediteApp, scenario, context) @@ -69,7 +69,7 @@ def compareVsBaseline(context, scenario): runId = scenario.discoverRunId() sampleFilePath = SAMPLE_FILE_PATH.format(dataDir=scenario.dataDir, runId=runId) report = runXpediteReport( - runId, context, scenario, sampleFilePath=sampleFilePath, cpuInfoOverride=True + runId, context, scenario, sampleFilePath=sampleFilePath, cpuInfoOverride=scenario.fullCpuInfo ) reportProfiles = report.profiles reportProfiles.transactionRepo = None diff --git a/test/pytest/test_xpedite/test_profiler/test_profiler.py b/test/pytest/test_xpedite/test_profiler/test_profiler.py index fb879533..6295f234 100755 --- a/test/pytest/test_xpedite/test_profiler/test_profiler.py +++ b/test/pytest/test_xpedite/test_profiler/test_profiler.py @@ -54,7 +54,7 @@ def setTestParameters(hostname, transactions, multithreaded, workspace, rundir, ProbeIndexFactory.reset() if not isIpLocal(hostname): remote = Remote(hostname, makeLogPath('remote')) - remote.__enter__() + remote.__enter__() # pylint: disable=unnecessary-dunder-call CAN_RECORD_PMC = recordPMC CONTEXT = Context(transactions, multithreaded, workspace) SCENARIO_LOADER.loadScenarios(rundir, apps, scenarioTypes, remote) @@ -77,8 +77,8 @@ def test_record_vs_report(capsys, scenarioName): checkPmcSupport(scenarioName) with SCENARIO_LOADER[scenarioName] as scenarios: with capsys.disabled(): - currentReport, _, _ = runXpediteRecord(CONTEXT, scenarios) - report = runXpediteReport(currentReport.runId, CONTEXT, scenarios) + currentReport, fullCpuInfo, _ = runXpediteRecord(CONTEXT, scenarios) + report = runXpediteReport(currentReport.runId, CONTEXT, scenarios, cpuInfoOverride=fullCpuInfo) findDiff(report.profiles.__dict__, currentReport.profiles.__dict__) assert report.profiles == currentReport.profiles diff --git a/test/runTest.sh b/test/runTest.sh index 73598900..82818dd0 100755 --- a/test/runTest.sh +++ b/test/runTest.sh @@ -36,6 +36,8 @@ usage: ${PROGRAM_NAME} [lgpw:cr:s:Pt:m:a:] -c|--cov check pytest code coverage -r|--remote set a remote hostname for the application to run on: ${PROGRAM_NAME} -r -s|--single choose a single test to run: ${PROGRAM_NAME} -s test_name +-L|--list List pytest +-k|--pattern Run pytest matching pattern -t|--transactions specify a number of transactions for the target application: ${PROGRAM_NAME} -t -m|--multithreaded specify the number of threads for the target application: ${PROGRAM_NAME} -m -a|--apps a comma separated list of binaries to test: ${PROGRAM_NAME} -a @@ -103,6 +105,10 @@ function doesDirectoryExist() { fi } +function listPytests() { + PYTHONPATH=${XPEDITE_DIR}:${PYTHONPATH} pytest ${PYTEST_DIR} --collect-only +} + function runPytests() { RUN_DIR=$(mktemp -d) @@ -128,8 +134,10 @@ function runPytests() { SCENARIO_TYPES="--scenarioTypes=Regular,Benchmark,PMC" fi - PYTEST_ARGS="${COV} ${TEST_NAME} -v ${APP_HOST} ${TRANSACTION_COUNT} ${THREAD_COUNT} ${WORKSPACE} ${RUN_DIR_ARG} ${APPS} ${SCENARIO_TYPES} ${RECORD_PMC}" - if ! PYTHONPATH=${XPEDITE_DIR}:${PYTHONPATH} pytest ${PYTEST_ARGS}; then + set -x + PYTEST_ARGS="${COV} ${TEST_NAME} ${TEST_PATTERN} -v ${APP_HOST} ${PYTEST_ARGS} ${TRANSACTION_COUNT} ${THREAD_COUNT}" + PYTEST_ARGS="${PYTEST_ARGS} ${WORKSPACE} ${RUN_DIR_ARG} ${APPS} ${SCENARIO_TYPES} ${RECORD_PMC}" + if ! 
PYTHONPATH=${XPEDITE_DIR}:${PYTHONPATH} FORCE_COLOR=true pytest ${PYTEST_ARGS}; then echo detected one or more pytest failures RC=$(($RC + 1)) fi @@ -173,7 +181,7 @@ function runAllTests() { runPytests } -ARGS=$(getopt -o lgpw:cr:s:t:m:a:S:P --long lint,gtest,pytest,workspace,cov,remote:,test:,transactions:,multithreaded:,apps:,scenarioTypes:recordPMC -- "$@") +ARGS=$(getopt -o lLgpw:cr:s:k:t:m:a:S:P --long lint,list,gtest,pytest,single,pattern,workspace,cov,remote:,test:,transactions:,multithreaded:,apps:,scenarioTypes:recordPMC -- "$@") if [ $? -ne 0 ]; then usage @@ -213,6 +221,15 @@ while true ; do echo ${TEST_NAME} shift 2 ;; + -k|--pattern) + TEST_PATTERN="-k $2" ; + echo 'Test pattern *******' ${TEST_PATTERN} + shift 2 + ;; + -L|--list) + LIST_PYTEST=true + shift + ;; -t|--transactions) TRANSACTION_COUNT="--transactions=$2" shift 2 @@ -252,7 +269,7 @@ if [ -d ${RUNTIME_DIR}/bin ]; then python -m pip --trusted-host pypi.org --trusted-host files.pythonhosted.org install pytest pylint pytest-cov fi -if [[ -z "${LINT}" && -z "${GTEST}" && -z "${PYTEST}" ]]; then +if [[ -z "${LINT}" && -z "${GTEST}" && -z "${PYTEST}" && -z "${LIST_PYTEST}" && -z "${TEST_PATTERN}" ]]; then runAllTests else if [ -z "${PYTEST}" ] && [[ "${APP_HOST}" || "${TRANSACTION_COUNT}" || "${THREAD_COUNT}" || "${WORKSPACE}" || "${COV}" || "${TEST_NAME}" || "${APPS}" || "${SCENARIO_TYPES}" || "${RECORD_PMC}" ]]; then @@ -267,9 +284,13 @@ else runLint fi - if [ "${PYTEST}" = true ]; then + if [ "${PYTEST}" = true -o -n "${TEST_PATTERN}" ]; then runPytests fi + + if [ "${LIST_PYTEST}" = true ]; then + listPytests + fi fi exit $RC
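Note on the profileInfo loading change: the switch away from the deprecated imp.load_source in scripts/lib/xpedite/profiler/profileInfo.py follows the standard importlib recipe (spec_from_file_location, module_from_spec, exec_module). A minimal, self-contained sketch of that pattern is shown below for reference; the helper name loadModuleFromPath and the example path are illustrative only and are not part of this change.

    # Sketch of the importlib-based replacement for imp.load_source.
    import importlib.util
    import os

    def loadModuleFromPath(path):
      """Loads a python module from an arbitrary file path."""
      moduleName = os.path.splitext(os.path.basename(path))[0]
      spec = importlib.util.spec_from_file_location(moduleName, path)
      module = importlib.util.module_from_spec(spec)
      spec.loader.exec_module(module)      # executes the file in the new module's namespace
      return module

    # hypothetical usage, mirroring loadProfileInfo() above:
    # profileInfo = loadModuleFromPath('/path/to/profileInfo.py')
    # pmc = getattr(profileInfo, 'pmc', None)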
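Note on the retained dunder calls: app.py, environment.py, and test_profiler.py keep their explicit __enter__()/__exit__() calls and only add suppressions for pylint's newer unnecessary-dunder-call check, presumably to avoid restructuring the existing start/stop lifecycle methods. Purely as an illustration of an alternative that would not need the suppression, here is a sketch using contextlib.ExitStack; the Session class and its method names are hypothetical and do not appear in this change.

    # Sketch only: ExitStack keeps a context manager open across separate
    # method calls without invoking __enter__/__exit__ directly.
    import contextlib

    class Session:
      """Hypothetical wrapper showing ExitStack in place of direct dunder calls."""

      def __init__(self, env):
        self.env = env                        # any context manager, e.g. an Environment
        self.stack = contextlib.ExitStack()   # tracks open context managers

      def start(self):
        self.stack.enter_context(self.env)    # same effect as self.env.__enter__()

      def stop(self):
        self.stack.close()                    # same effect as self.env.__exit__(None, None, None)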