diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..9b38853 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,7 @@ +{ + "python.testing.pytestArgs": [ + "tests" + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true +} \ No newline at end of file diff --git a/AUTHORS b/AUTHORS index 3a4ff25..8f52141 100644 --- a/AUTHORS +++ b/AUTHORS @@ -4,3 +4,4 @@ Contributors include: - Raymond Gauthier (jraygauthier) added Python 3.5 support. - Kyle Altendorf (altendky) fixed bugs on session teardown - Hannes Engelhardt (veritogen) added Bitbucket CI support. + - Lucas Haupt (lhpt2) added profiler.py and failed-test tracking support. diff --git a/docs/sources/changelog.rst b/docs/sources/changelog.rst index 769dd6a..e0c3446 100644 --- a/docs/sources/changelog.rst +++ b/docs/sources/changelog.rst @@ -3,6 +3,8 @@ Changelog ========= * :release:`to be discussed` +* :feature:`#65` Monitor failed tests by default and add the ``--no-failed`` flag to turn off monitoring of failed tests. +* :bug:`#79` Fix a bug where the command-line flag ``--no-monitor`` caused tests that are supposed to fail to pass instead. * :feature:`#75` Automatically gather CI build information for Bitbucket CI. * :release:`1.6.6 <2023-05-06>` diff --git a/docs/sources/configuration.rst b/docs/sources/configuration.rst index 90c29a6..a9acfa9 100644 --- a/docs/sources/configuration.rst +++ b/docs/sources/configuration.rst @@ -79,6 +79,14 @@ Disable monitoring If you need for some reason to disable the monitoring, pass the *\-\-no-monitor* option. +Disable monitoring of failed tests +---------------------------------- + +By default, failing tests are monitored as well. The database has an additional boolean column +(*TEST_PASSED*) that indicates whether a test passed. If you only want to monitor successful tests, pass +the *\-\-no-failed* option. + + Describing a run ---------------- diff --git a/docs/sources/operating.rst b/docs/sources/operating.rst index 8de5b7d..68b2ab7 100644 --- a/docs/sources/operating.rst +++ b/docs/sources/operating.rst @@ -129,5 +129,7 @@ CPU_USAGE (FLOAT) System-wide CPU usage as a percentage (100 % is equivalent to one core). MEM_USAGE (FLOAT) Maximum resident memory used during the test execution (in megabytes). +TEST_PASSED (BOOLEAN) + Boolean value indicating whether the test passed. In the local database, these Metrics are stored in table `TEST_METRICS`.
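Reviewer note (not part of the patch): the documentation changes above introduce the TEST_PASSED column and the ``--no-failed`` flag. As a minimal sketch of how the new column can be consumed, assuming only the table and column names defined in this patch and the default ``.pymon`` database file, pass/fail metrics could be queried like this::

    # illustrative only -- reads the local .pymon SQLite database produced by pytest-monitor
    import sqlite3

    cnx = sqlite3.connect(".pymon")
    rows = cnx.execute(
        "SELECT ITEM, TEST_PASSED, MEM_USAGE FROM TEST_METRICS ORDER BY ITEM"
    ).fetchall()
    for item, passed, mem in rows:
        print(f"{item}: {'passed' if passed else 'failed'} ({mem:.1f} MB)")
    cnx.close()

Running the suite with ``pytest --no-failed`` keeps failing tests out of TEST_METRICS; without the flag they are stored with TEST_PASSED set to FALSE.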
diff --git a/docs/sources/remote.rst b/docs/sources/remote.rst index 340f508..4067d7b 100644 --- a/docs/sources/remote.rst +++ b/docs/sources/remote.rst @@ -115,7 +115,8 @@ POST /metrics/ user_time: float, kernel_time: float, cpu_usage: float, - mem_usage: float + mem_usage: float, + passed: bool, } **Return Codes**: Must return *201* (*CREATED*) if the **Metrics** has been created diff --git a/pytest_monitor/handler.py b/pytest_monitor/handler.py index 6aaa208..cfcba05 100644 --- a/pytest_monitor/handler.py +++ b/pytest_monitor/handler.py @@ -6,6 +6,28 @@ def __init__(self, db_path): self.__db = db_path self.__cnx = sqlite3.connect(self.__db) if db_path else None self.prepare() + # check if new table column is existent, if not create it + self.check_create_test_passed_column() + + def close(self): + self.__cnx.close() + + def __del__(self): + self.__cnx.close() + + def check_create_test_passed_column(self): + cursor = self.__cnx.cursor() + # check for test_passed column, + # table exists bc call happens after prepare() + cursor.execute("PRAGMA table_info(TEST_METRICS)") + has_test_column = any( + column[1] == "TEST_PASSED" for column in cursor.fetchall() + ) + if not has_test_column: + cursor.execute( + "ALTER TABLE TEST_METRICS ADD COLUMN TEST_PASSED BOOLEAN DEFAULT TRUE;" + ) + self.__cnx.commit() def query(self, what, bind_to, many=False): cursor = self.__cnx.cursor() @@ -13,11 +35,12 @@ def query(self, what, bind_to, many=False): return cursor.fetchall() if many else cursor.fetchone() def insert_session(self, h, run_date, scm_id, description): - with self.__cnx: - self.__cnx.execute( - "insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)" " values (?,?,?,?)", - (h, run_date, scm_id, description), - ) + self.__cnx.execute( + "insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)" + " values (?,?,?,?)", + (h, run_date, scm_id, description), + ) + self.__cnx.commit() def insert_metric( self, @@ -35,51 +58,53 @@ def insert_metric( kernel_time, cpu_usage, mem_usage, + passed: bool, ): - with self.__cnx: - self.__cnx.execute( - "insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM," - "ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME," - "USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) " - "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", - ( - session_id, - env_id, - item_start_date, - item, - item_path, - item_variant, - item_loc, - kind, - component, - total_time, - user_time, - kernel_time, - cpu_usage, - mem_usage, - ), - ) + self.__cnx.execute( + "insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM," + "ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME," + "USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE,TEST_PASSED) " + "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ( + session_id, + env_id, + item_start_date, + item, + item_path, + item_variant, + item_loc, + kind, + component, + total_time, + user_time, + kernel_time, + cpu_usage, + mem_usage, + passed, + ), + ) + self.__cnx.commit() def insert_execution_context(self, exc_context): - with self.__cnx: - self.__cnx.execute( - "insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR," - "RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO," - "PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)", - ( - exc_context.cpu_count, - exc_context.cpu_frequency, - exc_context.cpu_type, - exc_context.cpu_vendor, - exc_context.ram_total, - exc_context.fqdn, - exc_context.machine, - exc_context.architecture, - exc_context.system_info, - 
exc_context.python_info, - exc_context.compute_hash(), - ), - ) + self.__cnx.execute( + "insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR," + "RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO," + "PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)", + ( + exc_context.cpu_count, + exc_context.cpu_frequency, + exc_context.cpu_type, + exc_context.cpu_vendor, + exc_context.ram_total, + exc_context.fqdn, + exc_context.machine, + exc_context.architecture, + exc_context.system_info, + exc_context.python_info, + exc_context.compute_hash(), + ), + ) + self.__cnx.commit() def prepare(self): cursor = self.__cnx.cursor() @@ -109,6 +134,7 @@ def prepare(self): KERNEL_TIME float, -- time spent in kernel space CPU_USAGE float, -- cpu usage MEM_USAGE float, -- Max resident memory used. + TEST_PASSED boolean, -- boolean indicating if test passed FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H), FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H) );""" @@ -131,3 +157,9 @@ def prepare(self): """ ) self.__cnx.commit() + + def get_env_id(self, env_hash): + query_result = self.query( + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env_hash,) + ) + return query_result[0] if query_result else None diff --git a/pytest_monitor/profiler.py b/pytest_monitor/profiler.py new file mode 100644 index 0000000..b295e47 --- /dev/null +++ b/pytest_monitor/profiler.py @@ -0,0 +1,193 @@ +# The following code has been copied from the memory_profiler project and +# modified to fit the new usecase. +# Homepage of memory_profiler: https://github.com/pythonprofilers/memory_profiler +# +# Memory_Profiler License: +# New BSD License + +# Copyright (c) 2007–2014 Fabian Pedregosa. +# All rights reserved. + + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# a. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# b. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# c. Neither the name of the memory_profiler developers nor the names of +# its contributors may be used to endorse or promote products +# derived from this software without specific prior written +# permission. + + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. 
+ +import os +from signal import SIGKILL +from typing import Any, Callable, Tuple + +import psutil + +_TWO_20 = float(2**20) + +try: + from multiprocessing import Pipe, Process +except ImportError: + # from multiprocessing.dummy import Pipe + raise + + +def memory_usage(proc: Tuple[Callable, Any, Any], retval=False): + """ + Return the peak memory usage of a piece of code + + Parameters + ---------- + proc : callable or tuple + The code to monitor: either a callable, or a tuple + (f, args, kw) specifying to run the function + f(*args, **kw). + + retval : bool, optional + For profiling python functions. Save the return value of the profiled + function. The return value of memory_usage then becomes a tuple: + (mem_usage, retval) + + Returns + ------- + mem_usage : tuple (float, BaseException or None) + Maximum resident memory used while running the code (in MiB), + together with the exception raised by the code, + or None if the code ran successfully. + ret : return value of the profiled function + Only returned if retval is set to True + """ + + ret = -1 + max_iter = 1 + interval = 0.1 + + if callable(proc): + proc = (proc, (), {}) + + if isinstance(proc, (list, tuple)): + if len(proc) == 1: + f, args, kw = (proc[0], (), {}) + elif len(proc) == 2: + f, args, kw = (proc[0], proc[1], {}) + elif len(proc) == 3: + f, args, kw = (proc[0], proc[1], proc[2]) + else: + raise ValueError + + current_iter = 0 + while True: + current_iter += 1 + child_conn, parent_conn = Pipe() # this will store MemTimer's results + p = MemTimer(os.getpid(), interval, child_conn) + p.start() + parent_conn.recv() # wait until we start getting memory + + # When there is an exception in the "proc" - the (spawned) monitoring processes don't get killed. + # Therefore, the whole process hangs indefinitely. Here, we are ensuring that the process gets killed!
+ try: + returned = f(*args, **kw) + parent_conn.send(0) # finish timing + ret = parent_conn.recv() + n_measurements = parent_conn.recv() + # Convert the one element list produced by MemTimer to a singular value + ret = ret[0], None + if retval: + ret = ret, returned + except BaseException as e: + parent_conn.send(0) # finish timing + ret = parent_conn.recv() + n_measurements = parent_conn.recv() + # Convert the one element list produced by MemTimer to a singular value + ret = ret[0], e + # parent = psutil.Process(os.getpid()) + # kill only the just spawned MemTimer process and its potential children + # instead of all children of the main process (could lead to issues when using testdir fixture) + parent = psutil.Process(p.pid) + for child in parent.children(recursive=True): + os.kill(child.pid, SIGKILL) + p.join(0) + break + + p.join(5 * interval) + + if (n_measurements > 4) or (current_iter == max_iter) or (interval < 1e-6): + break + interval /= 10.0 + else: + raise ValueError("proc is no valid function") + + return ret + + +class MemTimer(Process): + """ + Fetch memory consumption from over a time interval + """ + + def __init__(self, monitor_pid, interval, pipe, *args, **kw): + self.monitor_pid = monitor_pid + self.interval = interval + self.pipe = pipe + self.cont = True + self.n_measurements = 1 + + # get baseline memory usage + self.mem_usage = [_get_memory(self.monitor_pid)] + super(MemTimer, self).__init__(*args, **kw) + + def run(self): + self.pipe.send(0) # we're ready + stop = False + while True: + cur_mem = _get_memory(self.monitor_pid) + self.mem_usage[0] = max(cur_mem, self.mem_usage[0]) + self.n_measurements += 1 + if stop: + break + stop = self.pipe.poll(self.interval) + # do one more iteration + + self.pipe.send(self.mem_usage) + self.pipe.send(self.n_measurements) + + +def _get_memory(pid): + # .. low function to get memory consumption .. + if pid == -1: + pid = os.getpid() + + # .. cross-platform but but requires psutil .. + process = psutil.Process(pid) + try: + # avoid using get_memory_info since it does not exists + # in psutil > 2.0 and accessing it will cause exception. + meminfo_attr = ( + "memory_info" if hasattr(process, "memory_info") else "get_memory_info" + ) + mem = getattr(process, meminfo_attr)()[0] / _TWO_20 + return mem + + except psutil.AccessDenied: + pass + # continue and try to get this from ps diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 3e9c12c..a812eb1 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -3,11 +3,12 @@ import time import warnings -import memory_profiler import pytest from pytest_monitor.session import PyTestMonitorSession +from .profiler import memory_usage + # These dictionaries are used to compute members set on each items. # KEY is the marker set on a test function # value is a tuple: @@ -22,7 +23,9 @@ "monitor_test_if": (True, "monitor_force_test", lambda x: bool(x), False), } PYTEST_MONITOR_DEPRECATED_MARKERS = {} -PYTEST_MONITOR_ITEM_LOC_MEMBER = "_location" if tuple(pytest.__version__.split(".")) < ("5", "3") else "location" +PYTEST_MONITOR_ITEM_LOC_MEMBER = ( + "_location" if tuple(pytest.__version__.split(".")) < ("5", "3") else "location" +) PYTEST_MONITORING_ENABLED = True @@ -44,7 +47,9 @@ def pytest_addoption(parser): help="Set this option to distinguish parametrized tests given their values." 
" This requires the parameters to be stringifiable.", ) - group.addoption("--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces") + group.addoption( + "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces" + ) group.addoption( "--remote-server", action="store", @@ -68,13 +73,21 @@ def pytest_addoption(parser): "--force-component", action="store", dest="mtr_force_component", - help="Force the component to be set at the given value for the all tests run" " in this session.", + help="Force the component to be set at the given value for the all tests run" + " in this session.", ) group.addoption( "--component-prefix", action="store", dest="mtr_component_prefix", - help="Prefix each found components with the given value (applies to all tests" " run in this session).", + help="Prefix each found components with the given value (applies to all tests" + " run in this session).", + ) + group.addoption( + "--no-failed", + action="store_true", + dest="mtr_disable_monitoring_failed", + help="Disable monitoring of failed tests and only monitor successful tests", ) group.addoption( "--no-gc", @@ -99,10 +112,13 @@ def pytest_addoption(parser): def pytest_configure(config): - config.addinivalue_line("markers", "monitor_skip_test: mark test to be executed but not monitored.") + config.addinivalue_line( + "markers", "monitor_skip_test: mark test to be executed but not monitored." + ) config.addinivalue_line( "markers", - "monitor_skip_test_if(cond): mark test to be executed but " "not monitored if cond is verified.", + "monitor_skip_test_if(cond): mark test to be executed but " + "not monitored if cond is verified.", ) config.addinivalue_line( "markers", @@ -126,14 +142,24 @@ def pytest_runtest_setup(item): """ if not PYTEST_MONITORING_ENABLED: return - item_markers = {mark.name: mark for mark in item.iter_markers() if mark and mark.name.startswith("monitor_")} + item_markers = { + mark.name: mark + for mark in item.iter_markers() + if mark and mark.name.startswith("monitor_") + } mark_to_del = [] for set_marker in item_markers.keys(): if set_marker not in PYTEST_MONITOR_VALID_MARKERS: - warnings.warn("Nothing known about marker {}. Marker will be dropped.".format(set_marker)) + warnings.warn( + "Nothing known about marker {}. Marker will be dropped.".format( + set_marker + ) + ) mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: - warnings.warn(f"Marker {set_marker} is deprecated. Consider upgrading your tests") + warnings.warn( + f"Marker {set_marker} is deprecated. Consider upgrading your tests" + ) for marker in mark_to_del: del item_markers[marker] @@ -201,19 +227,27 @@ def wrapped_function(): pyfuncitem.obj(**testargs) except Exception: raise - except BaseException as e: - return e + except BaseException: + raise def prof(): - m = memory_profiler.memory_usage((wrapped_function, ()), max_iterations=1, max_usage=True, retval=True) - if isinstance(m[1], BaseException): # Do we have any outcome? - raise m[1] - memuse = m[0][0] if type(m[0]) is list else m[0] + (memuse, exception) = memory_usage((wrapped_function, ())) setattr(pyfuncitem, "mem_usage", memuse) setattr(pyfuncitem, "monitor_results", True) + if isinstance(exception, BaseException): # Do we have any outcome? 
+ if pyfuncitem.session.config.option.mtr_disable_monitoring_failed: + setattr(pyfuncitem, "monitor_results", False) + setattr(pyfuncitem, "passed", False) + raise exception + + setattr(pyfuncitem, "passed", True) + if not PYTEST_MONITORING_ENABLED: - wrapped_function() + try: + wrapped_function() + except BaseException: + raise else: if not pyfuncitem.session.config.option.mtr_disable_gc: gc.collect() @@ -233,12 +267,26 @@ def pytest_sessionstart(session): Instantiate a monitor session to save collected metrics. We yield at the end to let pytest pursue the execution. """ - if session.config.option.mtr_force_component and session.config.option.mtr_component_prefix: - raise pytest.UsageError("Invalid usage: --force-component and --component-prefix are incompatible options!") - if session.config.option.mtr_no_db and not session.config.option.mtr_remote and not session.config.option.mtr_none: - warnings.warn("pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.") + if ( + session.config.option.mtr_force_component + and session.config.option.mtr_component_prefix + ): + raise pytest.UsageError( + "Invalid usage: --force-component and --component-prefix are incompatible options!" + ) + if ( + session.config.option.mtr_no_db + and not session.config.option.mtr_remote + and not session.config.option.mtr_none + ): + warnings.warn( + "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." + ) session.config.option.mtr_none = True - component = session.config.option.mtr_force_component or session.config.option.mtr_component_prefix + component = ( + session.config.option.mtr_force_component + or session.config.option.mtr_component_prefix + ) if session.config.option.mtr_component_prefix: component += ".{user_component}" if not component: @@ -248,13 +296,24 @@ def pytest_sessionstart(session): if (session.config.option.mtr_none or session.config.option.mtr_no_db) else session.config.option.mtr_db_out ) - remote = None if session.config.option.mtr_none else session.config.option.mtr_remote + remote = ( + None if session.config.option.mtr_none else session.config.option.mtr_remote + ) session.pytest_monitor = PyTestMonitorSession( db=db, remote=remote, component=component, scope=session.config.option.mtr_scope ) global PYTEST_MONITORING_ENABLED PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none - session.pytest_monitor.compute_info(session.config.option.mtr_description, session.config.option.mtr_tags) + session.pytest_monitor.compute_info( + session.config.option.mtr_description, session.config.option.mtr_tags + ) + yield + + +@pytest.hookimpl(hookwrapper=True) +def pytest_sessionfinish(session): + if session.pytest_monitor is not None: + session.pytest_monitor.close() yield @@ -284,6 +343,7 @@ def _prf_module_tracer(request): ptimes_b.user - ptimes_a.user, ptimes_b.system - ptimes_a.system, rss, + True, ) @@ -295,7 +355,9 @@ def _prf_tracer(request): ptimes_a = request.session.pytest_monitor.process.cpu_times() yield ptimes_b = request.session.pytest_monitor.process.cpu_times() - if not request.node.monitor_skip_test and getattr(request.node, "monitor_results", False): + if not request.node.monitor_skip_test and getattr( + request.node, "monitor_results", False + ): item_name = request.node.originalname or request.node.name item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] request.session.pytest_monitor.add_test_info( @@ -310,4 +372,5 @@ def _prf_tracer(request): ptimes_b.user - ptimes_a.user, 
ptimes_b.system - ptimes_a.system, request.node.mem_usage, + getattr(request.node, "passed", False), ) diff --git a/pytest_monitor/session.py b/pytest_monitor/session.py index 677362c..ad1ce47 100644 --- a/pytest_monitor/session.py +++ b/pytest_monitor/session.py @@ -5,11 +5,11 @@ import warnings from http import HTTPStatus -import memory_profiler import psutil import requests from pytest_monitor.handler import DBHandler +from pytest_monitor.profiler import memory_usage from pytest_monitor.sys_utils import ( ExecutionContext, collect_ci_info, @@ -31,6 +31,10 @@ def __init__(self, db=None, remote=None, component="", scope=None, tracing=True) self.__mem_usage_base = None self.__process = psutil.Process(os.getpid()) + def close(self): + if self.__db is not None: + self.__db.close() + @property def monitoring_enabled(self): return self.__monitor_enabled @@ -50,7 +54,10 @@ def process(self): def get_env_id(self, env): db, remote = None, None if self.__db: - row = self.__db.query("SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.compute_hash(),)) + row = self.__db.query( + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", + (env.compute_hash(),), + ) db = row[0] if row else None if self.__remote: r = requests.get(f"{self.__remote}/contexts/{env.compute_hash()}") @@ -76,7 +83,7 @@ def compute_info(self, description, tags): if description: d["description"] = description for tag in tags: - if type(tag) is str: + if isinstance(tag, str): _tag_info = tag.split("=", 1) d[_tag_info[0]] = _tag_info[1] else: @@ -109,12 +116,17 @@ def set_environment_info(self, env): db_id, remote_id = self.__eid if self.__db and db_id is None: self.__db.insert_execution_context(env) - db_id = self.__db.query("select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.compute_hash(),))[0] + db_id = self.__db.query( + "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", + (env.compute_hash(),), + )[0] if self.__remote and remote_id is None: # We must postpone that to be run at the end of the pytest session. r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict()) if r.status_code != HTTPStatus.CREATED: - warnings.warn(f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating...") + warnings.warn( + f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating..." 
+ ) self.__remote = "" else: remote_id = json.loads(r.text)["h"] @@ -124,8 +136,10 @@ def prepare(self): def dummy(): return True - memuse = memory_profiler.memory_usage((dummy,), max_iterations=1, max_usage=True) - self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse + (memuse, exception) = memory_usage((dummy,)) + self.__mem_usage_base = memuse + if isinstance(exception, BaseException): + raise exception def add_test_info( self, @@ -140,6 +154,7 @@ def add_test_info( user_time, kernel_time, mem_usage, + passed: bool, ): if kind not in self.__scope: return @@ -166,6 +181,7 @@ def add_test_info( kernel_time, cpu_usage, mem_usage, + passed, ) if self.__remote and self.remote_env_id is not None: r = requests.post( @@ -185,6 +201,7 @@ def add_test_info( "kernel_time": kernel_time, "cpu_usage": cpu_usage, "mem_usage": mem_usage, + "test_passed": passed, }, ) if r.status_code != HTTPStatus.CREATED: diff --git a/requirements.txt b/requirements.txt index 5182b1c..ebe0b8d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ psutil>=5.1.0 -memory_profiler>=0.58 pytest requests diff --git a/tests/test_monitor.py b/tests/test_monitor.py index 85b4ff6..f5506ee 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -39,12 +39,39 @@ def test_ok(): cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert len(cursor.fetchall()) == 1 cursor = db.cursor() - tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0]) + tags = json.loads( + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + ) assert "description" not in tags assert "version" in tags assert tags["version"] == "12.3.5" +def test_monitor_basic_test_failing(testdir): + """Make sure that pytest-monitor handles failing tests properly (without ending in a deadlock when run nested)""" + # create a temporary pytest test module + testdir.makepyfile( + """ + import time + + def test_fail(): + time.sleep(0.5) + x = [ "hello" ] + assert len(x) == 2 + +""" + ) + + # run pytest with the following cmd args + result = testdir.runpytest("") + + pymon_path = pathlib.Path(str(testdir)) / ".pymon" + assert pymon_path.exists() + + # make sure the test run reports exactly one failing test + result.assert_outcomes(failed=1) + + def test_monitor_basic_test_description(testdir): """Make sure that pytest-monitor does the job without impacting user tests.""" # create a temporary pytest test module @@ -62,7 +89,9 @@ def test_ok(): ) # run pytest with the following cmd args - result = testdir.runpytest("-vv", "--description", '"Test"', "--tag", "version=12.3.5") + result = testdir.runpytest( + "-vv", "--description", '"Test"', "--tag", "version=12.3.5" + ) # fnmatch_lines does an assertion internally result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) @@ -78,7 +107,9 @@ def test_ok(): cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert len(cursor.fetchall()) == 1 cursor = db.cursor() - tags = json.loads(cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0]) + tags = json.loads( + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + ) assert "description" in tags assert tags["description"] == '"Test"' assert "version" in tags @@ -176,7 +207,9 @@ def test_ok(): result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"]) + result.stdout.fnmatch_lines( + ["*::test_ok PASSED*", "*Nothing known about marker
monitor_bad_marker*"] + ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() @@ -298,7 +331,9 @@ def test_monitored(): result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"]) + result.stdout.fnmatch_lines( + ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"] + ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() @@ -406,3 +441,64 @@ def run(a, b): # make sure that that we get a '0' exit code for the testsuite result.assert_outcomes(passed=1) assert not pymon_path.exists() + + +def test_monitor_monitor_failed_tests(testdir): + """Check new standard behavior that monitors failed tests in database""" + testdir.makepyfile( + """ + def test_failing_test(): + assert False + """ + ) + + result = testdir.runpytest("") + # # make sure that that we get a '0' exit code for the testsuite + result.assert_outcomes(failed=1) + + pymon_path = pathlib.Path(str(testdir)) / ".pymon" + assert pymon_path.exists() + db = sqlite3.connect(str(pymon_path)) + cursor = db.cursor() + + # TEST_METRICS table is supposed to be have 1 entry (1 failed test) + cursor.execute("SELECT * FROM TEST_METRICS") + test_metrics = cursor.fetchall() + assert len(test_metrics) == 1 + + +def test_monitor_no_monitor_failed(testdir): + """Ensure cmd flag --no-failed works and turns of monitoring failed tests""" + testdir.makepyfile( + """ + def test_failing_test(): + assert False + """ + ) + pymon_path = pathlib.Path(str(testdir)) / ".pymon" + db = sqlite3.connect(str(pymon_path)) + cursor = db.cursor() + + result = testdir.runpytest("--no-failed") + result.assert_outcomes(failed=1) + + # TEST_METRICS table is supposed to be empty (only one failing test) + cursor.execute("SELECT * FROM TEST_METRICS") + ncursor = cursor.fetchall() + print(ncursor) + assert not len(ncursor) + + testdir.makepyfile( + """ + def test_successful_test(): + assert True + """ + ) + + result = testdir.runpytest("--no-failed") + # make sure that that we get a '0' exit code for the testsuite + result.assert_outcomes(passed=1) + + # TEST_METRICS table is supposed to have 1 entry (2 tests, 1 successful) + cursor.execute("SELECT * FROM TEST_METRICS") + assert len(cursor.fetchall()) == 1 diff --git a/tests/test_monitor_handler.py b/tests/test_monitor_handler.py new file mode 100644 index 0000000..ad998af --- /dev/null +++ b/tests/test_monitor_handler.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +import datetime +import sqlite3 + +import pytest + +from pytest_monitor.handler import DBHandler +from pytest_monitor.sys_utils import determine_scm_revision + +DB_Context = sqlite3.Connection + + +# helper function +def reset_db(db_context: DB_Context): + # cleanup_cursor.execute("DROP DATABASE postgres") + # cleanup_cursor.execute("CREATE DATABASE postgres") + cleanup_cursor = db_context.cursor() + cleanup_cursor.execute("DROP TABLE IF EXISTS TEST_METRICS") + cleanup_cursor.execute("DROP TABLE IF EXISTS TEST_SESSIONS") + cleanup_cursor.execute("DROP TABLE IF EXISTS EXECUTION_CONTEXTS") + db_context.commit() + cleanup_cursor.close() + + # cleanup_cursor.execute("CREATE SCHEMA public;") + # cleanup_cursor.execute("ALTER DATABASE postgres SET search_path TO public;") + # cleanup_cursor.execute("ALTER ROLE postgres SET search_path TO public;") + # cleanup_cursor.execute("ALTER SCHEMA public OWNER to postgres;") + # cleanup_cursor.execute("GRANT ALL ON SCHEMA public TO postgres;") + # 
cleanup_cursor.execute("GRANT ALL ON SCHEMA public TO public;") + + +@pytest.fixture() +def sqlite_empty_mock_db() -> sqlite3.Connection: + """Initialize empty sqlite3 db""" + mockdb = sqlite3.connect(":memory:") + yield mockdb + mockdb.close() + + +@pytest.fixture() +def prepared_mocked_SqliteDBHandler(sqlite_empty_mock_db) -> DBHandler: + """Pepare a sqlite db handler with the old style database table (without passed column)""" + mockdb = sqlite_empty_mock_db + db_cursor = mockdb.cursor() + db_cursor.execute( + """ +CREATE TABLE IF NOT EXISTS TEST_SESSIONS( + SESSION_H varchar(64) primary key not null unique, -- Session identifier + RUN_DATE varchar(64), -- Date of test run + SCM_ID varchar(128), -- SCM change id + RUN_DESCRIPTION json +);""" + ) + db_cursor.execute( + """ +CREATE TABLE IF NOT EXISTS EXECUTION_CONTEXTS ( + ENV_H varchar(64) primary key not null unique, + CPU_COUNT integer, + CPU_FREQUENCY_MHZ integer, + CPU_TYPE varchar(64), + CPU_VENDOR varchar(256), + RAM_TOTAL_MB integer, + MACHINE_NODE varchar(512), + MACHINE_TYPE varchar(32), + MACHINE_ARCH varchar(16), + SYSTEM_INFO varchar(256), + PYTHON_INFO varchar(512) +); +""" + ) + + db_cursor.execute( + """ +CREATE TABLE IF NOT EXISTS TEST_METRICS ( + SESSION_H varchar(64), -- Session identifier + ENV_H varchar(64), -- Environment description identifier + ITEM_START_TIME varchar(64), -- Effective start time of the test + ITEM_PATH varchar(4096), -- Path of the item, following Python import specification + ITEM varchar(2048), -- Name of the item + ITEM_VARIANT varchar(2048), -- Optional parametrization of an item. + ITEM_FS_LOC varchar(2048), -- Relative path from pytest invocation directory to the item's module. + KIND varchar(64), -- Package, Module or function + COMPONENT varchar(512) NULL, -- Tested component if any + TOTAL_TIME float, -- Total time spent running the item + USER_TIME float, -- time spent in user space + KERNEL_TIME float, -- time spent in kernel space + CPU_USAGE float, -- cpu usage + MEM_USAGE float, -- Max resident memory used. 
+ FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H), + FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H) +);""" + ) + + db_cursor.execute( + "insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)" + " values (?,?,?,?)", + ( + "1", + datetime.datetime.now().isoformat(), + determine_scm_revision(), + '{ "descr": "Test Session" }', + ), + ) + + db_cursor.execute( + "insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR," + "RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO," + "PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)", + ( + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + ), + ) + + # insert old style entry + db_cursor.execute( + "insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM," + "ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME," + "USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) " + "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ( + "1", + "1", + "Startdate", + "name of item", + "Item path", + "Optional Param", + "relative path", + None, + None, + 42, + 42, + 42, + 42, + 42, + ), + ) + db = DBHandler(":memory:") + db.__cnx = mockdb + db._DBHandler__cnx = mockdb + db._DBHandler__db = "mockdb" + + return db + +def test_sqlite_handler(): + """Ensure the Sqlite DB Handler works as expected""" + # db handler + db = DBHandler(":memory:") + session, metrics, exc_context = db.query( + "SELECT name FROM sqlite_master where type='table'", (), many=True + ) + assert session[0] == "TEST_SESSIONS" + assert metrics[0] == "TEST_METRICS" + assert exc_context[0] == "EXECUTION_CONTEXTS" + +def test_sqlite_handler_check_new_db_setup(): + """Check the Sqlite Handler initializes the new Test_Metrics table configuration""" + # db handler + db = DBHandler(":memory:") + table_cols = db.query("PRAGMA table_info(TEST_METRICS)", (), many=True) + assert any(column[1] == "TEST_PASSED" for column in table_cols) + +def test_sqlite_handler_check_create_test_passed_column( + prepared_mocked_SqliteDBHandler +): + """Check automatic migration from existing old database to new database style (passed column in TEST_METRICS)""" + # mockedDBHandler with old style database attached + mockedHandler = prepared_mocked_SqliteDBHandler + mock_cursor = mockedHandler.__cnx.cursor() + + # test for old style db + mock_cursor.execute("PRAGMA table_info(TEST_METRICS)") + has_test_column = any( + column[1] == "TEST_PASSED" for column in mock_cursor.fetchall() + ) + assert not has_test_column + + try: + # run function to test (migration) + mockedHandler.check_create_test_passed_column() + + # check for new column + mock_cursor = mockedHandler.__cnx.cursor() + mock_cursor.execute("PRAGMA table_info(TEST_METRICS)") + has_test_column = any( + column[1] == "TEST_PASSED" for column in mock_cursor.fetchall() + ) + assert has_test_column + + # check for default value TRUE in existing entry + mock_cursor.execute("SELECT TEST_PASSED FROM TEST_METRICS LIMIT 1") + default_is_passed = mock_cursor.fetchone() + + # default value true(1) for entries after migration + assert default_is_passed[0] == 1 + + except Exception: + raise + +
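Reviewer note (not part of the patch): the migration test above exercises DBHandler.check_create_test_passed_column. A condensed sketch of that migration, using the same PRAGMA/ALTER TABLE statements as handler.py but on a throwaway in-memory database::

    # illustrative only -- mirrors the column migration added in pytest_monitor/handler.py
    import sqlite3

    cnx = sqlite3.connect(":memory:")
    # old-style schema without the TEST_PASSED column (trimmed to two columns here)
    cnx.execute("CREATE TABLE TEST_METRICS (ITEM varchar(2048), MEM_USAGE float)")
    columns = [row[1] for row in cnx.execute("PRAGMA table_info(TEST_METRICS)")]
    if "TEST_PASSED" not in columns:
        # pre-existing rows default to TRUE, matching the behavior asserted by the test
        cnx.execute("ALTER TABLE TEST_METRICS ADD COLUMN TEST_PASSED BOOLEAN DEFAULT TRUE;")
        cnx.commit()

This is why the test expects TEST_PASSED to read back as 1 for rows inserted before the migration.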