diff --git a/.dockerfiles/Dockerfile b/.dockerfiles/Dockerfile index 3763e5bd..416f1ba4 100644 --- a/.dockerfiles/Dockerfile +++ b/.dockerfiles/Dockerfile @@ -5,7 +5,10 @@ FROM ubuntu:$UBUNTU_VERSION ARG LOGIN_USER RUN apt-get update && \ - apt-get -y install sudo + apt-get -y install sudo && \ + apt-get -y install openssh-server + +RUN mkdir /var/run/sshd && chmod 755 /var/run/sshd # Create a directory for the app code (keep the name generic) RUN mkdir -p /app @@ -14,6 +17,12 @@ RUN useradd -ms /bin/bash $LOGIN_USER && \ usermod -aG sudo $LOGIN_USER && \ echo "$LOGIN_USER ALL=(ALL) NOPASSWD:ALL" | sudo tee "/etc/sudoers.d/$LOGIN_USER" +RUN mkdir /ssh_pub_key && touch /ssh_pub_key/authorized_keys + USER $LOGIN_USER +RUN mkdir /home/$LOGIN_USER/.ssh && \ + chown $LOGIN_USER. /home/$LOGIN_USER/.ssh && \ + chmod 700 /home/$LOGIN_USER/.ssh && \ + ln -s /ssh_pub_key/authorized_keys /home/$LOGIN_USER/.ssh/authorized_keys WORKDIR /app diff --git a/.dockerfiles/docker-config.yml b/.dockerfiles/docker-config.yml index 46acfe8c..176b6a17 100644 --- a/.dockerfiles/docker-config.yml +++ b/.dockerfiles/docker-config.yml @@ -5,6 +5,6 @@ workers: - name: autotst2 - name: autotst3 queues: - - student - - single + - high + - low - batch diff --git a/.dockerfiles/entrypoint-dev.sh b/.dockerfiles/entrypoint-dev.sh index c2d0dfd5..64406cd2 100755 --- a/.dockerfiles/entrypoint-dev.sh +++ b/.dockerfiles/entrypoint-dev.sh @@ -2,9 +2,17 @@ set -e -if [ ! -f /.installed ]; then - /app/bin/install.sh -p '3.8' --docker - sudo touch /.installed +if [ ! -f "${HOME}/.installed" ]; then + /app/bin/install.sh -p '3.8' --docker --all-testers + + echo "export REDIS_URL=${REDIS_URL} + export PGHOST=${PGHOST} + export PGPORT=${PGPORT} + export AUTOTESTER_CONFIG=${AUTOTESTER_CONFIG} + " >> "${HOME}/.bash_profile" + touch "${HOME}/.installed" fi -exec "$@" +sudo "$(command -v sshd)" + +sudo -Eu "$(whoami)" -- "$@" # run command in new shell so that additional groups are loaded diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..6f40f6f7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,7 @@ +**/.DS_Store +.git/ +venv/ +.eggs/ +**/.pytest_cache +**/*.egg-info +.installed diff --git a/.flake8.ini b/.flake8.ini index 0e0f5037..93298bf0 100644 --- a/.flake8.ini +++ b/.flake8.ini @@ -1,3 +1,3 @@ [flake8] max-line-length = 120 -ignore = E266 +ignore = E266, BLK100 diff --git a/.gitignore b/.gitignore index b60fa3eb..1a0e31af 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,11 @@ __pycache__ *.egg-info .eggs venv +venv2 +build + +# tmp files +.installed # bin bin/kill_worker_procs diff --git a/Changelog.md b/Changelog.md index a21c670f..ce123906 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,7 +1,10 @@ # CHANGELOG All notable changes to this project will be documented here. -## [unreleased] +## [v1.10.0] +- Updated development docker image to connect to the development MarkUs docker image (#238) +- Removed Tasty-Stats as a dependency for the haskell tester and added our own ingredient instead to collect stats (#259) +- Updated/improved the interface between the autotester and clients that use it (#257) ## [1.9.0] - allow tests to write to existing subdirectories but not overwrite existing test script files (#237). 
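The updated `entrypoint-dev.sh` above exports `REDIS_URL`, `PGHOST`, `PGPORT` and `AUTOTESTER_CONFIG` into the login user's `~/.bash_profile` so that later shells (for example the enqueuer wrapper, which sources `.bash_profile`) see them; the configuration layer then substitutes such variables into any setting tagged `!ENV` (see `config_env_vars.yml` and `src/autotester/config.py` further down in this diff). The following is a minimal sketch of that substitution only — the real loader also merges the default, environment-variable and user configuration files and uses a "variable not found" marker instead of an empty-string fallback:

```python
# Minimal sketch of the "!ENV ${VAR}" substitution performed by the config
# loader; error handling and config-file merging are omitted here.
import os
import re
import yaml

REPLACEMENT_PATTERN = re.compile(r".*?\$\{(\w+)\}.*?")


def _env_constructor(loader, node):
    # Replace every ${VAR} occurrence in the scalar with the value of the
    # VAR environment variable (empty string if unset, for this sketch).
    value = loader.construct_scalar(node)
    for var in REPLACEMENT_PATTERN.findall(value):
        value = value.replace(f"${{{var}}}", os.environ.get(var, ""))
    return value


loader = yaml.SafeLoader
loader.add_implicit_resolver("!ENV", REPLACEMENT_PATTERN, None)
loader.add_constructor("!ENV", _env_constructor)

print(yaml.load("redis:\n  url: !ENV ${REDIS_URL}\n", Loader=loader))
# With REDIS_URL=redis://redis_autotest:6379/ this prints:
# {'redis': {'url': 'redis://redis_autotest:6379/'}}
```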
diff --git a/Layerfile b/Layerfile index 941d08dc..d35eb1a1 100644 --- a/Layerfile +++ b/Layerfile @@ -1,15 +1,11 @@ -FROM ubuntu:18.04 +FROM vm/ubuntu:18.04 RUN apt-get update && \ apt-get -y install python3 python3-pip python3-venv && \ rm -rf /var/lib/apt/lists/* -CHECKPOINT - RUN python3 -m venv /tmp/venv -CHECKPOINT - WORKDIR /app COPY . . RUN /tmp/venv/bin/python setup.py test diff --git a/README.md b/README.md index 0c936c0a..af835175 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,33 @@ [![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting) -Autotesting with Markus -============================== +Autotesting +=========== Autotesting allows instructors to run tests on students submissions and automatically create marks for them. It also allows students to run a separate set of tests and self-assess their submissions. -Autotesting consists of a client component integrated into MarkUs, and a standalone server component. Jobs are enqueued using the gem Resque with a first in first out strategy, and served one at a time or concurrently. ## Install and run ### Client -The autotesting client component is already included in a MarkUs installation. See the [Markus Configuration Options](#markus-configuration-options) section for how to configure your MarkUs installation to run tests with markus-autotesting. +The autotester currently only supports a MarkUs instance as a client. The autotesting client component is already +included in a MarkUs installation. See the [Markus Configuration Options](#markus-configuration-options) section for how +to configure your MarkUs installation to run tests with the autotester. + +If you would like to use a different client, please contact the developers of this project to discuss how to get the +autotester to work with your client. + +Client requirements: + +- REST Api that allows: + - download test script files + - download test settings + - download student files + - upload results + - optional: upload feedback files + - optional: upload source code annotations ### Server @@ -48,11 +62,11 @@ Installing the server will also install the following debian packages: - postgresql (if not running in a docker environment) - iptables (if not running in a docker environment) -This script may also add new users and create new postgres databases. See the [configuration](#markus-autotesting-configuration-options) section for more details. +This script may also add new users and create new postgres databases. See the [configuration](#autotesting-configuration-options) section for more details. 
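The "Client requirements" listed above correspond to the abstract `Client` interface this PR adds under `src/autotester/server/client_customizations/client.py`. As an illustration only — the `MyLMS` name, the REST paths and the `requests` dependency are placeholders, not part of this PR — a new client would subclass it roughly as follows and then be registered in the `_CLIENTS` mapping in `client_customizations/__init__.py`:

```python
# Hypothetical sketch of a non-MarkUs client. Only the method names come from
# the Client ABC added in this PR; the endpoints, "MyLMS" and the requests
# dependency are illustrative assumptions.
import json
from typing import Dict

import requests  # assumed HTTP library; not pinned by this PR

from autotester.server.client_customizations.client import Client
from autotester.server.utils.file_management import extract_zip_stream


class MyLMS(Client):
    client_type = "mylms"

    def __init__(self, **kwargs):
        self.url = kwargs["url"]
        self.api_key = kwargs["api_key"]
        self.assessment_id = kwargs["assessment_id"]
        self.run_id = kwargs["run_id"]

    def _get(self, path: str) -> requests.Response:
        return requests.get(f"{self.url}{path}", headers={"Authorization": self.api_key})

    def write_test_files(self, destination: str) -> None:  # download test script files
        zip_content = self._get(f"/assessments/{self.assessment_id}/test_files").content
        extract_zip_stream(zip_content, destination, ignore_root_dirs=1)  # depends on archive layout

    def get_test_specs(self) -> Dict:  # download test settings
        return self._get(f"/assessments/{self.assessment_id}/test_specs").json()

    def write_student_files(self, destination: str) -> None:  # download student files
        zip_content = self._get(f"/assessments/{self.assessment_id}/submission").content
        extract_zip_stream(zip_content, destination, ignore_root_dirs=1)

    def send_test_results(self, results) -> None:  # upload results
        requests.post(
            f"{self.url}/test_runs/{self.run_id}/results",
            headers={"Authorization": self.api_key},
            data=json.dumps(results),
        )

    def unique_script_str(self) -> str:
        return "_".join([self.client_type, self.url, str(self.assessment_id)])

    def unique_run_str(self) -> str:
        return "_".join([self.unique_script_str(), str(self.run_id)])

    def after_test(self, test_data: Dict, cwd: str) -> str:
        return ""  # optional feedback-file/annotation uploads would go here
```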
### Testers -The markus autotester currently supports testers for the following languages and testing frameworks: +The autotester currently supports testers for the following languages and testing frameworks: - `haskell` - [QuickCheck](http://hackage.haskell.org/package/QuickCheck) @@ -88,18 +102,18 @@ Installing each tester will also install the following additional packages: - `custom` - none -## Markus-autotesting configuration options +## Autotester configuration options These settings can be overridden or extended by including a configuration file in one of two locations: -- `${HOME}/.markus_autotester_config` (where `${HOME}` is the home directory of the user running the markus server) -- `/etc/markus_autotester_config` (for a system wide configuration) +- `${HOME}/.autotester_config` (where `${HOME}` is the home directory of the user running the supervisor process) +- `/etc/autotester_config` (for a system wide configuration) An example configuration file can be found in `doc/config_example.yml`. Please see below for a description of all options and defaults: ```yaml -workspace: # an absolute path to a directory containing all files/workspaces required to run the autotester default is - # ${HOME}/.markus-autotesting/workspace where ${HOME} is the home directory of the user running the autotester +workspace: # an absolute path to a directory containing all files/workspaces required to run the autotester. Default is + # ${HOME}/.autotesting/workspace where ${HOME} is the home directory of the user running the autotester server_user: # the username of the user designated to run the autotester itself. Default is the current user @@ -109,7 +123,8 @@ workers: reaper: # the username of a user used to clean up test processes. This value can be null (see details below) queues: # a list of queue names that these users will monitor and select test jobs from. # The order of this list indicates which queues have priority when selecting tests to run - # default is ['student', 'single', 'batch'] (see the "queues:" setting option below) + # This list may only contain the strings 'high', 'low', and 'batch'. + # default is ['high', 'low', 'batch'] redis: url: # url for the redis database. default is: redis://127.0.0.1:6379/0 @@ -129,14 +144,31 @@ resources: postgresql: port: # port the postgres server is running on host: # host the postgres server is running on +``` + +### Environment variables + +Certain settings can be specified with environment variables. If these environment variables are set, they will override +the corresponding setting in the configuration files: + +```yaml +workspace: # AUTOTESTER_WORKSPACE + +redis: + url: # REDIS_URL -queues: - - name: # the name of a queue used to enqueue test jobs (see details below) - schema: # a json schema used to validate the json representation of the arguments passed to the test_enqueuer script - # by MarkUs (see details below) +server_user: # AUTOTESTER_SERVER_USER + +supervisor: + url: # AUTOTESTER_SUPERVISOR_URL + +resources: + postgresql: + port: # PGPORT + host: # PGHOST ``` -### Markus-autotesting configuration details +### autotesting configuration details #### reaper users @@ -174,33 +206,19 @@ to test. #### queue names and schemas -When a test run is sent to the autotester from MarkUs, the test is not run immediately. Instead it is put in a queue and +When a test run is sent to the autotester from a client, the test is not run immediately. Instead it is put in a queue and run only when a worker user becomes available. 
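For reference, the queues named here are plain rq queues, and each worker drains them in the priority order given by its `queues:` list. Below is a minimal sketch using rq's Python API directly and the documented default Redis URL; the real installation launches workers through supervisord via `bin/generate_supervisord_conf.py` rather than calling rq from Python:

```python
# Minimal sketch: one worker that pops jobs from "high" before "low" before
# "batch", mirroring the default queues list in the worker configuration.
from redis import Redis
from rq import Queue, Worker

connection = Redis.from_url("redis://127.0.0.1:6379/0")  # default redis url setting
queues = [Queue(name, connection=connection) for name in ("high", "low", "batch")]
Worker(queues, connection=connection).work()
```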
You can choose to just have a single queue or multiple. -If using multiple queues, you can set a priority order for each worker user (see the `workers:` setting). The workers -will prioritize running tests from queues that appear earlier in the priority order. +If using multiple queues, you can set a priority order for each worker user (see the `workers:` setting). The default is +to select jobs in the 'high' queue first, then the jobs in the 'low' queue, and finally jobs in the 'batch' queue. -When MarkUs sends the test to the autotester, in order to decide which queue to put the test in, we inspect the json -string passed as an argument to the `markus_autotester` command (using either the `-j` or `-f` flags). This inspection -involves validating that json string against a [json schema validation](https://json-schema.org/) for each queue. If the -json string passes the validation for a certain queue, the test is added to that queue. - -For example, the default queue settings in the configuration are: - -```yaml -queues: - - name: batch - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} - - name: single - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} - - name: student - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} -``` +Note that not all workers need to be monitoring all queues. However, you should have at least one worker monitoring every +queue or else some jobs may never be run! -Under this default setup: - - a test with a non-null `batch_id` will be put in the `batch` queue. - - a test with a null `batch_id` and where `user_type == 'Admin'` will be put in the `single` queue - - a test with a null `batch_id` and where `user_type == 'Student'` will be put in the `student` queue +When a client sends the test to the autotester, in order to decide which queue to put the test in, we inspect the json +string passed as an argument to the `autotester` command (using either the `-j` or `-f` flags). If there is more +than one test to enqueue, all jobs will be put in the 'batch' queue; if there is a single test and the `request_high_priority` +keyword argument is `True`, the job will be put in the 'high' queue; otherwise, the job will be put in the 'low' queue. ## MarkUs configuration options @@ -240,18 +258,18 @@ can be `nil`, forcing `config.x.autotest.server_host` to be `localhost` and loca ##### config.x.autotest.server_dir The directory on the autotest server where temporary files are copied. -This should be the same as the `workspace` directory in the markus-autotesting config file. +This should be the same as the `workspace` directory in the autotesting config file. (multiple MarkUs instances can use the same directory) ##### config.x.autotest.server_command -The command to run on the markus-autotesting server that runs the wrapper script that calls `markus_autotester`. +The command to run on the autotesting server that runs the wrapper script that calls `autotester`. In most cases, this should be set to `'autotest_enqueuer'` ## The Custom Tester -The markus-autotesting server supports running arbitrary scripts as a 'custom' tester. This script will be run using the custom tester and results from this test script will be parsed and reported to MarkUs in the same way as any other tester would. +The autotesting server supports running arbitrary scripts as a 'custom' tester. 
This script will be run using the custom tester and results from this test script will be parsed and reported to MarkUs in the same way as any other tester would. Any custom script should report the results individual test cases by writing a json string to stdout in the following format: diff --git a/bin/generate_supervisord_conf.py b/bin/generate_supervisord_conf.py index e8d3c066..6b9e1529 100755 --- a/bin/generate_supervisord_conf.py +++ b/bin/generate_supervisord_conf.py @@ -17,7 +17,7 @@ """ CONTENT = """[program:rq_worker_{worker_user}] -environment=MARKUSWORKERUSER={worker_user} +environment=WORKERUSER={worker_user} command={rq} worker {worker_args} {queues} process_name=rq_worker_{worker_user} numprocs={numprocs} diff --git a/bin/install.sh b/bin/install.sh index 3167c300..10580809 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -56,7 +56,8 @@ install_packages() { postgresql-client \ libpq-dev \ openssh-server \ - gcc + gcc \ + rsync if [ -z "${DOCKER}" ]; then sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install iptables postgresql fi @@ -119,6 +120,7 @@ _create_unprivileged_user() { echo "[AUTOTEST-INSTALL] worker users are not restricted from accessing redis in a docker installation" fi echo "${SERVER_USER} ALL=(${username}) NOPASSWD:ALL" | sudo EDITOR="tee -a" visudo + sudo usermod -a -G "${username}" "${SERVER_USER}" } _create_worker_and_reaper_users() { @@ -237,12 +239,13 @@ create_default_tester_venv() { local default_tester_venv default_tester_venv="${WORKSPACE_SUBDIRS[SPECS]}/${DEFAULT_VENV_NAME}/venv" + rm -rf "${default_tester_venv}" "python${PYTHON_VERSION}" -m venv "${default_tester_venv}" local pip pip="${default_tester_venv}/bin/pip" ${pip} install --upgrade pip ${pip} install wheel # must be installed before requirements - ${pip} install "${TESTERSROOT}" + ${pip} install -e "${TESTERSROOT}" } compile_reaper_script() { @@ -261,25 +264,31 @@ create_enqueuer_wrapper() { echo "[AUTOTEST-INSTALL] Creating enqueuer wrapper at '${enqueuer}'" echo "#!/usr/bin/env bash - ${SERVER_VENV}/bin/markus_autotester \"\$@\"" | sudo tee ${enqueuer} > /dev/null + source \${HOME}/.bash_profile + ${SERVER_VENV}/bin/autotester \"\$@\"" | sudo tee ${enqueuer} > /dev/null sudo chown "${SERVER_USER}:${SERVER_USER}" "${enqueuer}" sudo chmod u=rwx,go=r ${enqueuer} - } start_workers() { local supervisorconf local generate_script local rq + local worker_cmd supervisorconf="${WORKSPACE_SUBDIRS[LOGS]}/supervisord.conf" generate_script="${BINDIR}/generate_supervisord_conf.py" rq="${SERVER_VENV}/bin/rq" + worker_cmd="${PYTHON} ${generate_script} ${rq} ${supervisorconf}" echo "[AUTOTEST-INSTALL] Generating supervisor config at '${supervisorconf}' and starting rq workers" - sudo -u "${SERVER_USER}" -- bash -c "${PYTHON} ${generate_script} ${rq} ${supervisorconf} && - ${BINDIR}/start-stop.sh start" + if [ -z "${DOCKER}" ]; then + echo "[AUTOTEST-INSTALL] Starting rq workers" + worker_cmd="${worker_cmd} && ${BINDIR}/start-stop.sh start" + fi + + sudo -Eu "${SERVER_USER}" -- bash -c "${worker_cmd}" } install_testers() { @@ -292,12 +301,16 @@ install_testers() { fi for tester in "${to_install[@]}"; do echo "[AUTOTEST-INSTALL] installing tester: ${tester}" - "${TESTERSROOT}/testers/${tester}/bin/install.sh" + if [ -n "${NON_INTERACTIVE}" ]; then + "${TESTERSROOT}/testers/${tester}/bin/install.sh" --non-interactive + else + "${TESTERSROOT}/testers/${tester}/bin/install.sh" + fi done } suggest_next_steps() { - echo "[AUTOTEST-INSTALL] You must add MarkUs web server's public 
key to ${SERVER_USER}'s '~/.ssh/authorized_keys'" + echo "[AUTOTEST-INSTALL] You must add the client's public key to ${SERVER_USER}'s '~/.ssh/authorized_keys'" echo "[AUTOTEST-INSTALL] You may want to add '${BINDIR}/start-stop.sh start' to ${SERVER_USER}'s crontab with a @reboot time" } diff --git a/bin/start-stop.sh b/bin/start-stop.sh index 08e57c37..33e462df 100755 --- a/bin/start-stop.sh +++ b/bin/start-stop.sh @@ -10,15 +10,7 @@ RQ="${PROJECTROOT}/venv/bin/rq" SUPERVISORD="${PROJECTROOT}/venv/bin/supervisord" start_supervisor() { - local pid_file - pid_file="${LOGS_DIR}/supervisord.pid" - if [ -f "${pid_file}" ]; then - local supervisor_pid - supervisor_pid=$(cat "${pid_file}") - echo "Supervisor appears to be running already (PID: ${supervisor_pid})" >&2 - exit 1 - fi - (cd "${LOGS_DIR}" && ${SUPERVISORD} -c supervisord.conf) + (cd "${LOGS_DIR}" && ${SUPERVISORD} -c supervisord.conf "$@") } stop_supervisor() { @@ -56,14 +48,14 @@ fi case $1 in start) - start_supervisor + start_supervisor "${@:2}" ;; stop) stop_supervisor ;; restart) stop_supervisor - start_supervisor + start_supervisor "${@:2}" ;; stat) "${RQ}" info --url "${REDIS_URL}" "${@:2}" diff --git a/doc/config_example.yml b/doc/config_example.yml index 1d4d0110..4532dd01 100644 --- a/doc/config_example.yml +++ b/doc/config_example.yml @@ -1,4 +1,4 @@ -workspace: !ENV ${HOME}/.markus-autotesting/workspace +workspace: !ENV ${HOME}/.autotesting/workspace server_user: !ENV ${USER} @@ -7,8 +7,8 @@ workers: - name: !ENV ${USER} reaper: null queues: - - student - - single + - high + - low - batch redis: @@ -29,11 +29,3 @@ resources: postgresql: port: 5432 host: localhost - -queues: - - name: batch - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} - - name: single - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} - - name: student - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} diff --git a/doc/hooks_example.py b/doc/hooks_example.py deleted file mode 100644 index 6ba8e782..00000000 --- a/doc/hooks_example.py +++ /dev/null @@ -1,87 +0,0 @@ -import glob -import os -import json -import shutil -import subprocess - - -# Helper functions -def upload_svn_file( - api, - group_repo_name, - assignment_name, - file_name, - svn_user, - svn_password, - commit_message, -): - repo_url = f"{api.parsed_url.scheme}://{api.parsed_url.netloc}/svn{api.parsed_url.path}/{group_repo_name}" - svn_co_command = [ - "svn", - "co", - "--username", - svn_user, - "--password", - svn_password, - repo_url, - ] - subprocess.run(svn_co_command, capture_output=True, check=True) - repo_file_path = os.path.join(group_repo_name, assignment_name, file_name) - previous_file = os.path.isfile(repo_file_path) - shutil.copy2(file_name, repo_file_path) - if not previous_file: - svn_add_command = ["svn", "add", repo_file_path] - subprocess.run(svn_add_command, capture_output=True, check=True) - svn_ci_command = [ - "svn", - "ci", - "--username", - svn_user, - "--password", - svn_password, - "-m", - commit_message, - repo_file_path, - ] - subprocess.run(svn_ci_command, capture_output=True, check=True) - - -# Hooks -def before_all(_api, _assignment_id, _group_id, _group_repo_name): - # clean up unwanted files - pattern = os.path.join("**", "*.o") - for file_path in glob.glob(pattern, recursive=True): - os.remove(file_path) - - -def before_each(_api, _assignment_id, _group_id, _group_repo_name): - pass - - -def after_each(_api, 
_assignment_id, _group_id, _group_repo_name): - pass - - -def after_all(api, assignment_id, group_id, group_repo_name): - # upload feedback file - feedback_name = "feedback_pyta.txt" - if os.path.isfile(feedback_name): - with open(feedback_name) as feedback_open: - api.upload_feedback_file( - assignment_id, group_id, feedback_name, feedback_open.read() - ) - # upload in svn repo - upload_svn_file( - api, - group_repo_name, - "AX", - feedback_name, - "svn_user", - "svn_password", - "Feedback file", - ) - # upload annotations - annotations_name = "feedback_pyta.json" - if os.path.isfile(annotations_name): - with open(annotations_name) as annotations_open: - api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) diff --git a/docker-compose.yml b/docker-compose.yml index f326e27d..8a5c56f5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,19 +8,24 @@ services: args: UBUNTU_VERSION: '18.04' LOGIN_USER: 'docker' - image: markus-autotest-dev:1.0.0 + image: autotest-dev:1.0.0 stdin_open: true tty: true user: docker volumes: - .:/app:cached + - ssh_pub_key:/ssh_pub_key:ro + - workspace:/home/docker/.autotesting:rw environment: - USER=docker - REDIS_URL=redis://redis_autotest:6379/ - PGHOST=postgres_autotest - PGPORT=5432 - EDITOR=vi - - MARKUS_AUTOTESTER_CONFIG=/app/.dockerfiles/docker-config.yml + - AUTOTESTER_CONFIG=/app/.dockerfiles/docker-config.yml + networks: + - default + - markus_dev depends_on: - postgres_autotest - redis_autotest @@ -28,16 +33,16 @@ services: autotest: <<: *app entrypoint: .dockerfiles/entrypoint-dev.sh - command: '/bin/bash' + container_name: autotest + command: './bin/start-stop.sh start --nodaemon --logfile -' postgres_autotest: image: postgres:10 volumes: - - .psqlrc:/root/.psqlrc:ro - postgres_autotest:/var/lib/postgresql/data - - ./log:/root/log:cached environment: - PSQL_HISTFILE=/root/log/.psql_history + - POSTGRES_HOST_AUTH_METHOD=trust ports: - '45432:5432' @@ -48,6 +53,23 @@ services: ports: - 6379 + rq_dashboard: + image: eoranged/rq-dashboard + environment: + - RQ_DASHBOARD_REDIS_URL=redis://redis_autotest:6379/ + ports: + - '9181:9181' + depends_on: + - redis_autotest + volumes: postgres_autotest: redis_autotest: + workspace: + ssh_pub_key: + name: ssh_pub_key + +networks: + markus_dev: + external: + name: markus_dev diff --git a/setup.py b/setup.py index ac57e82a..8a6985ae 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] setup( - name="markus-autotester", + name="autotester", version="2.0", description="Automatic tester for programming assignments", url="https://github.com/MarkUsProject/markus-autotesting", @@ -20,11 +20,11 @@ "supervisor==4.1.0", "PyYAML==5.1.2", "psycopg2-binary==2.8.4", - "markusapi==0.0.1", + "markusapi==0.1.1", "jsonschema==3.0.2", ], tests_require=["pytest==5.3.1", "hypothesis==4.47.3", "fakeredis==1.1.0"], setup_requires=["pytest-runner"], include_package_data=True, - entry_points={"console_scripts": "markus_autotester = autotester.cli:cli"}, + entry_points={"console_scripts": "autotester = autotester.cli:cli"}, ) diff --git a/src/autotester/server/hooks_context/__init__.py b/src/__init__.py similarity index 100% rename from src/autotester/server/hooks_context/__init__.py rename to src/__init__.py diff --git a/src/autotester/MANIFEST.in b/src/autotester/MANIFEST.in index fc768370..a9528a17 100644 --- a/src/autotester/MANIFEST.in +++ b/src/autotester/MANIFEST.in @@ -1,3 +1,4 @@ -include testers/racket/lib/markus.rkt +include 
testers/haskell/lib/Stats.hs +include testers/racket/lib/autotester.rkt graft testers/java/lib include testers/*/specs/install_settings.json diff --git a/src/autotester/cli.py b/src/autotester/cli.py index 0fb47693..048b6f20 100755 --- a/src/autotester/cli.py +++ b/src/autotester/cli.py @@ -5,69 +5,30 @@ import argparse import rq import json -import inspect -import glob import time -import shutil +from typing import TypeVar, List, Dict from rq.exceptions import NoSuchJobError -from functools import wraps from autotester.exceptions import ( - JobArgumentError, - InvalidQueueError, TestScriptFilesError, TestParameterError, - MarkUsError, + AutotestError, ) from autotester.server.utils.redis_management import ( redis_connection, get_avg_pop_interval, test_script_directory, ) -from autotester.server.utils.file_management import ignore_missing_dir_error from autotester.config import config -from autotester.server.utils import form_validation +from autotester.server.utils import form_management from autotester.server.server import run_test, update_test_specs +from autotester.server.client_customizations import get_client, ClientType SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] - -def _format_job_id(markus_address, run_id, **_kw): - """ - Return a unique job id for each enqueued job - based on the markus_address and the run_id - """ - return "{}_{}".format(markus_address, run_id) - - -def _check_args(func, args=None, kwargs=None): - """ - Raises an error if calling the function func with args and - kwargs would raise an error. - """ - args = args or [] - kwargs = kwargs or {} - try: - inspect.signature(func).bind(*args, **kwargs) - except TypeError as e: - raise JobArgumentError( - "{}\nWith args: {}\nWith kwargs:{}".format(e, args, tuple(kwargs)) - ) - - -def _get_queue(**kw): - """ - Return a queue. The returned queue is one whose condition function - returns True when called with the arguments in **kw. - """ - for queue in config["queues"]: - if form_validation.is_valid(kw, queue["schema"]): - return rq.Queue(queue["name"], connection=redis_connection()) - raise InvalidQueueError( - "cannot enqueue job: unable to determine correct queue type" - ) +ExtraArgType = TypeVar("ExtraArgType", str, int, float) -def _print_queue_info(queue): +def _print_queue_info(queue: rq.Queue) -> None: """ Print to stdout the estimated time to service for a new job being added to the queue. This is calculated based on the average pop interval @@ -78,113 +39,87 @@ def _print_queue_info(queue): print(avg_pop_interval * count) -def _check_test_script_files_exist(markus_address, assignment_id, **_kw): - if test_script_directory(markus_address, assignment_id) is None: - raise TestScriptFilesError( - "cannot find test script files: please upload some before running tests" - ) - - -def _clean_on_error(func): +def _check_test_script_files_exist(client: ClientType) -> None: """ - Remove files_path directories from the working dir if a function raises an error. - - Note: the files_path directory must be passed to the function as a keyword argument. + Raise a TestScriptFilesError if the tests script files for this test cannot be found. 
""" + if test_script_directory(client.unique_script_str()) is None: + raise TestScriptFilesError("cannot find test script files: please upload some before running tests") - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception: - files_path = kwargs.get("files_path") - if files_path: - shutil.rmtree(files_path, onerror=ignore_missing_dir_error) - raise - return wrapper - - -def _get_job_timeout(test_specs, test_categories, multiplier=1.5): +def _get_job_timeouts(client: ClientType, multiplier: float = 1.5) -> int: """ - Return multiplier times the sum of all timeouts in the - dictionary - - Raises a RuntimeError if there are no elements in test_data that - have the category + Return an integer equal to multiplier times the sum of all timeouts in the + test_specs dictionary. """ + test_files_dir = test_script_directory(client.unique_script_str()) + with open(os.path.join(test_files_dir, SETTINGS_FILENAME)) as f: + test_specs = json.load(f) total_timeout = 0 - test_data_count = 0 for settings in test_specs["testers"]: for test_data in settings["test_data"]: - test_category = test_data.get("category", []) - if set(test_category) & set( - test_categories - ): # TODO: ensure test_categories is non-string collection type - total_timeout += test_data.get( - "timeout", 30 - ) # TODO: don't hardcode default timeout - test_data_count += 1 - if test_data_count: + total_timeout += test_data["timeout"] + if total_timeout: return int(total_timeout * multiplier) - raise TestParameterError( - f"there are no tests of the given categories: {test_categories}" - ) + raise TestParameterError(f"There are no tests to run") -@_clean_on_error -def enqueue_test(user_type, batch_id, **kw): +def _select_queue(is_batch: bool, request_high_priority: bool) -> rq.Queue: """ - Enqueue a test run job with keyword arguments specified in **kw + Return a queue. + + Return the batch queue iff is_batch is True. + Otherwise return the high queue if request_high_priority is True and return the low queue otherwise. """ - kw["enqueue_time"] = time.time() - queue = _get_queue(user_type=user_type, batch_id=batch_id, **kw) - _check_args(run_test, kwargs=kw) - _check_test_script_files_exist(**kw) - test_files_dir = test_script_directory(kw["markus_address"], kw["assignment_id"]) - with open(os.path.join(test_files_dir, SETTINGS_FILENAME)) as f: - test_specs = json.load(f) - _print_queue_info(queue) - timeout = _get_job_timeout(test_specs, kw["test_categories"]) - queue.enqueue_call( - run_test, kwargs=kw, job_id=_format_job_id(**kw), timeout=timeout - ) + if is_batch: + return rq.Queue("batch", connection=redis_connection()) + elif request_high_priority: + return rq.Queue("high", connection=redis_connection()) + else: + return rq.Queue("low", connection=redis_connection()) -@_clean_on_error -def update_specs(test_specs, schema=None, **kw): +def enqueue_tests( + client_type: str, client_data: Dict, test_data: List[Dict], request_high_priority: bool = False +) -> None: """ - Run test spec update function after validating the form data. + Enqueue test run jobs with keyword arguments specified in test_data. + + Prints the queue information to stdout (see _print_queue_info). 
""" - if schema is not None: - error = form_validation.validate_with_defaults( - schema, test_specs, best_only=True - ) - if error: - raise error - update_test_specs(test_specs=test_specs, **kw) + if not test_data: + raise TestParameterError("test_data cannot be empty") + client = get_client(client_type, client_data) + _check_test_script_files_exist(client) + timeout = _get_job_timeouts(client) + queue = _select_queue(len(test_data) > 1, request_high_priority) + _print_queue_info(queue) + for data in test_data: + kwargs = { + "client_type": client_type, + "test_data": {**client_data, **data}, + "enqueue_time": time.time(), + "test_categories": data["test_categories"], + } + queue.enqueue_call(run_test, kwargs=kwargs, job_id=client.unique_run_str(), timeout=timeout) -def cancel_test(markus_address, run_ids, **_kw): +def cancel_tests(client_type: str, client_data: Dict, test_data: List[Dict]) -> None: """ - Cancel a test run job with the job_id defined using - markus_address and run_id. + Cancel a test run job with enqueued with the same """ with rq.Connection(redis_connection()): - for run_id in run_ids: - job_id = _format_job_id(markus_address, run_id) + for data in test_data: + client = get_client(client_type, {**client_data, **data}) try: - job = rq.job.Job.fetch(job_id) + job = rq.job.Job.fetch(client.unique_run_str()) except NoSuchJobError: - return + continue if job.is_queued(): - files_path = job.kwargs["files_path"] - if files_path: - shutil.rmtree(files_path, onerror=ignore_missing_dir_error) job.cancel() -def get_schema(**_kw): +def get_schema(**_kw: ExtraArgType) -> None: """ Print a json to stdout representing a json schema that indicates the required specs for each installed tester type. @@ -192,70 +127,34 @@ def get_schema(**_kw): This json schema should be used to generate a UI with react-jsonschema-form (https://github.com/mozilla-services/react-jsonschema-form) or similar. """ - this_dir = os.path.dirname(os.path.abspath(__file__)) - - with open(os.path.join(this_dir, "lib", "tester_schema_skeleton.json")) as f: - schema_skeleton = json.load(f) - - glob_pattern = os.path.join(this_dir, "testers", "*", "specs", ".installed") - for path in sorted(glob.glob(glob_pattern)): - tester_type = os.path.basename(os.path.dirname(os.path.dirname(path))) - specs_dir = os.path.dirname(path) - with open(os.path.join(specs_dir, "settings_schema.json")) as f: - tester_schema = json.load(f) - - schema_skeleton["definitions"]["installed_testers"]["enum"].append(tester_type) - schema_skeleton["definitions"]["tester_schemas"]["oneOf"].append(tester_schema) - - print(json.dumps(schema_skeleton)) - - -def parse_arg_file(arg_file): - """ - Load arg_file as a json and return a dictionary - containing the keyword arguments to be pased to - one of the commands. - The file is them immediately removed if remove - is True. 
- - Note: passing arguments in a file like this makes - is more secure because it means the (potentially - sensitive) arguments are not passed through a terminal - or with stdin, both of which are potentially - accessible using tools like `ps` - """ - - with open(arg_file) as f: - kwargs = json.load(f) - if "files_path" not in kwargs: - kwargs["files_path"] = os.path.dirname(os.path.realpath(f.name)) - os.remove(arg_file) - return kwargs + print(json.dumps(form_management.get_schema())) COMMANDS = { - "run": enqueue_test, - "specs": update_specs, - "cancel": cancel_test, + "run": enqueue_tests, + "specs": update_test_specs, + "cancel": cancel_tests, "schema": get_schema, } -def cli(): +def cli() -> None: + """ + Entrypoint for the command line interface for the autotester package. + + This function is invoked when the autotester command is called + from the command line. + """ parser = argparse.ArgumentParser() parser.add_argument("command", choices=COMMANDS) - group = parser.add_mutually_exclusive_group(required=False) - group.add_argument("-f", "--arg_file", type=parse_arg_file) - group.add_argument("-j", "--arg_json", type=json.loads) + parser.add_argument("-j", "--arg_json", type=json.loads) args = parser.parse_args() - kwargs = args.arg_file or args.arg_json or {} - try: - COMMANDS[args.command](**kwargs) - except MarkUsError as e: + COMMANDS[args.command](**args.arg_json) + except AutotestError as e: print(str(e)) sys.exit(1) diff --git a/src/autotester/config.py b/src/autotester/config.py index 83afc95c..4a0546c6 100644 --- a/src/autotester/config.py +++ b/src/autotester/config.py @@ -4,15 +4,36 @@ import os import re import json +from typing import ( + Optional, + Pattern, + Tuple, + Union, + TypeVar, + ClassVar, + List, + Dict, + Callable, +) from collections.abc import Mapping import yaml DEFAULT_ROOT = os.path.join(os.path.dirname(__file__), "config_defaults") -CONFIG_FILENAME = "markus_autotester_config" -CONFIG_ENV_VAR = "MARKUS_AUTOTESTER_CONFIG" +CONFIG_FILENAME = "autotester_config" +CONFIG_ENV_VAR = "AUTOTESTER_CONFIG" -def _find_local_config(): +ConfigValues = TypeVar("ConfigValues", List, Dict, str, int, float, type(None)) + + +def _find_local_config() -> Optional[str]: + """ + Return the file name of the local configuration file if it exists. + + Returns the file specified by the AUTOTESTER_CONFIG environment variable, + otherwise returns the file at $HOME/.autotester_config, + otherwise returns the file at /etc/autotester_config + """ system_config = os.path.join(os.path.sep, "etc", CONFIG_FILENAME) user_config = os.path.join(os.environ.get("HOME"), f".{CONFIG_FILENAME}") env_config = os.environ.get(CONFIG_ENV_VAR) @@ -27,24 +48,37 @@ def _find_local_config(): class _Config: - _local_config = _find_local_config() - _default_config = os.path.join(DEFAULT_ROOT, "config_default.yml") - _env_var_config = os.path.join(DEFAULT_ROOT, "config_env_vars.yml") - _replacement_pattern = re.compile(r".*?\${(\w+)}.*?") - _not_found_key = "!VARIABLE NOT FOUND!" + _local_config: ClassVar[Optional[str]] = _find_local_config() + _default_config: ClassVar[str] = os.path.join(DEFAULT_ROOT, "config_default.yml") + _env_var_config: ClassVar[str] = os.path.join(DEFAULT_ROOT, "config_env_vars.yml") + _replacement_pattern: ClassVar[Pattern] = re.compile(r".*?\${(\w+)}.*?") + _not_found_key: ClassVar[str] = "!VARIABLE NOT FOUND!" 
- def __init__(self): + def __init__(self) -> None: + """ + Initialize a configuration object that stores the configuration settings + after loading them from one or several configuration file. + + Loading configuration files may also require resolving values by parsing + environment variables. + """ self._yaml_loader = yaml.SafeLoader self._yaml_loader.add_implicit_resolver("!ENV", self._replacement_pattern, None) - env_constructor = self._constructor_factory( - lambda g: os.environ.get(g, self._not_found_key) - ) + env_constructor = self._constructor_factory(lambda g: os.environ.get(g, self._not_found_key)) self._yaml_loader.add_constructor("!ENV", env_constructor) self._settings = self._load_from_yaml() - def __getitem__(self, key): + def __getitem__(self, key: Union[str, Tuple[Union[str, int], ...]]) -> ConfigValues: + """ + Return the value in self._settings that corresponds to key. + + If key is a tuple, then this method will chain __getitem__ calls. + For example: + + config['a', 'b', 'c'] is equivalent to config['a']['b']['c'] + """ try: return self._settings[key] except KeyError: @@ -55,11 +89,18 @@ def __getitem__(self, key): return d raise - def to_json(self): + def to_json(self) -> str: + """ + Return a json representation of self._settings + """ return json.dumps(self._settings) @classmethod - def _merge_dicts(cls, dicts): + def _merge_dicts(cls, dicts: List[Dict[str, ConfigValues]]) -> Dict[str, ConfigValues]: + """ + Returns a merged dictionary created from merging all dictionaries in dicts + in order. + """ try: _merged = dicts[0].copy() except AttributeError: @@ -73,8 +114,26 @@ def _merge_dicts(cls, dicts): _merged[key] = cls._merge_dicts([_merged[key], val]) return _merged - def _constructor_factory(self, replacement_func): + def _constructor_factory(self, replacement_func: Callable) -> Callable: + """ + Returns a constructor function used to load data from a yaml file. + + Meant for use when calling yaml.add_constructor to add a new constructor + for a given yaml tag. + """ + def constructor(loader, node, pattern=self._replacement_pattern): + """ + Replaces the value of a yaml node with the return value of replacement_func + if the current value matches _replacement_pattern. + + Used in this case to replace values with the !ENV tag with the environment + variable that follows the !ENV tag in curly braces. For example, if there is + currently an environment variable WORKSPACE with value '/workspace': + + replaces: !{ENV} ${WORKSPACE} + with: '/workspace' + """ value = loader.construct_scalar(node) match = pattern.findall(value) if match: @@ -86,7 +145,11 @@ def constructor(loader, node, pattern=self._replacement_pattern): return constructor - def _load_from_yaml(self): + def _load_from_yaml(self) -> Dict[str, ConfigValues]: + """ + Returns a dictionary containing all data loaded from the various yaml + configuration files (default and user specified). + """ config_dicts = [] if self._local_config is not None and os.path.isfile(self._local_config): with open(self._local_config) as f: diff --git a/src/autotester/config_defaults/config_default.yml b/src/autotester/config_defaults/config_default.yml index 10bb0c69..8359d382 100644 --- a/src/autotester/config_defaults/config_default.yml +++ b/src/autotester/config_defaults/config_default.yml @@ -2,21 +2,17 @@ # Settings prefixed with an underscore are technically overwritable by # a local settings file but it is not recommended. 
-queues: - - name: batch - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'number'}}} - - name: single - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Admin'}}} - - name: student - schema: {'type': 'object', 'properties': {'batch_id': {'type': 'null'}, 'user_type': {'const': 'Student'}}} +server_user: !ENV ${USER} + +workspace: !ENV ${HOME}/.autotesting/ workers: - users: - name: !ENV ${USER} reaper: null queues: - - student - - single + - high + - low - batch redis: @@ -52,4 +48,3 @@ _workspace_contents: _default_venv_name: defaultvenv _settings_file: settings.json _files_dir: files - _hooks_file: hooks.py diff --git a/src/autotester/config_defaults/config_env_vars.yml b/src/autotester/config_defaults/config_env_vars.yml index 6a34ac11..522b61aa 100644 --- a/src/autotester/config_defaults/config_env_vars.yml +++ b/src/autotester/config_defaults/config_env_vars.yml @@ -1,12 +1,12 @@ -workspace: !ENV ${HOME}/.markus-autotesting/ +workspace: !ENV ${AUTOTESTER_WORKSPACE} redis: url: !ENV ${REDIS_URL} -server_user: !ENV ${USER} +server_user: !ENV ${AUTOTESTER_SERVER_USER} supervisor: - url: !ENV ${SUPERVISOR_URL} + url: !ENV ${AUTOTESTER_SUPERVISOR_URL} resources: postgresql: diff --git a/src/autotester/exceptions.py b/src/autotester/exceptions.py index 6e02f341..c7a278cc 100644 --- a/src/autotester/exceptions.py +++ b/src/autotester/exceptions.py @@ -1,31 +1,25 @@ """ -Custom Exception Type for use in MarkUs +Custom Exception Types """ -class MarkUsError(Exception): - pass +class AutotestError(Exception): + """ Generic Autotester Error """ -class TesterCreationError(MarkUsError): - pass +class TesterCreationError(AutotestError): + """ Error raised when a tester environment could not be created """ -class TesterUserError(MarkUsError): - pass +class TesterUserError(AutotestError): + """ Error raised when a tester user is not available """ -class JobArgumentError(MarkUsError): - pass +class TestScriptFilesError(AutotestError): + """ Error raised when test script files cannot be found for a given test job """ -class InvalidQueueError(MarkUsError): - pass - - -class TestScriptFilesError(MarkUsError): - pass - - -class TestParameterError(MarkUsError): - pass +class TestParameterError(AutotestError): + """ + Error raised when the value of the arguments used to enqueue a test job are invalid + """ diff --git a/src/autotester/lib/tester_schema_skeleton.json b/src/autotester/lib/tester_schema_skeleton.json index 47c7e85d..b1b76916 100644 --- a/src/autotester/lib/tester_schema_skeleton.json +++ b/src/autotester/lib/tester_schema_skeleton.json @@ -45,10 +45,6 @@ } } } - }, - "hooks_file": { - "$ref": "#/definitions/files_list", - "title": "Custom hooks file" } } } diff --git a/src/autotester/resources/ports/__init__.py b/src/autotester/resources/ports/__init__.py index eb5f8d36..51db5e6d 100644 --- a/src/autotester/resources/ports/__init__.py +++ b/src/autotester/resources/ports/__init__.py @@ -8,7 +8,7 @@ REDIS_PORT_INT = f"{REDIS_PREFIX}{config['resources', 'port', '_redis_int']}" -def next_port(): +def next_port() -> int: """ Return a port number that is greater than the last time this method was called (by any process on this machine). @@ -18,8 +18,8 @@ def next_port(): return int(r.incr(REDIS_PORT_INT) or 0) % (PORT_MAX - PORT_MIN) + PORT_MIN -def get_available_port(host="localhost"): - """ Return the next available open port on . 
""" +def get_available_port(host: str = "localhost") -> str: + """ Return the next available open port on host. """ while True: try: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: diff --git a/src/autotester/resources/postgresql/__init__.py b/src/autotester/resources/postgresql/__init__.py index 15eadce5..688b2eb7 100644 --- a/src/autotester/resources/postgresql/__init__.py +++ b/src/autotester/resources/postgresql/__init__.py @@ -3,36 +3,37 @@ import psycopg2 import secrets import string +from typing import Dict from psycopg2.extensions import AsIs from autotester.config import config POSTGRES_PREFIX = config["resources", "postgresql", "_prefix"] -PGPASSFILE = os.path.join( - config["workspace"], config["_workspace_contents", "_logs"], ".pgpass" -) +PGPASSFILE = os.path.join(config["workspace"], config["_workspace_contents", "_logs"], ".pgpass") +PGHOST = config["resources", "postgresql", "host"] -def setup_database(test_username): +def setup_database(test_username: str) -> Dict[str, str]: + """ + Return a dictionary containing all the information required + to connect to the dedicated postgres database for user + test_username. + + This dictionary is meant to be used to set environment variables + for the process that will run the test. + """ user = getpass.getuser() database = f"{POSTGRES_PREFIX}{test_username}" with open(PGPASSFILE) as f: password = f.read().strip() - with psycopg2.connect( - database=database, user=user, password=password, host="localhost" - ) as conn: + with psycopg2.connect(database=database, user=user, password=password, host=PGHOST) as conn: with conn.cursor() as cursor: cursor.execute("DROP OWNED BY CURRENT_USER;") if test_username != user: user = test_username - password = "".join( - secrets.choice(string.ascii_letters + string.digits) - for _ in range(20) - ) - cursor.execute( - "ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password) - ) + password = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20)) + cursor.execute("ALTER USER %s WITH PASSWORD %s;", (AsIs(user), password)) return { "PGDATABASE": database, diff --git a/src/autotester/server/client_customizations/__init__.py b/src/autotester/server/client_customizations/__init__.py new file mode 100644 index 00000000..b47efd4e --- /dev/null +++ b/src/autotester/server/client_customizations/__init__.py @@ -0,0 +1,11 @@ +from typing import Dict +from autotester.server.client_customizations import markus, client + +ClientType = client.Client + +_CLIENTS = {"markus": markus.MarkUs} + + +def get_client(client_type: str, init_kwargs: Dict) -> ClientType: + """ Return a client of initialized with """ + return _CLIENTS[client_type](**init_kwargs) diff --git a/src/autotester/server/client_customizations/client.py b/src/autotester/server/client_customizations/client.py new file mode 100644 index 00000000..2abda7dd --- /dev/null +++ b/src/autotester/server/client_customizations/client.py @@ -0,0 +1,55 @@ +from abc import ABC, abstractmethod +from typing import Dict, Callable +from functools import wraps + + +class Client(ABC): + @abstractmethod + def write_test_files(self, destination: str) -> None: + """ Get test files from the client and write them to """ + pass + + @abstractmethod + def get_test_specs(self) -> Dict: + """ Get and Return test specs from the client """ + pass + + @abstractmethod + def write_student_files(self, destination: str) -> None: + """ Get student files from the client and write them to """ + pass + + @abstractmethod + def send_test_results(self, 
results) -> None: + """ Send test results to the client """ + pass + + @abstractmethod + def unique_script_str(self) -> str: + """ Return a unique string to represent the test scripts used to run tests """ + pass + + @abstractmethod + def unique_run_str(self) -> str: + """ Return a unique string to represent an individual run of tests """ + pass + + @staticmethod + def _return_error_str(f: Callable) -> Callable: + @wraps(f) + def return_error_str(*args, **kwargs): + try: + f(*args, **kwargs) + except Exception as e: + return str(e) + return "" + + return return_error_str + + @abstractmethod + def after_test(self, test_data: Dict, cwd: str) -> str: + """ + Run after each test where test_data is the data for the test and cwd the directory where the test is run. + Return any error messages. + """ + pass diff --git a/src/autotester/server/client_customizations/markus.py b/src/autotester/server/client_customizations/markus.py new file mode 100644 index 00000000..c361ed79 --- /dev/null +++ b/src/autotester/server/client_customizations/markus.py @@ -0,0 +1,97 @@ +import os +import json +import markusapi +from typing import Dict +from autotester.server.client_customizations.client import Client +from autotester.server.utils.file_management import extract_zip_stream +from autotester.exceptions import TestScriptFilesError + + +class MarkUs(Client): + client_type = "markus" + + def __init__(self, **kwargs): + self.url = kwargs.get("url") + self.api_key = kwargs.get("api_key") + self.assignment_id = kwargs.get("assignment_id") + self.group_id = kwargs.get("group_id") + self.run_id = kwargs.get("run_id") + self.user_type = kwargs.get("user_type") + self._api = markusapi.Markus(self.api_key, self.url) + + def write_test_files(self, destination: str) -> None: + """ Get test files from the client and write them to """ + zip_content = self._api.get_test_files(self.assignment_id) + if zip_content is None: + raise TestScriptFilesError("No test files found") + extract_zip_stream(zip_content, destination, ignore_root_dirs=1) + + def get_test_specs(self) -> Dict: + """ Get and Return test specs from the client """ + return self._api.get_test_specs(self.assignment_id) + + def write_student_files(self, destination: str) -> None: + """ Get student files from the client and write them to """ + collected = self.user_type == "Admin" + zip_content = self._api.get_files_from_repo(self.assignment_id, self.group_id, collected=collected) + if zip_content is None: + raise TestScriptFilesError("No test files found") + extract_zip_stream(zip_content, destination, ignore_root_dirs=2) + + def send_test_results(self, results_data: Dict) -> None: + """ Send test results to the client """ + self._api.upload_test_group_results(self.assignment_id, self.group_id, self.run_id, json.dumps(results_data)) + + def unique_script_str(self) -> str: + """ Return a unique string to represent the test scripts used to run tests """ + return "_".join([self.client_type, self.url, str(self.assignment_id)]) + + def unique_run_str(self) -> str: + """ Return a unique string to represent an individual run of tests """ + return "_".join([self.unique_script_str(), str(self.run_id)]) + + def after_test(self, test_data: Dict, cwd: str) -> str: + """ + Upload feedback files and annotations and return any error messages. 
+ """ + hooks_error = "" + feedback_file = test_data.get("feedback_file_name") + annotation_file = test_data.get("annotation_file") + if feedback_file: + feedback_file = os.path.join(cwd, feedback_file) + if test_data.get("upload_feedback_file"): + hooks_error += self._return_error_str(self.upload_feedback_file)(feedback_file) + if test_data.get("upload_feedback_to_repo"): + hooks_error += self._return_error_str(self.upload_feedback_to_repo)(feedback_file) + if annotation_file and test_data.get("upload_annotations"): + annotation_file = os.path.join(cwd, annotation_file) + hooks_error += self._return_error_str(self.upload_annotations)(annotation_file) + return hooks_error + + def upload_feedback_to_repo(self, feedback_file: str) -> None: + """ + Upload the feedback file to the group's repo. + """ + if os.path.isfile(feedback_file): + with open(feedback_file) as feedback_open: + self._api.upload_file_to_repo( + self.assignment_id, self.group_id, os.path.basename(feedback_file), feedback_open.read() + ) + + def upload_feedback_file(self, feedback_file: str) -> None: + """ + Upload the feedback file using MarkUs' api. + """ + if os.path.isfile(feedback_file): + with open(feedback_file) as feedback_open: + self._api.upload_feedback_file( + self.assignment_id, self.group_id, os.path.basename(feedback_file), feedback_open.read() + ) + + def upload_annotations(self, annotation_file: str) -> None: + """ + Upload annotations using MarkUs' api. + """ + if os.path.isfile(annotation_file): + with open(annotation_file) as annotations_open: + self._api.upload_annotations(self.assignment_id, self.group_id, json.load(annotations_open)) diff --git a/src/autotester/server/client_customizations/tests/__init__.py b/src/autotester/server/client_customizations/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/server/client_customizations/tests/client_test.py b/src/autotester/server/client_customizations/tests/client_test.py new file mode 100644 index 00000000..d38dbf86 --- /dev/null +++ b/src/autotester/server/client_customizations/tests/client_test.py @@ -0,0 +1,15 @@ +from autotester.server.client_customizations import _CLIENTS +from autotester.server.client_customizations.client import Client + + +class TestClient: + pass + + +for client_class in _CLIENTS.values(): + + def func(self): + """ Checks if all abstract methods in Client are implemented """ + assert not Client.__abstractmethods__ - set(vars(client_class).keys()) + + setattr(TestClient, f"test_{client_class}_implements_all_abstract", func) diff --git a/src/autotester/server/hooks_context/builtin_hooks.py b/src/autotester/server/hooks_context/builtin_hooks.py deleted file mode 100644 index 5929a0aa..00000000 --- a/src/autotester/server/hooks_context/builtin_hooks.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Builtin hooks used by hooks_context.Hooks -""" - -import os -import json -import pkgutil -import importlib -from autotester import testers - -HOOKS = { - "upload_feedback_file": {"context": "after_each"}, - "upload_feedback_to_repo": { - "requires": ["clear_feedback_file"], - "context": "after_each", - }, - "upload_annotations": {"context": "after_each"}, - "clear_feedback_file": {"context": "before_each"}, -} - - -def clear_feedback_file(test_data, **_kwargs): - """ - Remove any previous feedback file before the tests run. 
- """ - feedback_file = test_data.get("feedback_file_name", "") - if os.path.isfile(feedback_file): - os.remove(feedback_file) - - -def upload_feedback_to_repo(api, assignment_id, group_id, test_data, **_kwargs): - """ - Upload the feedback file to the group's repo. - """ - feedback_file = test_data.get("feedback_file_name", "") - if os.path.isfile(feedback_file): - with open(feedback_file) as feedback_open: - api.upload_file_to_repo( - assignment_id, group_id, feedback_file, feedback_open.read() - ) - - -def upload_feedback_file(api, assignment_id, group_id, test_data, **_kwargs): - """ - Upload the feedback file using MarkUs' api. - """ - feedback_file = test_data.get("feedback_file_name", "") - if os.path.isfile(feedback_file): - with open(feedback_file) as feedback_open: - api.upload_feedback_file( - assignment_id, group_id, feedback_file, feedback_open.read() - ) - - -def upload_annotations(api, assignment_id, group_id, test_data, **_kwargs): - """ - Upload annotations using MarkUs' api. - """ - annotations_name = test_data.get("annotation_file", "") - if os.path.isfile(annotations_name): - with open(annotations_name) as annotations_open: - api.upload_annotations(assignment_id, group_id, json.load(annotations_open)) - - -def _load_default_hooks(): - """ - Return a dictionary containing all hooks loaded from any default_hooks.py in the testers package. - """ - defaults = {} - for _finder, name, _ispkg in pkgutil.walk_packages( - testers.__path__, f"{testers.__name__}." - ): - if name.endswith("default_hooks"): - default_hooks = importlib.import_module(name) - for hook in default_hooks.HOOKS: - defaults[hook.__name__] = hook - return defaults - - -DEFAULT_HOOKS = _load_default_hooks() diff --git a/src/autotester/server/hooks_context/hooks_context.py b/src/autotester/server/hooks_context/hooks_context.py deleted file mode 100644 index 9a2dedb2..00000000 --- a/src/autotester/server/hooks_context/hooks_context.py +++ /dev/null @@ -1,287 +0,0 @@ -""" -This module contains the Hooks class which is used to load and run -hooks defined in buitin_hooks.py, a custom hooks file and all -default_hooks.py files in testers/testers/*/bin directories. -""" - -import os -import traceback -from collections import defaultdict, deque -from collections.abc import Callable -from contextlib import contextmanager -from autotester.server.hooks_context import builtin_hooks -from autotester.server.utils.path_management import current_directory, add_path - - -class Hooks: - """ - The hooks class is used to load and run all hooks that may be used while running tests. - - Loads three types of hooks: - - custom hooks: hook functions contained in a user created file python file. - - builtin hooks: hook functions contained in builtin_hooks.py - - default hooks: hook functions contained in any default_hooks.py file in tester bin/ - directories. - - For default and custom hooks, the name of the hook function indicates when each hook should - be run relative to the code contained in the self.around context manager. - For example, a function named 'before_all' will be run before a block of code (when - tester_type=='all'), a function named 'after_each' will be run after a block (when - tester_type=='each'), a function named 'before_all_racket' will be run before a block of code - (when tester_type=='racket'). - - Acceptable hook names are (in execution order): - before_all - before_all_X - before_each_X - before_each - after_each - after_each_X - after_all_X - after_all - - where X is the name of a tester (eg. 
'racket', 'py', etc.). - - Builtin hooks can have any name and when they are executed is instead determined by the values - associated to their name in the builtin_hooks.HOOKS dictionary. - """ - - HOOK_BASENAMES = ["before_all", "before_each", "after_all", "after_each"] - - def __init__( - self, custom_hooks_path=None, testers=None, cwd=None, args=None, kwargs=None - ): - """ - Create a new Hooks object instance with args: - - custom_hooks_path: is a path to a file containing custom hooks - (see doc/hooks.py for an example) - testers: a list of tester names to load default hooks from (eg: ['py', 'haskell']) - cwd: all hooks will be run as if cwd is this directory (or actual current directory if None) - args: args to pass to each hook - kwargs: kwargs to pass to each hook - """ - self.custom_hooks_path = custom_hooks_path - self.testers = [] if testers is None else testers - self.cwd = cwd - self.args = [] if args is None else args - self.kwargs = {} if kwargs is None else kwargs - self.load_errors = [] - self.run_errors = [] - self.hooks = self._load_all() - self._context = deque() - - @staticmethod - def _select_builtins(tester_info, _info=None): - """ - Return a nested dictionary containing the hooks to apply based - based on tester_info. Hook functions are included in the output - dictionary only if their name is a key in tester_info and the - value for that key is truthy. - - The keys in the output dictionaries are determined by the value - of the 'context' key in builtin_hooks.HOOKS and any dependencies - specified in builtin_hooks.HOOKS are loaded as well. - - >>> tester_info = {'upload_feedback_file': True} - >>> Hooks._select_builtins(tester_info) - {'each': {'after': []}} - - >>> tester_info = {'upload_feedback_file': False} - >>> Hooks._select_builtins(tester_info) - {} - - >>> tester_info = {'upload_feedback_to_repo': True} # has clear_feedback_file as a dependency - >>> Hooks._select_builtins(tester_info) - {'each': {'after': [], - 'before': []}} - """ - if _info is None: - _info = defaultdict(lambda: defaultdict(list)) - - for func_name, data in builtin_hooks.HOOKS.items(): - if tester_info.get(func_name): - hook_type, hook_context = data["context"].split("_") - func = getattr(builtin_hooks, func_name) - if func not in _info.get(hook_context, {}).get(hook_type, set()): - _info[hook_context][hook_type].append(func) - for requires in data.get("requires", []): - Hooks._select_builtins({requires: True}, _info) - return _info - - @staticmethod - def _merge_hook_dicts(*hook_dicts): - """ - Return a dictionary created by merging all dictionaries in hook_dicts. - hook_dicts are merged by concatenating all value lists together. These - lists are then sorted according to whether their names are in HOOK_BASENAMES - and then alphabetically. This ensures that hooks are always executed in the same - order. - """ - merged = defaultdict(list) - for d in hook_dicts: - for key, hooks in d.items(): - merged[key].extend(h for h in hooks if h) - for key, hooks in merged.items(): - merged[key] = sorted( - (h for h in hooks if h), - key=lambda x: (x.__name__ in Hooks.HOOK_BASENAMES, x.__name__), - reverse=(key == "after"), - ) - return merged - - def _load_all(self): - """ - Return a dictionary containing all hooks that may be run over the course of a test run. - This dictionary contains three nested levels: The key at the first level indicates the - name of the tester ('py', 'haskell', etc.) or None if the hook is one of HOOK_BASENAMES. 
- The key at the second level indicates the context in which the hook should be run ('all' - or 'each'). The key at the third level indicates when the hook should be run ('before' or - 'after'). The values at the third level contain a list of hooks. - - Hook functions are loaded from builtin_hooks.HOOKS, builtin_hooks.DEFAULT_HOOKS (which are - loaded from the default_hooks.py files for each tester) and the custom_hooks_path file (if - specified). - """ - hooks = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) - - custom_hooks_module = self._load_module(self.custom_hooks_path) - - for hook_name in Hooks.HOOK_BASENAMES: - hook_type, hook_context = hook_name.split( - "_" - ) # eg. "before_all" -> ("before", "all") - custom_hook = self._load_hook(custom_hooks_module, hook_name) - builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(hook_name) - hooks[None][hook_context][hook_type].extend([custom_hook, builtin_hook]) - for tester_type in self.testers: - tester_hook_name = f"{hook_name}_{tester_type}" - custom_hook = self._load_hook(custom_hooks_module, tester_hook_name) - builtin_hook = builtin_hooks.DEFAULT_HOOKS.get(tester_hook_name) - hooks[tester_type][hook_context][hook_type].extend( - [custom_hook, builtin_hook] - ) - return hooks - - def _load_module(self, hooks_script_path): - """ - Return module loaded from hook_script_path. Log any error - messages that were raised when trying to import the module - to self.load_errors. - """ - hooks_script_path = os.path.realpath(hooks_script_path) - if os.path.isfile(hooks_script_path): - dirpath = os.path.dirname(hooks_script_path) - basename = os.path.basename(hooks_script_path) - module_name, _ = os.path.splitext(basename) - try: - with add_path(dirpath): - hooks_module = __import__(module_name) - return hooks_module - except Exception as e: - self.load_errors.append((module_name, f"{traceback.format_exc()}\n{e}")) - return None - - def _load_hook(self, module, function_name): - """ - Return function named function_name from module or None if the function - doesn't exist in that module's namespace or if the function is not a - Callable object. - """ - try: - func = getattr(module, function_name) - if isinstance(func, Callable): - return func - else: - self.load_errors.append( - (module.__name__, f"hook function {function_name} is not callable") - ) - except AttributeError: - return - - def _run(self, func, extra_args=None, extra_kwargs=None): - """ - Run the function func with positional and keyword arguments obtained by - merging self.args with extra_args and self.kwargs with extra_kwargs. - """ - args = self.args + (extra_args or []) - kwargs = {**self.kwargs, **(extra_kwargs or {})} - try: - func(*args, **kwargs) - except BaseException as e: - self.run_errors.append( - (func.__name__, args, kwargs, f"{traceback.format_exc()}\n{e}") - ) - - def _get_hooks(self, tester_type, builtin_selector=None): - """ - Return list of hooks to run before and after a given block of code according - to the tester_type and the builtin_selector dictionary. tester_type should be - either 'all', 'each', or the name of a tester ('pyta', 'sql', etc.) and the - builtin_selector is passed to Hooks._select_builtins to select which builtin hooks - should be used. Also return the result of Hooks._select_builtins or an empty dict - if no builtin hooks are used. 
- """ - builtin_hook_dict = Hooks._select_builtins(builtin_selector or {}) - if tester_type == "all": - hooks = self.hooks.get(None, {}).get("all", {}) - elif tester_type == "each": - hooks = self.hooks.get(None, {}).get("each", {}) - other_hooks = [builtin_hook_dict.get("each", {})] - for context in self._context: - context_hooks = self.hooks.get(context, {}).get("each", {}) - other_hooks.append(context_hooks) - hooks = Hooks._merge_hook_dicts(hooks, *other_hooks) - else: - hooks = self.hooks.get(tester_type, {}).get("all", {}) - hooks = Hooks._merge_hook_dicts(hooks, builtin_hook_dict.get("all", {})) - return hooks.get("before", []), hooks.get("after", []) - - @contextmanager - def around( - self, - tester_type, - builtin_selector=None, - extra_args=None, - extra_kwargs=None, - cwd=None, - ): - """ - Context manager used to run hooks around any block of code. Hooks are selected based on the tester type (one - of 'all', 'each', or the name of a tester), a builtin_selector (usually the test settings for a given test - group). If extra_args or extra_kwargs are specified, these will be passed to each hook in addition to self.args - and self.kwargs. If cwd is specified, each hook will be run as if the current working directory were cwd. - """ - before, after = self._get_hooks(tester_type, builtin_selector) - if tester_type not in {"all", "each"}: - self._context.append(tester_type) - try: - if any(before) or any(after): - try: - with current_directory(cwd or self.cwd): - for hook in before: - self._run(hook, extra_args, extra_kwargs) - yield - finally: - with current_directory(cwd or self.cwd): - for hook in after: - self._run(hook, extra_args, extra_kwargs) - else: - yield - finally: - if tester_type not in {"all", "each"}: - self._context.pop() - - def format_errors(self): - """ - Return a string containing the data from self.load_errors and self.run_errors. 
- """ - error_list = [] - for module_name, tb in self.load_errors: - error_list.append(f"module_name: {module_name}\ntraceback:\n{tb}") - for hook_name, args, kwargs, tb in self.run_errors: - error_list.append( - f"function_name: {hook_name}\n" - f"args: {args}\nkwargs: {kwargs},\ntraceback:\n{tb}" - ) - return "\n\n".join(error_list) diff --git a/src/autotester/server/server.py b/src/autotester/server/server.py index 07bb4c51..6ec82fa7 100755 --- a/src/autotester/server/server.py +++ b/src/autotester/server/server.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import os +import sys import shutil import time import json @@ -8,89 +9,75 @@ import signal import rq import tempfile -from markusapi import Markus +import getpass +from typing import Optional, Dict, Union, List, Tuple from autotester.exceptions import TesterCreationError from autotester.config import config -from autotester.server.hooks_context.hooks_context import Hooks -from autotester.server.utils.string_management import ( - loads_partial_json, - decode_if_bytes, - stringify, -) +from autotester.server.utils.string_management import loads_partial_json, decode_if_bytes from autotester.server.utils.user_management import ( get_reaper_username, - current_user, tester_user, ) from autotester.server.utils.file_management import ( - random_tmpfile_name, - clean_dir_name, - setup_files, + copy_tree, ignore_missing_dir_error, - fd_open, - fd_lock, - move_tree, -) -from autotester.server.utils.resource_management import ( - set_rlimits_before_cleanup, - set_rlimits_before_test, + recursive_iglob, + clean_dir_name, ) +from autotester.server.utils.resource_management import set_rlimits_before_test from autotester.server.utils.redis_management import ( clean_after, test_script_directory, update_pop_interval_stat, ) +from autotester.server.utils.form_management import validate_against_schema from autotester.resources.ports import get_available_port from autotester.resources.postgresql import setup_database +from autotester.server.client_customizations import get_client, ClientType DEFAULT_ENV_DIR = config["_workspace_contents", "_default_venv_name"] -TEST_RESULT_DIR = os.path.join( - config["workspace"], config["_workspace_contents", "_results"] -) -HOOKS_FILENAME = config["_workspace_contents", "_hooks_file"] +TEST_RESULT_DIR = os.path.join(config["workspace"], config["_workspace_contents", "_results"]) SETTINGS_FILENAME = config["_workspace_contents", "_settings_file"] FILES_DIRNAME = config["_workspace_contents", "_files_dir"] -TEST_SPECS_DIR = os.path.join( - config["workspace"], config["_workspace_contents", "_specs"] -) -TEST_SCRIPT_DIR = os.path.join( - config["workspace"], config["_workspace_contents", "_scripts"] -) +TEST_SPECS_DIR = os.path.join(config["workspace"], config["_workspace_contents", "_specs"]) +TEST_SCRIPT_DIR = os.path.join(config["workspace"], config["_workspace_contents", "_scripts"]) TESTER_IMPORT_LINE = { - "custom": "from testers.custom.markus_custom_tester import MarkusCustomTester as Tester", - "haskell": "from testers.haskell.markus_haskell_tester import MarkusHaskellTester as Tester", - "java": "from testers.java.markus_java_tester import MarkusJavaTester as Tester", - "py": "from testers.py.markus_python_tester import MarkusPythonTester as Tester", - "pyta": "from testers.pyta.markus_pyta_tester import MarkusPyTATester as Tester", - "racket": "from testers.racket.markus_racket_tester import MarkusRacketTester as Tester", + "custom": "from testers.custom.custom_tester import CustomTester as Tester", + "haskell": 
"from testers.haskell.haskell_tester import HaskellTester as Tester", + "java": "from testers.java.java_tester import JavaTester as Tester", + "py": "from testers.py.python_tester import PythonTester as Tester", + "pyta": "from testers.pyta.pyta_tester import PyTATester as Tester", + "racket": "from testers.racket.racket_tester import RacketTester as Tester", } +ResultData = Dict[str, Union[str, int, type(None), Dict]] -def run_test_command(test_username=None): + +def _run_test_command(test_username: Optional[str] = None) -> str: """ Return a command used to run test scripts as a the test_username user, with the correct arguments. Set test_username to None to run as the current user. >>> test_script = 'mysscript.py' - >>> run_test_command('f').format(test_script) + >>> _run_test_command('f').format(test_script) 'sudo -u f -- bash -c "./myscript.py"' - >>> run_test_command().format(test_script) + >>> _run_test_command().format(test_script) './myscript.py' """ cmd = "{}" if test_username is not None: - cmd = " ".join( - ("sudo", "-Eu", test_username, "--", "bash", "-c", "'{}'".format(cmd)) - ) + cmd = " ".join(("sudo", "-Eu", test_username, "--", "bash", "-c", "'{}'".format(cmd))) return cmd -def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None): +def _create_test_group_result( + stdout: str, stderr: str, run_time: int, extra_info: Dict, timeout: Optional[int] = None, +) -> ResultData: """ Return the arguments passed to this function in a dictionary. If stderr is falsy, change it to None. Load the json string in stdout as a dictionary. @@ -106,7 +93,7 @@ def create_test_group_result(stdout, stderr, run_time, extra_info, timeout=None) } -def kill_with_reaper(test_username): +def _kill_with_reaper(test_username: str) -> bool: """ Try to kill all processes currently being run by test_username using the method described in this article: https://lwn.net/Articles/754980/. 
Return True if this @@ -124,25 +111,22 @@ def kill_with_reaper(test_username): reaper_username = get_reaper_username(test_username) if reaper_username is not None: cwd = os.path.dirname(os.path.abspath(__file__)) - kill_file_dst = random_tmpfile_name() - preexec_fn = set_rlimits_before_cleanup() - - copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format( - test_username, kill_file_dst - ) - copy_proc = subprocess.Popen( - copy_cmd, shell=True, preexec_fn=preexec_fn, cwd=cwd - ) - if copy_proc.wait() < 0: # wait returns the return code of the proc - return False - - kill_cmd = "sudo -u {} -- bash -c {}".format(reaper_username, kill_file_dst) - kill_proc = subprocess.Popen(kill_cmd, shell=True, preexec_fn=preexec_fn) + + with tempfile.NamedTemporaryFile() as f: + copy_cmd = "sudo -u {0} -- bash -c 'cp kill_worker_procs {1} && chmod 4550 {1}'".format( + test_username, f.name + ) + copy_proc = subprocess.Popen(copy_cmd, shell=True, cwd=cwd) + if copy_proc.wait() < 0: # wait returns the return code of the proc + return False + + kill_cmd = "sudo -u {} -- bash -c {}".format(reaper_username, f.name) + kill_proc = subprocess.Popen(kill_cmd, shell=True) return kill_proc.wait() == 0 return False -def kill_without_reaper(test_username): +def _kill_without_reaper(test_username: str) -> None: """ Kill all processes that test_username is able to kill """ @@ -150,7 +134,7 @@ def kill_without_reaper(test_username): subprocess.run(kill_cmd, shell=True) -def create_test_script_command(env_dir, tester_type): +def _create_test_script_command(env_dir: str, tester_type: str) -> str: """ Return string representing a command line command to run tests. @@ -159,154 +143,124 @@ def create_test_script_command(env_dir, tester_type): python_lines = [ "import sys, json", import_line, - "from testers.markus_test_specs import MarkusTestSpecs", - f"Tester(specs=MarkusTestSpecs.from_json(sys.stdin.read())).run()", + "from testers.specs import TestSpecs", + f"Tester(specs=TestSpecs.from_json(sys.stdin.read())).run()", ] - python_ex = os.path.join( - os.path.join(TEST_SPECS_DIR, env_dir), "venv", "bin", "python" - ) + python_ex = os.path.join(os.path.join(TEST_SPECS_DIR, env_dir), "venv", "bin", "python") python_str = "; ".join(python_lines) return f'{python_ex} -c "{python_str}"' -def get_env_vars(test_username): +def _get_env_vars(test_username: str) -> Dict[str, str]: """ Return a dictionary containing all environment variables to pass to the next test """ db_env_vars = setup_database(test_username) port_number = get_available_port() return {"PORT": port_number, **db_env_vars} -def run_test_specs(cmd, test_specs, test_categories, tests_path, test_username, hooks): +def _run_test_specs( + cmd: str, client: ClientType, test_categories: List[str], tests_path: str, test_username: str, +) -> Tuple[List[ResultData], str]: """ Run each test script in test_scripts in the tests_path directory using the command cmd. Return the results. 
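# Illustrative sketch of how the command template from _run_test_command above is
# combined with the tester command (built by _create_test_script_command) before it
# is handed to subprocess. The username and script name are invented; the assertions
# mirror the implementation shown above rather than real runtime values.
def run_test_command(test_username=None):
    cmd = "{}"
    if test_username is not None:
        cmd = " ".join(("sudo", "-Eu", test_username, "--", "bash", "-c", "'{}'".format(cmd)))
    return cmd


assert run_test_command("autotst1").format("./myscript.py") == "sudo -Eu autotst1 -- bash -c './myscript.py'"
assert run_test_command().format("./myscript.py") == "./myscript.py"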
""" results = [] - preexec_fn = set_rlimits_before_test() - - with hooks.around("all"): - for settings in test_specs["testers"]: - tester_type = settings["tester_type"] - extra_hook_kwargs = {"settings": settings} - with hooks.around(tester_type, extra_kwargs=extra_hook_kwargs): - env_dir = settings.get("env_loc", DEFAULT_ENV_DIR) - - cmd_str = create_test_script_command(env_dir, tester_type) - args = cmd.format(cmd_str) - - for test_data in settings["test_data"]: - test_category = test_data.get("category", []) - if set(test_category) & set( - test_categories - ): # TODO: make sure test_categories is non-string collection type - extra_hook_kwargs = {"test_data": test_data} - with hooks.around( - "each", - builtin_selector=test_data, - extra_kwargs=extra_hook_kwargs, - ): - start = time.time() - out, err = "", "" - timeout_expired = None - timeout = test_data.get("timeout") - try: - env_vars = get_env_vars(test_username) - proc = subprocess.Popen( - args, - start_new_session=True, - cwd=tests_path, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - stdin=subprocess.PIPE, - preexec_fn=preexec_fn, - env={**os.environ, **env_vars}, - ) - try: - settings_json = json.dumps( - {**settings, "test_data": test_data} - ).encode("utf-8") - out, err = proc.communicate( - input=settings_json, timeout=timeout - ) - except subprocess.TimeoutExpired: - if test_username == current_user(): - pgrp = os.getpgid(proc.pid) - os.killpg(pgrp, signal.SIGKILL) - else: - if not kill_with_reaper(test_username): - kill_without_reaper(test_username) - out, err = proc.communicate() - timeout_expired = timeout - except Exception as e: - err += "\n\n{}".format(e) - finally: - out = decode_if_bytes(out) - err = decode_if_bytes(err) - duration = int(round(time.time() - start, 3) * 1000) - extra_info = test_data.get("extra_info", {}) - results.append( - create_test_group_result( - out, err, duration, extra_info, timeout_expired - ) - ) - return results, hooks.format_errors() - - -def store_results(results_data, markus_address, assignment_id, group_id, submission_id): + hook_errors = "" + + test_specs = _get_test_specs(client) + + for settings in test_specs["testers"]: + tester_type = settings["tester_type"] + env_dir = settings.get("env_loc", DEFAULT_ENV_DIR) + + cmd_str = _create_test_script_command(env_dir, tester_type) + args = cmd.format(cmd_str) + + for test_data in settings["test_data"]: + test_category = test_data.get("category", []) + if set(test_category) & set(test_categories): + start = time.time() + out, err = "", "" + timeout_expired = None + timeout = test_data.get("timeout") + try: + env_vars = _get_env_vars(test_username) + proc = subprocess.Popen( + args, + start_new_session=True, + cwd=tests_path, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + preexec_fn=set_rlimits_before_test, + env={**os.environ, **env_vars}, + ) + try: + settings_json = json.dumps({**settings, "test_data": test_data}).encode("utf-8") + out, err = proc.communicate(input=settings_json, timeout=timeout) + except subprocess.TimeoutExpired: + if test_username == getpass.getuser(): + pgrp = os.getpgid(proc.pid) + os.killpg(pgrp, signal.SIGKILL) + else: + if not _kill_with_reaper(test_username): + _kill_without_reaper(test_username) + out, err = proc.communicate() + timeout_expired = timeout + hook_errors += client.after_test(test_data, tests_path) + except Exception as e: + err += "\n\n{}".format(e) + finally: + out = decode_if_bytes(out) + err = decode_if_bytes(err) + duration = 
int(round(time.time() - start, 3) * 1000) + extra_info = test_data.get("extra_info", {}) + results.append(_create_test_group_result(out, err, duration, extra_info, timeout_expired)) + return results, hook_errors + + +def _store_results(client: ClientType, results_data: Dict[str, Union[List[ResultData], str, int]]) -> None: """ Write the results of multiple test script runs to an output file as a json string. - The output file is located at: - {TEST_RESULT_DIR}/{markus_address}/{assignment_id}/{group_id}/{submission_id}/ouput.json - """ - clean_markus_address = clean_dir_name(markus_address) - run_time = "run_{}".format(int(time.time())) - destination = os.path.join( - *stringify( - TEST_RESULT_DIR, - clean_markus_address, - assignment_id, - group_id, - "s{}".format(submission_id or ""), - run_time, - ) - ) - os.makedirs(destination, exist_ok=True) - with open(os.path.join(destination, "output.json"), "w") as f: + """ + destination = os.path.join(TEST_RESULT_DIR, clean_dir_name(client.unique_run_str())) + ".json" + with open(destination, "w") as f: json.dump(results_data, f, indent=4) -def clear_working_directory(tests_path, test_username): +def _clear_working_directory(tests_path: str, test_username: str) -> None: """ Run commands that clear the tests_path working directory """ - if test_username != current_user(): - chmod_cmd = "sudo -u {} -- bash -c 'chmod -Rf ugo+rwX {}'".format( - test_username, tests_path - ) + if test_username != getpass.getuser(): + chmod_cmd = f"sudo -u {test_username} -- bash -c 'chmod -Rf ugo+rwX {tests_path}'" else: - chmod_cmd = "chmod -Rf ugo+rwX {}".format(tests_path) + chmod_cmd = f"chmod -Rf ugo+rwX {tests_path}" subprocess.run(chmod_cmd, shell=True) # be careful not to remove the tests_path dir itself since we have to # set the group ownership with sudo (and that is only done in ../install.sh) - clean_cmd = "rm -rf {0}/.[!.]* {0}/*".format(tests_path) + clean_cmd = f"rm -rf {tests_path}/.[!.]* {tests_path}/*" subprocess.run(clean_cmd, shell=True) -def stop_tester_processes(test_username): +def _stop_tester_processes(test_username: str) -> None: """ Run a command that kills all tester processes either by killing all user processes or killing with a reaper user (see https://lwn.net/Articles/754980/ for reference). 
""" - if test_username != current_user(): - if not kill_with_reaper(test_username): - kill_without_reaper(test_username) + if test_username != getpass.getuser(): + if not _kill_with_reaper(test_username): + _kill_without_reaper(test_username) -def finalize_results_data(results, error, all_hooks_error, time_to_service): +def _finalize_results_data( + results: List[ResultData], error: str, all_hooks_error: str, time_to_service: int +) -> Dict[str, Union[List[ResultData], str, int]]: """ Return a dictionary of test script results combined with test run info """ return { "test_groups": results, @@ -316,26 +270,67 @@ def finalize_results_data(results, error, all_hooks_error, time_to_service): } -def report(results_data, api, assignment_id, group_id, run_id): - """ Post the results of running test scripts to the markus api """ - api.upload_test_group_results( - assignment_id, group_id, run_id, json.dumps(results_data) - ) +def _get_test_specs(client: ClientType) -> Dict: + test_script_path = test_script_directory(client.unique_script_str()) + test_specs_path = os.path.join(test_script_path, SETTINGS_FILENAME) + + with open(test_specs_path) as f: + return json.load(f) + + +def _copy_test_script_files(client: ClientType, tests_path: str) -> List[Tuple[str, str]]: + """ + Copy test script files for a given assignment to the tests_path + directory if they exist. tests_path may already exist and contain + files and subdirectories. + + If the call to copy_tree raises a FileNotFoundError because the test + script directory has changed, retry the call to this function. + """ + test_script_outer_dir = test_script_directory(client.unique_script_str()) + test_script_dir = os.path.join(test_script_outer_dir, FILES_DIRNAME) + if os.path.isdir(test_script_dir): + try: + return copy_tree(test_script_dir, tests_path) + except FileNotFoundError: + if test_script_directory(client.unique_script_str()) != test_script_outer_dir: + _clear_working_directory(tests_path, getpass.getuser()) + return _copy_test_script_files(client, tests_path) + else: + raise + return [] + + +def _setup_files(client: ClientType, tests_path: str, test_username: str) -> None: + """ + Copy test script files and student files to the working directory tests_path, + then make it the current working directory. + The following permissions are also set: + - tests_path directory: rwxrwx--T + - test subdirectories: rwxrwx--T + - test files: rw-r----- + - student subdirectories: rwxrwx--- + - student files: rw-rw---- + """ + os.chmod(tests_path, 0o1770) + client.write_student_files(tests_path) + for fd, file_or_dir in recursive_iglob(tests_path): + if fd == "d": + os.chmod(file_or_dir, 0o770) + else: + os.chmod(file_or_dir, 0o770) + shutil.chown(file_or_dir, group=test_username) + script_files = _copy_test_script_files(client, tests_path) + for fd, file_or_dir in script_files: + if fd == "d": + os.chmod(file_or_dir, 0o1770) + else: + os.chmod(file_or_dir, 0o750) + shutil.chown(file_or_dir, group=test_username) @clean_after -def run_test( - markus_address, - server_api_key, - test_categories, - files_path, - assignment_id, - group_id, - group_repo_name, - submission_id, - run_id, - enqueue_time, -): +def run_test(client_type: str, test_data: Dict, enqueue_time: int, test_categories: List[str]) -> None: """ Run autotesting tests using the tests in the test_specs json file on the files in files_path. 
@@ -345,48 +340,28 @@ def run_test( error = None hooks_error = None time_to_service = int(round(time.time() - enqueue_time, 3) * 1000) - - test_script_path = test_script_directory(markus_address, assignment_id) - hooks_script_path = os.path.join(test_script_path, HOOKS_FILENAME) - test_specs_path = os.path.join(test_script_path, SETTINGS_FILENAME) - api = Markus(server_api_key, markus_address) - - with open(test_specs_path) as f: - test_specs = json.load(f) + client = get_client(client_type, test_data) try: job = rq.get_current_job() update_pop_interval_stat(job.origin) test_username, tests_path = tester_user() - hooks_kwargs = { - "api": api, - "assignment_id": assignment_id, - "group_id": group_id, - } - testers = {settings["tester_type"] for settings in test_specs["testers"]} - hooks = Hooks(hooks_script_path, testers, cwd=tests_path, kwargs=hooks_kwargs) try: - setup_files(files_path, tests_path, markus_address, assignment_id) - cmd = run_test_command(test_username=test_username) - results, hooks_error = run_test_specs( - cmd, test_specs, test_categories, tests_path, test_username, hooks - ) + _setup_files(client, tests_path, test_username) + cmd = _run_test_command(test_username=test_username) + results, hooks_error = _run_test_specs(cmd, client, test_categories, tests_path, test_username) finally: - stop_tester_processes(test_username) - clear_working_directory(tests_path, test_username) + _stop_tester_processes(test_username) + _clear_working_directory(tests_path, test_username) except Exception as e: error = str(e) finally: - results_data = finalize_results_data( - results, error, hooks_error, time_to_service - ) - store_results( - results_data, markus_address, assignment_id, group_id, submission_id - ) - report(results_data, api, assignment_id, group_id, run_id) + results_data = _finalize_results_data(results, error, hooks_error, time_to_service) + _store_results(client, results_data) + client.send_test_results(results_data) -def get_tester_root_dir(tester_type): +def _get_tester_root_dir(tester_type: str) -> str: """ Return the root directory of the tester named tester_type """ @@ -398,7 +373,7 @@ def get_tester_root_dir(tester_type): return tester_dir -def update_settings(settings, specs_dir): +def _update_settings(settings: Dict, specs_dir: str) -> Dict: """ Return a dictionary containing all the default settings and the installation settings contained in the tester's specs directory as well as the settings. The settings @@ -414,12 +389,19 @@ def update_settings(settings, specs_dir): return full_settings -def create_tester_environments(files_path, test_specs): +def _create_tester_environments(files_path: str, test_specs: Dict) -> Dict: + """ + Return the test_specs dictionary updated with any additional data generated + from creating a new tester environment. + + This function also creates a new tester environment if required based on the + values in test specs. Otherwise, the default environment is used. 
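# Illustrative shape of the test_specs data that drives the environment logic
# described above: a tester entry with "env_data" gets a freshly created environment,
# while one without it keeps the default environment. All field values here are
# invented for illustration.
example_test_specs = {
    "testers": [
        {"tester_type": "py", "env_data": {"python_version": "3.8"}, "test_data": []},
        {"tester_type": "custom", "test_data": []},  # no "env_data" -> default env
    ]
}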
+ """ for i, settings in enumerate(test_specs["testers"]): - tester_dir = get_tester_root_dir(settings["tester_type"]) + tester_dir = _get_tester_root_dir(settings["tester_type"]) specs_dir = os.path.join(tester_dir, "specs") bin_dir = os.path.join(tester_dir, "bin") - settings = update_settings(settings, specs_dir) + settings = _update_settings(settings, specs_dir) if settings.get("env_data"): new_env_dir = tempfile.mkdtemp(prefix="env", dir=TEST_SPECS_DIR) os.chmod(new_env_dir, 0o775) @@ -430,9 +412,7 @@ def create_tester_environments(files_path, test_specs): cmd = [f"{create_file}", json.dumps(settings), files_path] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise TesterCreationError( - f"create tester environment failed with:\n{proc.stderr}" - ) + raise TesterCreationError(f"create tester environment failed with:\n{proc.stderr}") else: settings["env_loc"] = DEFAULT_ENV_DIR test_specs["testers"][i] = settings @@ -440,59 +420,63 @@ def create_tester_environments(files_path, test_specs): return test_specs -def destroy_tester_environments(old_test_script_dir): +def _destroy_tester_environments(old_test_script_dir: str) -> None: + """ + Remove the tester environment specified in the settings file located + in the the old_test_script_dir directory. + + Additionally, if the tester has an associated destroy_environment.sh + script, that script is run as well. + """ test_specs_file = os.path.join(old_test_script_dir, SETTINGS_FILENAME) with open(test_specs_file) as f: test_specs = json.load(f) for settings in test_specs["testers"]: env_loc = settings.get("env_loc", DEFAULT_ENV_DIR) if env_loc != DEFAULT_ENV_DIR: - tester_dir = get_tester_root_dir(settings["tester_type"]) + tester_dir = _get_tester_root_dir(settings["tester_type"]) bin_dir = os.path.join(tester_dir, "bin") destroy_file = os.path.join(bin_dir, "destroy_environment.sh") if os.path.isfile(destroy_file): cmd = [f"{destroy_file}", json.dumps(settings)] proc = subprocess.run(cmd, stderr=subprocess.PIPE) if proc.returncode != 0: - raise TesterCreationError( - f"destroy tester environment failed with:\n{proc.stderr}" - ) + raise TesterCreationError(f"destroy tester environment failed with:\n{proc.stderr}") shutil.rmtree(env_loc, onerror=ignore_missing_dir_error) @clean_after -def update_test_specs(files_path, assignment_id, markus_address, test_specs): +def update_test_specs(client_type: str, client_data: Dict) -> None: """ - Copy new test scripts for a given assignment to from the files_path - to a new location. Indicate that these new test scripts should be used instead of - the old ones. And remove the old ones when it is safe to do so (they are not in the + Download test script files and test specs for the given client. + Indicate that these new test scripts should be used instead of + the old ones. Remove the old ones when it is safe to do so (they are not in the process of being copied to a working directory). - - This function should be used by an rq worker. 
""" # TODO: catch and log errors + client = get_client(client_type, client_data) test_script_dir_name = "test_scripts_{}".format(int(time.time())) - clean_markus_address = clean_dir_name(markus_address) - new_dir = os.path.join( - *stringify( - TEST_SCRIPT_DIR, clean_markus_address, assignment_id, test_script_dir_name - ) - ) + unique_script_str = client.unique_script_str() + new_dir = os.path.join(TEST_SCRIPT_DIR, clean_dir_name(unique_script_str), test_script_dir_name) + test_specs = client.get_test_specs() + new_files_dir = os.path.join(new_dir, FILES_DIRNAME) - move_tree(files_path, new_files_dir) - if "hooks_file" in test_specs: - src = os.path.join(new_files_dir, test_specs["hooks_file"]) - if os.path.isfile(src): - os.rename(src, os.path.join(new_dir, HOOKS_FILENAME)) - test_specs = create_tester_environments(new_files_dir, test_specs) + os.makedirs(new_files_dir, exist_ok=True) + client.write_test_files(new_files_dir) + filenames = [os.path.relpath(path, new_files_dir) for fd, path in recursive_iglob(new_files_dir) if fd == "f"] + try: + validate_against_schema(test_specs, filenames) + except Exception as e: + sys.stderr.write(f"Form Validation Error: {str(e)}") + sys.exit(1) + + test_specs = _create_tester_environments(new_files_dir, test_specs) settings_filename = os.path.join(new_dir, SETTINGS_FILENAME) with open(settings_filename, "w") as f: json.dump(test_specs, f) - old_test_script_dir = test_script_directory(markus_address, assignment_id) - test_script_directory(markus_address, assignment_id, set_to=new_dir) - - if old_test_script_dir is not None: - with fd_open(old_test_script_dir) as fd: - with fd_lock(fd, exclusive=True): - destroy_tester_environments(old_test_script_dir) - shutil.rmtree(old_test_script_dir, onerror=ignore_missing_dir_error) + old_test_script_dir = test_script_directory(unique_script_str) + test_script_directory(unique_script_str, set_to=new_dir) + + if old_test_script_dir is not None and os.path.isdir(old_test_script_dir): + _destroy_tester_environments(old_test_script_dir) + shutil.rmtree(old_test_script_dir, onerror=ignore_missing_dir_error) diff --git a/src/autotester/server/utils/file_management.py b/src/autotester/server/utils/file_management.py index 56b53b72..acbb360c 100644 --- a/src/autotester/server/utils/file_management.py +++ b/src/autotester/server/utils/file_management.py @@ -1,41 +1,33 @@ import os -import uuid -import tempfile import shutil -import fcntl -from autotester.server.utils import redis_management -from autotester.config import config -from contextlib import contextmanager +import zipfile +from io import BytesIO +from typing import Generator, Tuple, List, Callable, Type, Optional +from types import TracebackType -FILES_DIRNAME = config["_workspace_contents", "_files_dir"] - -def clean_dir_name(name): +def clean_dir_name(name: str) -> str: """ Return name modified so that it can be used as a unix style directory name """ return name.replace("/", "_") -def random_tmpfile_name(): - return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex) - - -def recursive_iglob(root_dir): +def recursive_iglob(root_dir: str) -> Generator[Tuple[str, str], None, None]: """ Walk breadth first over a directory tree starting at root_dir and yield the path to each directory or file encountered. Yields a tuple containing a string indicating whether the path is to a directory ("d") or a file ("f") and the path itself. 
Raise a - ValueError if the root_dir doesn't exist + FileNotFoundError if the root_dir doesn't exist """ if os.path.isdir(root_dir): for root, dirnames, filenames in os.walk(root_dir): yield from (("d", os.path.join(root, d)) for d in dirnames) yield from (("f", os.path.join(root, f)) for f in filenames) else: - raise ValueError("directory does not exist: {}".format(root_dir)) + raise FileNotFoundError("directory does not exist: {}".format(root_dir)) -def copy_tree(src, dst, exclude=tuple()): +def copy_tree(src: str, dst: str, exclude: Tuple = tuple()) -> List[Tuple[str, str]]: """ Recursively copy all files and subdirectories in the path indicated by src to the path indicated by dst. If directories @@ -45,7 +37,7 @@ def copy_tree(src, dst, exclude=tuple()): copied = [] for fd, file_or_dir in recursive_iglob(src): src_path = os.path.relpath(file_or_dir, src) - if src_path in exclude: + if src_path in exclude or any(os.path.relpath(src_path, ex) for ex in exclude): continue target = os.path.join(dst, src_path) if fd == "d": @@ -57,7 +49,9 @@ def copy_tree(src, dst, exclude=tuple()): return copied -def ignore_missing_dir_error(_func, _path, excinfo): +def ignore_missing_dir_error( + _func: Callable, _path: str, excinfo: Tuple[Type[BaseException], BaseException, Optional[TracebackType]], +) -> None: """ Used by shutil.rmtree to ignore a FileNotFoundError """ err_type, err_inst, traceback = excinfo if err_type == FileNotFoundError: @@ -65,85 +59,21 @@ def ignore_missing_dir_error(_func, _path, excinfo): raise err_inst -def move_tree(src, dst): - """ - Recursively move all files and subdirectories in the path - indicated by src to the path indicated by dst. If directories - don't exist, they are created. - """ - os.makedirs(dst, exist_ok=True) - moved = copy_tree(src, dst) - shutil.rmtree(src, onerror=ignore_missing_dir_error) - return moved - - -@contextmanager -def fd_open(path, flags=os.O_RDONLY, *args, **kwargs): - """ - Open the file or directory at path, yield its - file descriptor, and close it when finished. - flags, *args and **kwargs are passed on to os.open. +def extract_zip_stream(zip_byte_stream: bytes, destination: str, ignore_root_dirs: int = 1) -> None: """ - fd = os.open(path, flags, *args, **kwargs) - try: - yield fd - finally: - os.close(fd) + Extract files in a zip archive's content to , a path to a local directory. - -@contextmanager -def fd_lock(file_descriptor, exclusive=True): - """ - Lock the object with the given file descriptor and unlock it - when finished. A lock can either be exclusive or shared by - setting the exclusive keyword argument to True or False. + If ignore_root_dir is a positive integer, the files in the zip archive will be extracted and written as if + the top n root directories of the zip archive were not in their path (where n == ignore_root_dirs). """ - fcntl.flock(file_descriptor, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH) - try: - yield - finally: - fcntl.flock(file_descriptor, fcntl.LOCK_UN) - - -def copy_test_script_files(markus_address, assignment_id, tests_path): - """ - Copy test script files for a given assignment to the tests_path - directory if they exist. tests_path may already exist and contain - files and subdirectories. 
- """ - test_script_outer_dir = redis_management.test_script_directory( - markus_address, assignment_id - ) - test_script_dir = os.path.join(test_script_outer_dir, FILES_DIRNAME) - if os.path.isdir(test_script_dir): - with fd_open(test_script_dir) as fd: - with fd_lock(fd, exclusive=False): - return copy_tree(test_script_dir, tests_path) - return [] - - -def setup_files(files_path, tests_path, markus_address, assignment_id): - """ - Copy test script files and student files to the working directory tests_path, - then make it the current working directory. - The following permissions are also set: - - tests_path directory: rwxrwx--T - - test subdirectories: rwxrwx--T - - test files: rw-r----- - - student subdirectories: rwxrwx--- - - student files: rw-rw---- - """ - os.chmod(tests_path, 0o1770) - student_files = move_tree(files_path, tests_path) - for fd, file_or_dir in student_files: - if fd == "d": - os.chmod(file_or_dir, 0o770) - else: - os.chmod(file_or_dir, 0o660) - script_files = copy_test_script_files(markus_address, assignment_id, tests_path) - for fd, file_or_dir in script_files: - if fd == "d": - os.chmod(file_or_dir, 0o1770) - else: - os.chmod(file_or_dir, 0o640) - return student_files, script_files + with zipfile.ZipFile(BytesIO(zip_byte_stream)) as zf: + for fname in zf.namelist(): + *dpaths, bname = fname.split(os.sep) + dest = os.path.join(destination, *dpaths[ignore_root_dirs:]) + filename = os.path.join(dest, bname) + if filename.endswith("/"): + os.makedirs(filename, exist_ok=True) + else: + os.makedirs(dest, exist_ok=True) + with open(filename, "wb") as f: + f.write(zf.read(fname)) diff --git a/src/autotester/server/utils/form_validation.py b/src/autotester/server/utils/form_management.py similarity index 59% rename from src/autotester/server/utils/form_validation.py rename to src/autotester/server/utils/form_management.py index e6df2170..8e02b66e 100644 --- a/src/autotester/server/utils/form_validation.py +++ b/src/autotester/server/utils/form_management.py @@ -1,9 +1,16 @@ +import os +import json +import glob +import autotester from jsonschema import Draft7Validator, validators, ValidationError from jsonschema.exceptions import best_match from copy import deepcopy +from typing import Type, Generator, Dict, Union, List +ValidatorType = type(Draft7Validator) -def extend_with_default(validator_class=Draft7Validator): + +def _extend_with_default(validator_class: Type[ValidatorType] = Draft7Validator,) -> ValidatorType: """ Extends a validator class to add defaults before validation. 
From: https://github.com/Julian/jsonschema/blob/master/docs/faq.rst @@ -11,7 +18,9 @@ def extend_with_default(validator_class=Draft7Validator): validate_props = validator_class.VALIDATORS["properties"] validate_array = validator_class.VALIDATORS["items"] - def set_defaults(validator, properties, instance, schema): + def _set_defaults( + validator: ValidatorType, properties: Dict, instance: Union[Dict, List], schema: Dict, + ) -> Generator[BaseException, None, None]: """ Set defaults within a "properties" context """ if not validator.is_type(instance, "object"): return @@ -30,7 +39,9 @@ def set_defaults(validator, properties, instance, schema): for error in validate_props(validator, properties, instance, schema): yield error - def set_array_defaults(validator, properties, instance, schema): + def _set_array_defaults( + validator: ValidatorType, properties: Dict, instance: List, schema: Dict + ) -> Generator[ValidationError, None, None]: """ Set defaults within an "array" context """ if not validator.is_type(instance, "array"): return @@ -48,13 +59,15 @@ def set_array_defaults(validator, properties, instance, schema): for error in validate_array(validator, properties, instance, schema): yield error - def set_oneOf_defaults(validator, properties, instance, schema): - """ + def _set_oneof_defaults( + validator: ValidatorType, properties: Dict, instance: Dict, schema: Dict + ) -> Generator[ValidationError, None, None]: + """ Set defaults within a "oneOf" context. This ensures that only defaults from the matching subschema are set on the instance. TODO: If we ever use other optional subschema contexts (ex: allOf, anyOf) - then we should implement custom validator functions for those as + then we should implement custom validator functions for those as well. """ @@ -85,22 +98,22 @@ def set_oneOf_defaults(validator, properties, instance, schema): instance.update(good_instance) custom_validators = { - "properties": set_defaults, - "items": set_array_defaults, - "oneOf": set_oneOf_defaults, + "properties": _set_defaults, + "items": _set_array_defaults, + "oneOf": _set_oneof_defaults, } return validators.extend(validator_class, custom_validators) -def validate_with_defaults( - schema, obj, validator_class=Draft7Validator, best_only=True -): +def _validate_with_defaults( + schema: Dict, obj: Union[Dict, List], validator_class: ValidatorType = Draft7Validator, best_only: bool = True, +) -> Union[ValidationError, List[ValidationError]]: """ - Return an iterator that yields errors from validating obj on schema + Return an iterator that yields errors from validating obj on schema after first filling in defaults on obj. """ - validator = extend_with_default(validator_class)(schema) + validator = _extend_with_default(validator_class)(schema) # first time to fill in defaults since validating 'required', 'minProperties', # etc. can't be done until the instance has been properly filled with defaults. 
list(validator.iter_errors(obj)) @@ -110,9 +123,36 @@ def validate_with_defaults( return errors -def is_valid(obj, schema, validator_class=Draft7Validator): +def get_schema() -> Dict: + package_root = autotester.__path__[0] + + with open(os.path.join(package_root, "lib", "tester_schema_skeleton.json")) as f: + schema_skeleton = json.load(f) + + glob_pattern = os.path.join(package_root, "testers", "*", "specs", ".installed") + for path in sorted(glob.glob(glob_pattern)): + tester_type = os.path.basename(os.path.dirname(os.path.dirname(path))) + specs_dir = os.path.dirname(path) + with open(os.path.join(specs_dir, "settings_schema.json")) as f: + tester_schema = json.load(f) + + schema_skeleton["definitions"]["installed_testers"]["enum"].append(tester_type) + schema_skeleton["definitions"]["tester_schemas"]["oneOf"].append(tester_schema) + + return schema_skeleton + + +def validate_against_schema(test_specs: Dict, filenames: List[str]) -> None: """ - Return True if is valid for schema using the - validator . + Check if test_specs is valid according to the schema from calling get_schema. + Raise an error if it is not valid. """ - return validator_class(schema).is_valid(obj) + schema = get_schema() + if schema is not None: + schema["definitions"]["files_list"]["enum"] = filenames + # don't validate based on categories + schema["definitions"]["test_data_categories"].pop("enum") + schema["definitions"]["test_data_categories"].pop("enumNames") + error = _validate_with_defaults(schema, test_specs, best_only=True) + if error: + raise error diff --git a/src/autotester/server/utils/path_management.py b/src/autotester/server/utils/path_management.py deleted file mode 100644 index 531869e8..00000000 --- a/src/autotester/server/utils/path_management.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import sys -from contextlib import contextmanager - - -@contextmanager -def current_directory(path): - """ - Context manager that temporarily changes the working directory - to the path argument. - """ - if path is not None: - current_dir = os.getcwd() - os.chdir(path) - try: - yield - finally: - os.chdir(current_dir) - else: - yield - - -@contextmanager -def add_path(path, prepend=True): - """ - Context manager that temporarily adds a path to sys.path. - If prepend is True, the path will be prepended otherwise - it will be appended. - """ - if prepend: - sys.path.insert(0, path) - else: - sys.path.append(path) - try: - yield - finally: - try: - i = (sys.path if prepend else sys.path[::-1]).index(path) - sys.path.pop(i) - except ValueError: - pass diff --git a/src/autotester/server/utils/redis_management.py b/src/autotester/server/utils/redis_management.py index e09f56bc..edbf8417 100644 --- a/src/autotester/server/utils/redis_management.py +++ b/src/autotester/server/utils/redis_management.py @@ -2,14 +2,15 @@ import rq import time from functools import wraps -from autotester.server.utils import file_management, string_management +from typing import Optional, Tuple, Callable +from autotester.server.utils import string_management from autotester.config import config CURRENT_TEST_SCRIPT_HASH = config["redis", "_current_test_script_hash"] POP_INTERVAL_HASH = config["redis", "_pop_interval_hash"] -def redis_connection(): +def redis_connection() -> redis.Redis: """ Return the currently open redis connection object. 
If there is no connection currently open, one is created using the url specified in @@ -22,30 +23,20 @@ def redis_connection(): return rq.get_current_connection() -def get_test_script_key(markus_address, assignment_id): - """ - Return unique key for each assignment used for - storing the location of test scripts in Redis - """ - clean_markus_address = file_management.clean_dir_name(markus_address) - return f"{clean_markus_address}_{assignment_id}" - - -def test_script_directory(markus_address, assignment_id, set_to=None): +def test_script_directory(unique_script_str: str, set_to: Optional[str] = None): """ Return the directory containing the test scripts for a specific assignment. Optionally updates the location of the test script directory to the value of the set_to keyword argument (if it is not None) """ - key = get_test_script_key(markus_address, assignment_id) r = redis_connection() if set_to is not None: - r.hset(CURRENT_TEST_SCRIPT_HASH, key, set_to) - out = r.hget(CURRENT_TEST_SCRIPT_HASH, key) + r.hset(CURRENT_TEST_SCRIPT_HASH, unique_script_str, set_to) + out = r.hget(CURRENT_TEST_SCRIPT_HASH, unique_script_str) return string_management.decode_if_bytes(out) -def update_pop_interval_stat(queue_name): +def update_pop_interval_stat(queue_name: str) -> None: """ Update the values contained in the redis hash named REDIS_POP_HASH for the queue named queue_name. This should be called whenever a new job @@ -59,7 +50,7 @@ def update_pop_interval_stat(queue_name): r.hincrby(POP_INTERVAL_HASH, "{}_count".format(queue_name), 1) -def clear_pop_interval_stat(queue_name): +def _clear_pop_interval_stat(queue_name: str) -> None: """ Reset the values contained in the redis hash named REDIS_POP_HASH for the queue named queue_name. This should be called whenever a queue becomes @@ -71,7 +62,7 @@ def clear_pop_interval_stat(queue_name): r.hset(POP_INTERVAL_HASH, "{}_count".format(queue_name), 0) -def get_pop_interval_stat(queue_name): +def _get_pop_interval_stat(queue_name: str) -> Tuple[int, int, int]: """ Return the following data about the queue named queue_name: - the time the first job was popped from the queue during the @@ -83,19 +74,19 @@ def get_pop_interval_stat(queue_name): """ r = redis_connection() start = r.hget(POP_INTERVAL_HASH, "{}_start".format(queue_name)) - last = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) + last = r.hget(POP_INTERVAL_HASH, "{}_last".format(queue_name)) count = r.hget(POP_INTERVAL_HASH, "{}_count".format(queue_name)) return start, last, count -def get_avg_pop_interval(queue_name): +def get_avg_pop_interval(queue_name: str) -> Optional[float]: """ Return the average interval between pops off of the end of the queue named queue_name during the current burst of jobs. Return None if there are no jobs in the queue, indicating that there is no current burst. 
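# Worked example of the average-pop-interval arithmetic used below: with the first
# pop of the burst at t=100.0, the latest at t=106.0 and 4 pops counted, the average
# gap is (106.0 - 100.0) / (4 - 1) = 2.0 seconds. The numbers are invented.
start, last, count = 100.0, 106.0, 4
count -= 1
average_interval = (last - start) / count if count else 0.0
assert average_interval == 2.0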
""" - start, last, count = get_pop_interval_stat(queue_name) + start, last, count = _get_pop_interval_stat(queue_name) try: start = float(start) last = float(last) @@ -103,18 +94,18 @@ def get_avg_pop_interval(queue_name): except TypeError: return None count -= 1 - return (last - start) / count if count else 0 + return (last - start) / count if count else 0.0 -def clean_up(): +def _clean_up() -> None: """ Reset the pop interval data for each empty queue """ with rq.Connection(redis_connection()): for q in rq.Queue.all(): if q.is_empty(): - clear_pop_interval_stat(q.name) + _clear_pop_interval_stat(q.name) -def clean_after(func): +def clean_after(func: Callable) -> Callable: """ Call the clean_up function after the decorated function func is finished @@ -125,6 +116,6 @@ def wrapper(*args, **kwargs): try: return func(*args, **kwargs) finally: - clean_up() + _clean_up() return wrapper diff --git a/src/autotester/server/utils/resource_management.py b/src/autotester/server/utils/resource_management.py index 6c45d082..a2e101d0 100644 --- a/src/autotester/server/utils/resource_management.py +++ b/src/autotester/server/utils/resource_management.py @@ -4,11 +4,11 @@ RLIMIT_ADJUSTMENTS = {"nproc": 10} -def rlimit_str2int(rlimit_string): +def _rlimit_str2int(rlimit_string): return getattr(resource, f"RLIMIT_{rlimit_string.upper()}") -def set_rlimits_before_test(): +def set_rlimits_before_test() -> None: """ Sets rlimit settings specified in config file This function ensures that for specific limits (defined in RLIMIT_ADJUSTMENTS), @@ -17,33 +17,19 @@ def set_rlimits_before_test(): processes will always be able to run. """ for limit_str in config["rlimit_settings"].keys() | RLIMIT_ADJUSTMENTS.keys(): - limit = rlimit_str2int(limit_str) - - values = config["rlimit_settings"].get( - limit_str, (resource.RLIM_INFINITY, resource.RLIM_INFINITY) - ) + limit = _rlimit_str2int(limit_str) + config_soft, config_hard = config["rlimit_settings"].get(limit_str, resource.getrlimit(limit)) curr_soft, curr_hard = resource.getrlimit(limit) - soft, hard = (min(vals) for vals in zip((curr_soft, curr_hard), values)) - # reduce the hard limit so that cleanup scripts will have at least - # adj more resources to use. + # account for the fact that resource.RLIM_INFINITY == -1 + soft, hard = min(curr_soft, config_soft), min(curr_hard, config_hard) + if soft < 0: + soft = max(curr_soft, config_soft) + if hard < 0: + hard = max(curr_hard, config_hard) + # reduce the hard limit so that cleanup scripts will have at least adj more resources to use. adj = RLIMIT_ADJUSTMENTS.get(limit_str, 0) - if (curr_hard - hard) < adj: - hard = curr_hard - adj + if hard >= adj: + hard -= adj # make sure the soft limit doesn't exceed the hard limit - hard = max(hard, 0) - soft = max(min(hard, soft), 0) - - resource.setrlimit(limit, (soft, hard)) - - -def set_rlimits_before_cleanup(): - """ - Sets the rlimit settings specified in RLIMIT_ADJUSTMENTS - so that both the soft and hard limits are set as high as possible. This ensures - that cleanup processes will have as many resources as possible to run. 
- """ - for limit_str in RLIMIT_ADJUSTMENTS: - limit = rlimit_str2int(limit_str) - soft, hard = resource.getrlimit(limit) - soft = max(soft, hard) + soft = min(hard, soft) resource.setrlimit(limit, (soft, hard)) diff --git a/src/autotester/server/utils/string_management.py b/src/autotester/server/utils/string_management.py index 3c55ab5b..0cc14699 100644 --- a/src/autotester/server/utils/string_management.py +++ b/src/autotester/server/utils/string_management.py @@ -1,16 +1,16 @@ import json +from typing import Union, Type, Optional, Tuple, List -def stringify(*args): - for a in args: - yield str(a) - - -def decode_if_bytes(b, format_="utf-8"): +def decode_if_bytes(b: Union[str, bytes], format_: str = "utf-8") -> str: + """ + Return b as a string. If b is a bytes object then it is decoded to a + string using format_ as a format. + """ return b.decode(format_) if isinstance(b, bytes) else b -def loads_partial_json(json_string, expected_type=None): +def loads_partial_json(json_string: str, expected_type: Optional[Type] = None) -> Tuple[List, bool]: """ Return a list of objects loaded from a json string and a boolean indicating whether the json_string was malformed. This will try @@ -28,11 +28,12 @@ def loads_partial_json(json_string, expected_type=None): while i < len(json_string): try: obj, ind = decoder.raw_decode(json_string[i:]) + next_i = i + ind if expected_type is None or isinstance(obj, expected_type): results.append(obj) - elif json_string[i:i + ind].strip(): + elif json_string[i:next_i].strip(): malformed = True - i += ind + i = next_i except json.JSONDecodeError: if json_string[i].strip(): malformed = True diff --git a/src/autotester/server/utils/tests/__init__.py b/src/autotester/server/utils/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/autotester/server/utils/tests/file_management_test.py b/src/autotester/server/utils/tests/file_management_test.py new file mode 100644 index 00000000..8d324a41 --- /dev/null +++ b/src/autotester/server/utils/tests/file_management_test.py @@ -0,0 +1,209 @@ +import os +import tempfile +import contextlib +import shutil +import zipfile +from io import BytesIO +from hypothesis import given +from hypothesis import strategies as st +from typing import Union, List +from autotester.server.utils import file_management as fm + + +@st.composite +def nested_dir_structure(draw) -> Union[List, int]: + """ Return a recursively nested list of lists of integers """ + dirs = draw(st.recursive(st.integers(min_value=1, max_value=5), st.lists, max_leaves=10)) + if isinstance(dirs, int): + dirs = [dirs] + return dirs + + +def _nested_dirs( + structure: Union[List, int], stack: contextlib.ExitStack, dirname: str = None +) -> Union[None, tempfile.NamedTemporaryFile, tempfile.TemporaryDirectory]: + """ Helper method for nested_dirs """ + if isinstance(structure, int): + for _ in range(structure): + stack.enter_context(tempfile.NamedTemporaryFile(dir=dirname)) + else: + d = tempfile.TemporaryDirectory(dir=dirname) + stack.enter_context(d) + for struc in structure: + _nested_dirs(struc, stack, d.name) + return d + + +@contextlib.contextmanager +def nested_dirs(structure: List) -> tempfile.TemporaryDirectory: + """ Creates temporary nested directories based on """ + with contextlib.ExitStack() as stack: + yield _nested_dirs(structure, stack) + + +def zip_archive(structure: List) -> bytes: + """ Creates a zip archive of nested files/directories based on """ + with tempfile.NamedTemporaryFile() as f: + with nested_dirs(structure) as d: + with 
zipfile.ZipFile(f.name, "w", zipfile.ZIP_DEFLATED) as zipf: + for root, _, fnames in os.walk(d.name): + root_path = os.path.relpath(root, d.name) + zipf.write(root, arcname=root_path) + for file in fnames: + zipf.write(os.path.join(root, file), arcname=os.path.join(root_path, file)) + f.seek(0) + return f.read() + + +class TestCleanDirName: + @given(st.from_regex(r"[^/]+", fullmatch=True)) + def test_no_forward_slash(self, name: str): + """ Should not change a name that does not contain a forward slash character """ + assert fm.clean_dir_name(name) == name + + @given(st.from_regex(r"(?:.*/.*)+", fullmatch=True)) + def test_with_forward_slash_modified(self, name: str): + """ Should replace forward slashes with underscores """ + assert fm.clean_dir_name(name).replace("/", "_") + + +class TestRecursiveIglob: + def file_counter(self, struc): + if isinstance(struc, int): + return struc + return sum(self.file_counter(s) for s in struc) + + def dir_counter(self, struc): + if isinstance(struc, int): + return 0 + return 1 + sum(self.dir_counter(s) for s in struc) + + @given(nested_dir_structure()) + def test_yield_all_files(self, structure): + """ Should find all files recursively in the directory """ + with nested_dirs(structure) as d: + files = [fname for fd, fname in fm.recursive_iglob(d.name) if fd == "f"] + assert len(files) == self.file_counter(structure) + + @given(nested_dir_structure()) + def test_yield_all_dirs(self, structure): + """ Should find all files recursively in the directory """ + with nested_dirs(structure) as d: + dirs = [dname for fd, dname in fm.recursive_iglob(d.name) if fd == "d"] + assert len(dirs) + 1 == self.dir_counter(structure) # +1 to include the root directory + + @given(nested_dir_structure()) + def test_order_is_correct(self, structure): + """ Should navigate the directory breadth first """ + with nested_dirs(structure) as d: + visited = [d.name] + for fd, name in fm.recursive_iglob(d.name): + dir_name = os.path.dirname(name) + assert dir_name in visited # yielded child before parent + if fd == "d": + visited.append(name) + + +class TestCopyTree: + @staticmethod + def files_from_walk(src): + return [ + os.path.relpath(os.path.join(root, name), src) + for root, dnames, fnames in os.walk(src) + for name in dnames + fnames + ] + + @given(nested_dir_structure()) + def test_files_are_created(self, structure): + """ Should create copies of all files """ + with nested_dirs(structure) as src: + with tempfile.TemporaryDirectory() as dst_name: + fm.copy_tree(src.name, dst_name) + assert self.files_from_walk(src.name) == self.files_from_walk(dst_name) + + @given(nested_dir_structure()) + def test_src_files_not_moved(self, structure): + """ Should not move/delete source files """ + with nested_dirs(structure) as src: + with tempfile.TemporaryDirectory() as dst_name: + prev_files = self.files_from_walk(src.name) + fm.copy_tree(src.name, dst_name) + assert prev_files == self.files_from_walk(src.name) + + @given(nested_dir_structure(), st.data()) + def test_excluded_not_copied(self, structure, data): + """ Should not copy excluded files """ + with nested_dirs(structure) as src: + with tempfile.TemporaryDirectory() as dst_name: + objs = self.files_from_walk(src.name) + if objs: + excluded = data.draw(st.lists(st.sampled_from(objs), max_size=len(objs), unique=True)) + else: + excluded = [] + fm.copy_tree(src.name, dst_name, exclude=excluded) + assert not set(excluded) & set(self.files_from_walk(dst_name)) + + +class TestIgnoreMissingDirError: + def test_dir_exists(self): + """ 
Dir is removed when it exists """ + d = tempfile.TemporaryDirectory() + try: + shutil.rmtree(d.name, onerror=fm.ignore_missing_dir_error) + assert not os.path.isdir(d.name) + finally: + try: + d.cleanup() + except FileNotFoundError: + pass + + def test_dir_does_not_exist(self): + """ No error is raised whether the dir exists or not """ + d = tempfile.TemporaryDirectory() + try: + shutil.rmtree(d.name, onerror=fm.ignore_missing_dir_error) + shutil.rmtree(d.name, onerror=fm.ignore_missing_dir_error) + finally: + try: + d.cleanup() + except FileNotFoundError: + pass + + +class TestExtractZipStream: + @staticmethod + def trim(name: str, ignore: int) -> str: + *dnames, fname = name.split(os.sep) + return os.path.normpath(os.path.join(*dnames[ignore:], fname)) + + @given(nested_dir_structure()) + def test_extracts_stream_to_dir(self, structure): + archive = zip_archive(structure) + with tempfile.TemporaryDirectory() as dname: + fm.extract_zip_stream(archive, dname, ignore_root_dirs=0) + archive_set = { + os.path.normpath(name) for name in zipfile.ZipFile(BytesIO(archive)).namelist() if name != "./" + } + target_set = { + os.path.normpath(os.path.join(os.path.relpath(root, dname), name)) + for root, dnames, fnames in os.walk(dname) + for name in dnames + fnames + } + assert target_set == archive_set + + @given(nested_dir_structure(), st.integers(min_value=1, max_value=4)) + def test_extracts_stream_to_dir_ignore_roots(self, structure, ignore): + archive = zip_archive(structure) + with tempfile.TemporaryDirectory() as dname: + fm.extract_zip_stream(archive, dname, ignore_root_dirs=ignore) + archive_set = { + p + for p in (self.trim(name, ignore) for name in zipfile.ZipFile(BytesIO(archive)).namelist()) + if p and p != "." + } + target_set = { + os.path.normpath(os.path.join(os.path.relpath(root, dname), name)) + for root, dnames, fnames in os.walk(dname) + for name in dnames + fnames + } + assert target_set == archive_set diff --git a/src/autotester/server/utils/tests/form_management_test.py b/src/autotester/server/utils/tests/form_management_test.py new file mode 100644 index 00000000..ef23fb78 --- /dev/null +++ b/src/autotester/server/utils/tests/form_management_test.py @@ -0,0 +1,2 @@ +# TODO: write these tests after updating method to get schema file from the tester +# (write proper interface instead of globbing for files) diff --git a/src/autotester/server/utils/tests/redis_management_test.py b/src/autotester/server/utils/tests/redis_management_test.py new file mode 100644 index 00000000..829a118c --- /dev/null +++ b/src/autotester/server/utils/tests/redis_management_test.py @@ -0,0 +1,132 @@ +import rq +import pytest +from unittest.mock import patch, call +from autotester.server.utils import redis_management as rm +from autotester.config import config +from fakeredis import FakeStrictRedis +import time + + +@pytest.fixture +def clear_rq_connection(): + while rq.pop_connection(): + pass + + +@pytest.fixture +def redis_conn(): + with patch("autotester.server.utils.redis_management.redis_connection", return_value=FakeStrictRedis()) as conn: + yield conn + + +def set_pop_intervals(count, redis_conn): + """ Sets pop interval settings """ + time1 = time.time() + time.sleep(0.2) + time2 = time.time() + redis_conn.hset(config["redis", "_pop_interval_hash"], "a_start", time1) + redis_conn.hset(config["redis", "_pop_interval_hash"], "a_last", time2) + redis_conn.hset(config["redis", "_pop_interval_hash"], "a_count", count) + return time1, time2 + + +class TestRedisConnection: + @patch("redis.Redis") + 
def test_gets_existing_redis_connection(self, _conn, clear_rq_connection): + """ Reuse previous connection object """ + assert rm.redis_connection() == rm.redis_connection() + + @patch("redis.Redis") + def test_gets_new_connection(self, conn, clear_rq_connection): + """ Create new connection object from config settings if None exists """ + rm.redis_connection() + assert call(config["redis", "url"]).call_list() == conn.mock_calls + + +class TestTestScriptDirectory: + def test_gets_value(self, redis_conn): + """ Gets existing value """ + redis_conn.return_value.hset(config["redis", "_current_test_script_hash"], "a", "b") + assert rm.test_script_directory("a") == "b" + + def test_sets_value(self, redis_conn): + """ Gets existing value """ + rm.test_script_directory("c", set_to="d") + assert redis_conn.return_value.hget(config["redis", "_current_test_script_hash"], "c") == b"d" + + +class TestUpdatePopIntervalStat: + def test_creates_start_value(self, redis_conn): + """ Sets the start value if it does not exist """ + redis_conn.return_value.hdel(config["redis", "_pop_interval_hash"], "a_start") + rm.update_pop_interval_stat("a") + start_val = float(redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_start")) + assert start_val < float(time.time()) + + def test_no_overwrite_existing_start_value(self, redis_conn): + """ Does not overwrite an existing start value """ + redis_conn.return_value.hset(config["redis", "_pop_interval_hash"], "a_start", "a") + rm.update_pop_interval_stat("a") + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_start") == b"a" + + def test_updates_last_value_if_not_set(self, redis_conn): + """ Sets the last value if it does not exist """ + redis_conn.return_value.hdel(config["redis", "_pop_interval_hash"], "a_last") + rm.update_pop_interval_stat("a") + assert float(redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_last")) < float(time.time()) + + def test_updates_last_value_if_set(self, redis_conn): + """ Sets the last value if it does exist """ + redis_conn.return_value.hset(config["redis", "_pop_interval_hash"], "a_last", "a") + rm.update_pop_interval_stat("a") + assert float(redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_last")) < float(time.time()) + + def test_increments_count_if_not_set(self, redis_conn): + """ Increments count when not set """ + redis_conn.return_value.hdel(config["redis", "_pop_interval_hash"], "a_count") + rm.update_pop_interval_stat("a") + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_count") == b"1" + + def test_increments_count_if_set(self, redis_conn): + """ Increments count when set """ + redis_conn.return_value.hset(config["redis", "_pop_interval_hash"], "a_count", "5") + rm.update_pop_interval_stat("a") + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_count") == b"6" + + +class TestGetAvgPopInterval: + def test_no_burst(self, redis_conn): + """ If there is currently no burst """ + assert rm.get_avg_pop_interval("a") is None + + def test_burst(self, redis_conn): + """ If there is a burst """ + time1, time2 = set_pop_intervals(3, redis_conn.return_value) + assert rm.get_avg_pop_interval("a") == pytest.approx((time2 - time1) / 2) + + def test_single(self, redis_conn): + """ If there is a single other value in the burst """ + set_pop_intervals(1, redis_conn.return_value) + assert rm.get_avg_pop_interval("a") == 0.0 + + +class TestCleanAfter: + def test_clears_pop_intervals(self, 
redis_conn): + """ Clears pop interval stats if the queue is empty """ + queue = rq.Queue("a", connection=redis_conn.return_value) + queue.enqueue_call(lambda: None).delete() + set_pop_intervals(3, redis_conn.return_value) + rm.clean_after(lambda: None)() + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_start") is None + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_last") == b"0" + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_count") == b"0" + + def test_leaves_pop_intervals(self, redis_conn): + """ Does not clear pop intervals if the queue is not empty """ + queue = rq.Queue("a", connection=redis_conn.return_value) + queue.enqueue_call(lambda: None) + time1, time2 = set_pop_intervals(3, redis_conn.return_value) + rm.clean_after(lambda: None)() + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_start") == str(time1).encode() + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_last") == str(time2).encode() + assert redis_conn.return_value.hget(config["redis", "_pop_interval_hash"], "a_count") == b"3" diff --git a/src/autotester/server/utils/tests/resource_management_test.py b/src/autotester/server/utils/tests/resource_management_test.py new file mode 100644 index 00000000..326f2eee --- /dev/null +++ b/src/autotester/server/utils/tests/resource_management_test.py @@ -0,0 +1,32 @@ +import resource +from unittest.mock import patch +import multiprocessing +from typing import Callable, Tuple +from autotester.server.utils import resource_management as rm + + +def enqueue_limit(limit: int, queue: multiprocessing.Queue, func: Callable) -> None: + config = {"rlimit_settings": {"nproc": [200, 200], "cpu": [10, 10]}} + adjustments = {"nproc": 2} + with patch.dict("autotester.server.utils.resource_management.config._settings", config): + with patch.dict("autotester.server.utils.resource_management.RLIMIT_ADJUSTMENTS", adjustments): + func() + queue.put(resource.getrlimit(limit)) + + +def run_test(limit: int, func: Callable) -> Tuple[int, int]: + queue = multiprocessing.Queue() + proc = multiprocessing.Process(target=enqueue_limit, args=(limit, queue, func)) + proc.start() + proc.join() + return queue.get(block=False) + + +class TestSetRlimitsBeforeTest: + def test_sets_rlimits_with_adjustments(self): + """ Reduces rlimit by the adjustment amount from the config setting """ + assert run_test(resource.RLIMIT_NPROC, rm.set_rlimits_before_test) == (198, 198) + + def test_sets_rlimits_without_adjustments(self): + """ Does not reduce the rlimit from the config setting if no adjustment given """ + assert run_test(resource.RLIMIT_CPU, rm.set_rlimits_before_test) == (10, 10) diff --git a/src/autotester/server/utils/tests/string_management_test.py b/src/autotester/server/utils/tests/string_management_test.py new file mode 100644 index 00000000..66d3b194 --- /dev/null +++ b/src/autotester/server/utils/tests/string_management_test.py @@ -0,0 +1,71 @@ +from hypothesis import given +from hypothesis import strategies as st +from autotester.server.utils import string_management as sm + + +class TestDecodeIfBytes: + @given(st.from_regex(r"[\w\d]*", fullmatch=True)) + def test_decodes_string(self, string): + """ Returns original string """ + assert string == sm.decode_if_bytes(string) + + @given(st.from_regex(r"[\w\d]*", fullmatch=True)) + def test_decodes_bytes(self, string): + """ Returns original string """ + assert string == 
sm.decode_if_bytes(string.encode("utf-8", "ignore")) + + @given(st.from_regex(r"[\w\d]*", fullmatch=True)) + def test_decodes_bytes_non_utf8(self, string): + """ Returns original string """ + assert string == sm.decode_if_bytes(string.encode("utf-16", "ignore"), "utf-16") + + +class TestLoadPartialJson: + def test_well_formed_any(self): + """ Parses well formed json """ + j = '{"a": ["b", "c"]}' + result, malformed = sm.loads_partial_json(j) + assert result == [{"a": ["b", "c"]}] + assert not malformed + + def test_well_formed_single_type_dict(self): + """ Parses well formed json getting only dict types """ + j = '{"a": ["b", "c"]}' + result, malformed = sm.loads_partial_json(j, dict) + assert result == [{"a": ["b", "c"]}] + assert not malformed + + def test_well_formed_single_type_list(self): + """ Parses well formed json getting only list types """ + j = '["b", "c"]' + result, malformed = sm.loads_partial_json(j, list) + assert result == [["b", "c"]] + assert not malformed + + def test_nothing_well_formed(self): + """ Parses a json without any well formed json """ + j = "just a string" + result, malformed = sm.loads_partial_json(j) + assert result == [] + assert malformed + + def test_well_formed_partial_any(self): + """ Parses partially well formed json """ + j = 'bad bit{"a": ["b", "c"]} other bad bit' + result, malformed = sm.loads_partial_json(j) + assert result == [{"a": ["b", "c"]}] + assert malformed + + def test_well_formed_partial_single_type_dict(self): + """ Parses partially well formed json getting only dict types """ + j = 'bad bit{"a": ["b", "c"]} other bad bit' + result, malformed = sm.loads_partial_json(j, dict) + assert result == [{"a": ["b", "c"]}] + assert malformed + + def test_well_formed_partial_single_type_list(self): + """ Parses partially well formed json getting only list types """ + j = 'bad bit["b", "c"] other bad bit' + result, malformed = sm.loads_partial_json(j, list) + assert result == [["b", "c"]] + assert malformed diff --git a/src/autotester/server/utils/tests/user_management_test.py b/src/autotester/server/utils/tests/user_management_test.py new file mode 100644 index 00000000..11679f0b --- /dev/null +++ b/src/autotester/server/utils/tests/user_management_test.py @@ -0,0 +1,46 @@ +import os +import pytest +from unittest.mock import patch +from autotester.server.utils import user_management as um +from autotester.exceptions import TesterUserError + + +class TestTesterUser: + def test_unset_workeruser(self): + """ Should raise an error when the environment variable is not set """ + os.environ.pop("WORKERUSER", None) + with pytest.raises(TesterUserError): + um.tester_user() + + def test_set_workeruser_path_exists(self): + """ Should return the worker's name and workspace if the workspace exists """ + config = {"workspace": "some/path", "_workspace_contents": {"_workers": "work"}} + os.environ["WORKERUSER"] = "someworker" + with patch.dict("autotester.server.utils.resource_management.config._settings", config): + with patch("os.path.isdir", return_value=True): + user, workspace = um.tester_user() + assert user == "someworker" + assert workspace == "some/path/work/someworker" + + def test_set_workeruser_path_not_exist(self): + """ Should raise an error if the workspace does not exist """ + config = {"workspace": "some/path", "_workspace_contents": {"_workers": "work"}} + os.environ["WORKERUSER"] = "someworker" + with patch.dict("autotester.server.utils.resource_management.config._settings", config): + with patch("os.path.isdir", return_value=False): + with 
pytest.raises(TesterUserError): + um.tester_user() + + +class TestGetReaperUsername: + def test_no_reaper(self): + """ Should return None if there is no reaper username in the config """ + config = {"workers": [{"users": [{"name": "someuser"}]}]} + with patch.dict("autotester.server.utils.resource_management.config._settings", config): + assert um.get_reaper_username("someuser") is None + + def test_reaper(self): + """ Should return the reaper username if there is a reaper username in the config """ + config = {"workers": [{"users": [{"name": "someuser", "reaper": "reaperuser"}]}]} + with patch.dict("autotester.server.utils.resource_management.config._settings", config): + assert um.get_reaper_username("someuser") == "reaperuser" diff --git a/src/autotester/server/utils/user_management.py b/src/autotester/server/utils/user_management.py index 2d2db136..7dec8001 100644 --- a/src/autotester/server/utils/user_management.py +++ b/src/autotester/server/utils/user_management.py @@ -1,36 +1,36 @@ import os -import pwd +from typing import Tuple from autotester.exceptions import TesterUserError from autotester.config import config from autotester.server.utils.string_management import decode_if_bytes -def current_user(): - return pwd.getpwuid(os.getuid()).pw_name - - -def tester_user(): +def tester_user() -> Tuple[str, str]: """ - Get the workspace for the tester user specified by the MARKUSWORKERUSER + Get the workspace for the tester user specified by the WORKERUSER environment variable, return the user_name and path to that user's workspace. Raises an AutotestError if a tester user is not specified or if a workspace has not been setup for that user. """ - user_name = os.environ.get("MARKUSWORKERUSER") + user_name = os.environ.get("WORKERUSER") if user_name is None: raise TesterUserError("No worker users available to run this job") - user_workspace = os.path.join( - config["workspace"], config["_workspace_contents", "_workers"], user_name - ) + user_workspace = os.path.join(config["workspace"], config["_workspace_contents", "_workers"], user_name) if not os.path.isdir(user_workspace): raise TesterUserError(f"No workspace directory for user: {user_name}") - return user_name, decode_if_bytes(user_workspace) + return user_name, user_workspace -def get_reaper_username(test_username): +def get_reaper_username(test_username: str) -> str: + """ + Return the name of the user designated as the reaper for test_username. + A reaper user cleans up all remaining processes run by test_username. + + Returns None if there is no associated reaper user. 
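+
+    A rough usage sketch, assuming a configuration entry shaped like the one
+    used in the tests (the worker names here are invented for illustration):
+
+        # config["workers"] == [{"users": [{"name": "autotst0", "reaper": "autotst0r"}]}]
+        get_reaper_username("autotst0")  # -> "autotst0r"
+        get_reaper_username("autotst1")  # -> None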
+ """ for users in (users for conf in config["workers"] for users in conf["users"]): if users["name"] == test_username: - return users["reaper"] + return users.get("reaper") diff --git a/src/autotester/setup.py b/src/autotester/setup.py index 2f389e79..3fc47a7e 100644 --- a/src/autotester/setup.py +++ b/src/autotester/setup.py @@ -2,12 +2,10 @@ test_exclusions = ["*.tests", "*.tests.*", "tests.*", "tests"] -packages = ["testers"] + [ - f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions) -] +packages = ["testers"] + [f"testers.{pkg}" for pkg in find_packages(where="testers", exclude=test_exclusions)] setup( - name="markus-autotester-testers", + name="autotester-testers", version="2.0", description="Testers for the automatic tester for programming assignments", url="https://github.com/MarkUsProject/markus-autotesting", diff --git a/src/autotester/testers/custom/bin/install.sh b/src/autotester/testers/custom/bin/install.sh index cb7db56a..8beebb42 100755 --- a/src/autotester/testers/custom/bin/install.sh +++ b/src/autotester/testers/custom/bin/install.sh @@ -3,8 +3,8 @@ set -e # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi diff --git a/src/autotester/testers/custom/custom_tester.py b/src/autotester/testers/custom/custom_tester.py new file mode 100644 index 00000000..ba6d4a72 --- /dev/null +++ b/src/autotester/testers/custom/custom_tester.py @@ -0,0 +1,18 @@ +import subprocess +from testers.tester import Tester +from testers.specs import TestSpecs + + +class CustomTester(Tester): + def __init__(self, specs: TestSpecs) -> None: + """ Initialize a CustomTester """ + super().__init__(specs, test_class=None) + + @Tester.run_decorator + def run(self) -> None: + """ + Run a test and print the results to stdout + """ + file_paths = self.specs["test_data", "script_files"] + for file_path in file_paths: + subprocess.run(f"./{file_path}") diff --git a/src/autotester/testers/custom/default_hooks.py b/src/autotester/testers/custom/default_hooks.py deleted file mode 100644 index f4247477..00000000 --- a/src/autotester/testers/custom/default_hooks.py +++ /dev/null @@ -1,11 +0,0 @@ -import os - - -def before_all_custom(settings, **_kwargs): - """ Make script files executable """ - for test_data in settings["test_data"]: - for script_file in test_data["script_files"]: - os.chmod(script_file, 0o755) - - -HOOKS = [before_all_custom] diff --git a/src/autotester/testers/custom/markus_custom_tester.py b/src/autotester/testers/custom/markus_custom_tester.py deleted file mode 100644 index fedb673a..00000000 --- a/src/autotester/testers/custom/markus_custom_tester.py +++ /dev/null @@ -1,13 +0,0 @@ -import subprocess -from testers.markus_tester import MarkusTester - - -class MarkusCustomTester(MarkusTester): - def __init__(self, specs): - super().__init__(specs, test_class=None) - - @MarkusTester.run_decorator - def run(self): - file_paths = self.specs["test_data", "script_files"] - for file_path in file_paths: - subprocess.run(f"./{file_path}") diff --git a/src/autotester/testers/custom/tests/script_files/autotest_01.sh b/src/autotester/testers/custom/tests/script_files/autotest_01.sh index 533625d0..6fe5dc02 100644 --- a/src/autotester/testers/custom/tests/script_files/autotest_01.sh +++ b/src/autotester/testers/custom/tests/script_files/autotest_01.sh @@ -1,3 +1,3 @@ #!/bin/bash -python submission.py $0 +python3 submission.py $0 diff --git 
a/src/autotester/testers/custom/tests/student_files/submission.py b/src/autotester/testers/custom/tests/student_files/submission.py index 6eead424..2e9a4c32 100644 --- a/src/autotester/testers/custom/tests/student_files/submission.py +++ b/src/autotester/testers/custom/tests/student_files/submission.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """ This student submission file is used to test the autotester @@ -9,14 +9,4 @@ import json -print( - json.dumps( - { - "name": "pass_test", - "output": "NA", - "marks_earned": 2, - "marks_total": 2, - "status": "pass", - } - ) -) +print(json.dumps({"name": "pass_test", "output": "NA", "marks_earned": 2, "marks_total": 2, "status": "pass"})) diff --git a/src/autotester/testers/haskell/bin/install.sh b/src/autotester/testers/haskell/bin/install.sh index 275513fb..0bbf6fc6 100755 --- a/src/autotester/testers/haskell/bin/install.sh +++ b/src/autotester/testers/haskell/bin/install.sh @@ -4,7 +4,15 @@ set -e install_packages() { echo "[HASKELL-INSTALL] Installing system packages" - sudo apt-get install ghc cabal-install python3 + local debian_frontend + local apt_opts + local apt_yes + if [ -n "${NON_INTERACTIVE}" ]; then + debian_frontend=noninteractive + apt_opts=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-confold') + apt_yes='-y' + fi + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install ghc cabal-install } install_haskell_packages() { @@ -12,14 +20,13 @@ install_haskell_packages() { # The order that these packages are installed matters. Could cause a dependency conflict # Crucially it looks like tasty-stats needs to be installed before tasty-quickcheck # TODO: install these without cabal so they can be properly isolated/uninstalled - sudo cabal install tasty-stats --global sudo cabal install tasty-discover --global sudo cabal install tasty-quickcheck --global } # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi @@ -27,6 +34,7 @@ fi THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") THISDIR=$(dirname "${THISSCRIPT}") SPECSDIR=$(readlink -f "${THISDIR}/../specs") +NON_INTERACTIVE=$1 # main install_packages diff --git a/src/autotester/testers/haskell/markus_haskell_tester.py b/src/autotester/testers/haskell/haskell_tester.py similarity index 60% rename from src/autotester/testers/haskell/markus_haskell_tester.py rename to src/autotester/testers/haskell/haskell_tester.py index 86093b4e..cc9efdaa 100644 --- a/src/autotester/testers/haskell/markus_haskell_tester.py +++ b/src/autotester/testers/haskell/haskell_tester.py @@ -2,12 +2,22 @@ import os import tempfile import csv +from typing import Dict, Optional, IO, Type, List, Iterator, Union -from testers.markus_tester import MarkusTester, MarkusTest, MarkusTestError +from testers.specs import TestSpecs +from testers.tester import Tester, Test, TestError -class MarkusHaskellTest(MarkusTest): - def __init__(self, tester, test_file, result, feedback_open=None): +class HaskellTest(Test): + def __init__( + self, tester: "HaskellTester", test_file: str, result: Dict, feedback_open: Optional[IO] = None, + ) -> None: + """ + Initialize a Haskell test created by tester. + + The result was created after running the tests in test_file and test feedback + will be written to feedback_open. 
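+
+        A rough sketch of one result dict, limited to the fields read here
+        ("name" and "status"); the test name is invented for illustration:
+
+            {"name": "prop_reverse", "status": "OK", ...}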
+ """ self._test_name = result.get("name") self._file_name = test_file self.status = result["status"] @@ -15,13 +25,17 @@ def __init__(self, tester, test_file, result, feedback_open=None): super().__init__(tester, feedback_open) @property - def test_name(self): + def test_name(self) -> str: + """ The name of this test """ if self._test_name: return ".".join([self._file_name, self._test_name]) return self._file_name - @MarkusTest.run_decorator - def run(self): + @Test.run_decorator + def run(self) -> str: + """ + Return a json string containing all test result information. + """ if self.status == "OK": return self.passed(message=self.message) elif self.status == "FAIL": @@ -30,20 +44,23 @@ def run(self): return self.error(message=self.message) -class MarkusHaskellTester(MarkusTester): - # column indexes of relevant data from tasty-stats csv - # reference: http://hackage.haskell.org/package/tasty-stats +class HaskellTester(Tester): TASTYSTATS = {"name": 1, "time": 2, "result": 3, "description": -1} - def __init__(self, specs, test_class=MarkusHaskellTest): + def __init__(self, specs: TestSpecs, test_class: Type[HaskellTest] = HaskellTest,) -> None: + """ + Initialize a Haskell tester using the specifications in specs. + + This tester will create tests of type test_class. + """ super().__init__(specs, test_class) - def _test_run_flags(self, test_file): + def _test_run_flags(self, test_file: str) -> List[str]: """ Return a list of additional arguments to the tasty-discover executable """ module_flag = f"--modules={os.path.basename(test_file)}" - stats_flag = "--ingredient=Test.Tasty.Stats.consoleStatsReporter" + stats_flag = "--ingredient=Stats.consoleStatsReporter" flags = [ module_flag, stats_flag, @@ -52,7 +69,7 @@ def _test_run_flags(self, test_file): ] return flags - def _parse_test_results(self, reader): + def _parse_test_results(self, reader: Iterator) -> List[Dict[str, Union[int, str]]]: """ Return a list of test result dictionaries parsed from an open csv reader object. The reader should be reading a csv file which @@ -69,7 +86,7 @@ def _parse_test_results(self, reader): test_results.append(result) return test_results - def run_haskell_tests(self): + def run_haskell_tests(self) -> Dict[str, List[Dict[str, Union[int, str]]]]: """ Return test results for each test file. Results contain a list of parsed test results. 
@@ -78,32 +95,26 @@ def run_haskell_tests(self): """ results = {} this_dir = os.getcwd() + haskell_lib = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib") for test_file in self.specs["test_data", "script_files"]: with tempfile.NamedTemporaryFile(dir=this_dir) as f: - cmd = ["tasty-discover", ".", "_", f.name] + self._test_run_flags( - test_file - ) - subprocess.run( - cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True - ) + cmd = ["tasty-discover", ".", "_", f.name] + self._test_run_flags(test_file) + subprocess.run(cmd, stdout=subprocess.DEVNULL, universal_newlines=True, check=True) with tempfile.NamedTemporaryFile(mode="w+", dir=this_dir) as sf: - cmd = ["runghc", f.name, f"--stats={sf.name}"] - subprocess.run( - cmd, - stdout=subprocess.DEVNULL, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True, - ) + cmd = ["runghc", "--", f"-i={haskell_lib}", f.name, f"--stats={sf.name}"] + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE, universal_newlines=True) results[test_file] = self._parse_test_results(csv.reader(sf)) return results - @MarkusTester.run_decorator - def run(self): + @Tester.run_decorator + def run(self) -> None: + """ + Runs all tests in this tester. + """ try: results = self.run_haskell_tests() except subprocess.CalledProcessError as e: - raise MarkusTestError(e.stderr) from e + raise TestError(e.stderr) from e with self.open_feedback() as feedback_open: for test_file, result in results.items(): for res in result: diff --git a/src/autotester/testers/haskell/lib/Stats.hs b/src/autotester/testers/haskell/lib/Stats.hs new file mode 100644 index 00000000..c96b7adb --- /dev/null +++ b/src/autotester/testers/haskell/lib/Stats.hs @@ -0,0 +1,88 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE ViewPatterns #-} +module Stats (statsReporter, consoleStatsReporter) where + +-- a paired down version of the Tasty Stats ingredient that crucially does not depend on git: +-- https://hackage.haskell.org/package/tasty-stats + +import Control.Concurrent.STM (atomically, readTVar, TVar, STM, retry) +import Control.Monad ((>=>)) +import Data.Char (isSpace, isPrint) +import Data.Foldable (fold) +import Data.IntMap (IntMap) +import Data.List (dropWhileEnd, intersperse) +import Data.Monoid (Endo(..)) +import Data.Proxy (Proxy(..)) +import Data.Tagged (Tagged(..)) +import System.Directory (doesFileExist) +import Test.Tasty +import Test.Tasty.Ingredients +import Test.Tasty.Options +import Test.Tasty.Runners +import qualified Data.IntMap as IntMap + +newtype StatsFile = StatsFile FilePath + +instance IsOption (Maybe StatsFile) where + defaultValue = Nothing + parseValue = Just . Just . StatsFile + optionName = Tagged "stats" + optionHelp = Tagged "CSV file to store the collected statistics" + +-- | Reporter with support to collect statistics in a file. +statsReporter :: Ingredient +statsReporter = TestReporter optDesc runner + where optDesc = [ Option (Proxy :: Proxy (Maybe StatsFile)) ] + runner opts tree = do + StatsFile file <- lookupOption opts + pure $ collectStats file $ IntMap.fromList $ zip [0..] $ testsNames opts tree + +-- | Console reporter with support to collect statistics in a file. 
+consoleStatsReporter :: Ingredient +consoleStatsReporter = composeReporters consoleTestReporter statsReporter + +zipMap :: IntMap a -> IntMap b -> IntMap (a, b) +zipMap a b = IntMap.mapMaybeWithKey (\k v -> (v,) <$> IntMap.lookup k b) a + +waitFinished :: TVar Status -> STM Result +waitFinished = readTVar >=> \case + Done x -> pure x + _ -> retry + +collectStats :: FilePath -> IntMap TestName -> StatusMap -> IO (Time -> IO Bool) +collectStats file names status = do + results <- atomically (traverse waitFinished status) + rows <- resultRow $ IntMap.toList $ zipMap names results + exists <- doesFileExist file + if exists + then appendFile file $ formatCSV rows "" + else writeFile file $ formatCSV (header : rows) "" + pure $ const $ pure $ and $ fmap resultSuccessful results + + +foldEndo :: (Functor f, Foldable f) => f (a -> a) -> (a -> a) +foldEndo = appEndo . fold . fmap Endo + +formatCSV :: [[String]] -> ShowS +formatCSV = foldEndo . map ((. ('\n':)) . foldEndo . intersperse (',':) . map field) + where field s | all isValid s = (s++) + | otherwise = ('"':) . escape s . ('"':) + escape ('"':s) = ("\"\""++) . escape s + escape (c:s) = (c:) . escape s + escape [] = id + isValid ' ' = True + isValid ',' = False + isValid c = isPrint c && not (isSpace c) + +header :: [String] +header = ["idx", "name", "time", "result", "description"] + +resultRow :: [(Int, (TestName, Result))] -> IO [[String]] +resultRow results = do + pure $ flip map results $ + \(show -> idx, (name, Result { resultDescription=dropWhileEnd isSpace -> description + , resultShortDescription=result + , resultTime=show -> time })) -> + [idx, name, time, result, description] diff --git a/src/autotester/testers/java/bin/install.sh b/src/autotester/testers/java/bin/install.sh index a2115285..a9ab6e19 100755 --- a/src/autotester/testers/java/bin/install.sh +++ b/src/autotester/testers/java/bin/install.sh @@ -4,7 +4,15 @@ set -e install_packages() { echo "[JAVA-INSTALL] Installing system packages" - sudo apt-get install python3 openjdk-8-jdk jq + local debian_frontend + local apt_opts + local apt_yes + if [ -n "${NON_INTERACTIVE}" ]; then + debian_frontend=noninteractive + apt_opts=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-confold') + apt_yes='-y' + fi + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install openjdk-8-jdk } compile_tester() { @@ -16,12 +24,12 @@ compile_tester() { update_specs() { echo "[JAVA-INSTALL] Updating specs" - echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/MarkusJavaTester/lib\"" > "${SPECSDIR}/install_settings.json" + echo '{}' | jq ".path_to_tester_jars = \"${JAVADIR}/build/install/JavaTester/lib\"" > "${SPECSDIR}/install_settings.json" } # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi @@ -30,6 +38,7 @@ THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") THISDIR=$(dirname "${THISSCRIPT}") SPECSDIR=$(readlink -f "${THISDIR}/../specs") JAVADIR=$(readlink -f "${THISDIR}/../lib") +NON_INTERACTIVE=$1 # main install_packages diff --git a/src/autotester/testers/java/java_tester.py b/src/autotester/testers/java/java_tester.py new file mode 100644 index 00000000..25b07e40 --- /dev/null +++ b/src/autotester/testers/java/java_tester.py @@ -0,0 +1,118 @@ +import enum +import json +import subprocess +from typing import Dict, Optional, IO, Type + +from testers.specs import TestSpecs +from testers.tester import Tester, Test, TestError + + +class JavaTest(Test): + 
class JUnitStatus(enum.Enum): + SUCCESSFUL = 1 + ABORTED = 2 + FAILED = 3 + + ERRORS = { + "bad_javac": 'Java compilation error: "{}"', + "bad_java": 'Java runtime error: "{}"', + } + + def __init__(self, tester: "JavaTester", result: Dict, feedback_open: Optional[IO] = None,) -> None: + """ + Initialize a Java test created by tester. + + The result was created after running some junit tests. + Test feedback will be written to feedback_open. + """ + self.class_name, _sep, self.method_name = result["name"].partition(".") + self.description = result.get("description") + self.status = JavaTest.JUnitStatus[result["status"]] + self.message = result.get("message") + super().__init__(tester, feedback_open) + + @property + def test_name(self) -> str: + """ The name of this test """ + name = f"{self.class_name}.{self.method_name}" + if self.description: + name += f" ({self.description})" + return name + + @Test.run_decorator + def run(self) -> str: + """ + Return a json string containing all test result information. + """ + if self.status == JavaTest.JUnitStatus.SUCCESSFUL: + return self.passed() + elif self.status == JavaTest.JUnitStatus.FAILED: + return self.failed(message=self.message) + else: + return self.error(message=self.message) + + +class JavaTester(Tester): + + JAVA_TESTER_CLASS = "edu.toronto.cs.teach.JavaTester" + + def __init__(self, specs: TestSpecs, test_class: Type[JavaTest] = JavaTest) -> None: + """ + Initialize a Java tester using the specifications in specs. + + This tester will create tests of type test_class. + """ + super().__init__(specs, test_class) + self.java_classpath = f'.:{self.specs["install_data", "path_to_tester_jars"]}/*' + + def compile(self) -> None: + """ + Compile the junit tests specified in the self.specs specifications. + """ + javac_command = ["javac", "-cp", self.java_classpath] + javac_command.extend(self.specs["test_data", "script_files"]) + # student files imported by tests will be compiled on cascade + subprocess.run( + javac_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, check=True, + ) + + def run_junit(self) -> subprocess.CompletedProcess: + """ + Run the junit tests specified in the self.specs specifications. + """ + java_command = [ + "java", + "-cp", + self.java_classpath, + JavaTester.JAVA_TESTER_CLASS, + ] + java_command.extend(self.specs["test_data", "script_files"]) + java = subprocess.run( + java_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=True, + ) + return java + + @Tester.run_decorator + def run(self) -> None: + """ + Runs all tests in this tester. 
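+
+        A rough sketch of the JSON the JUnit runner is expected to print to
+        stdout, based on the fields JavaTest reads; the class and method
+        names are invented for illustration:
+
+            [{"name": "SubmissionTest.testAdd", "status": "SUCCESSFUL"}]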
+ """ + # check that the submission compiles against the tests + try: + self.compile() + except subprocess.CalledProcessError as e: + msg = JavaTest.ERRORS["bad_javac"].format(e.stdout) + raise TestError(msg) from e + # run the tests with junit + try: + results = self.run_junit() + if results.stderr: + raise TestError(results.stderr) + except subprocess.CalledProcessError as e: + msg = JavaTest.ERRORS["bad_java"].format(e.stdout + e.stderr) + raise TestError(msg) from e + with self.open_feedback() as feedback_open: + for result in json.loads(results.stdout): + test = self.test_class(self, result, feedback_open) + result_json = test.run() + print(result_json, flush=True) diff --git a/src/autotester/testers/java/lib/build.gradle b/src/autotester/testers/java/lib/build.gradle index 02faefb6..a2331592 100644 --- a/src/autotester/testers/java/lib/build.gradle +++ b/src/autotester/testers/java/lib/build.gradle @@ -4,7 +4,7 @@ apply plugin: 'application' group = 'edu.toronto.cs.teach' version = '1.7.1' sourceCompatibility = 1.8 -mainClassName = 'edu.toronto.cs.teach.MarkusJavaTester' +mainClassName = 'edu.toronto.cs.teach.JavaTester' repositories { mavenCentral() diff --git a/src/autotester/testers/java/lib/settings.gradle b/src/autotester/testers/java/lib/settings.gradle index 675b47b7..71c5f8e1 100644 --- a/src/autotester/testers/java/lib/settings.gradle +++ b/src/autotester/testers/java/lib/settings.gradle @@ -1 +1 @@ -rootProject.name = 'MarkusJavaTester' +rootProject.name = 'JavaTester' diff --git a/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java b/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/JavaTester.java similarity index 96% rename from src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java rename to src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/JavaTester.java index 7e5a05d5..de1983a4 100644 --- a/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/MarkusJavaTester.java +++ b/src/autotester/testers/java/lib/src/main/java/edu/toronto/cs/teach/JavaTester.java @@ -22,7 +22,7 @@ import java.nio.file.Paths; import java.util.*; -public class MarkusJavaTester { +public class JavaTester { private class TestResult { private @NotNull String name; @@ -34,7 +34,7 @@ private class TestResult { private @NotNull String[] testClasses; private @NotNull List results; - public MarkusJavaTester(@NotNull String[] testFiles) { + public JavaTester(@NotNull String[] testFiles) { this.testClasses = new String[testFiles.length]; for (int i = 0; i < testFiles.length; i++) { @@ -93,7 +93,7 @@ public void executionFinished(TestIdentifier testIdentifier, TestExecutionResult public static void main(String[] args) { - MarkusJavaTester tester = new MarkusJavaTester(args); + JavaTester tester = new JavaTester(args); tester.run(); } diff --git a/src/autotester/testers/java/markus_java_tester.py b/src/autotester/testers/java/markus_java_tester.py deleted file mode 100644 index 9d51d936..00000000 --- a/src/autotester/testers/java/markus_java_tester.py +++ /dev/null @@ -1,100 +0,0 @@ -import enum -import json -import subprocess - -from testers.markus_tester import MarkusTester, MarkusTest, MarkusTestError - - -class MarkusJavaTest(MarkusTest): - class JUnitStatus(enum.Enum): - SUCCESSFUL = 1 - ABORTED = 2 - FAILED = 3 - - ERRORS = { - "bad_javac": 'Java compilation error: "{}"', - "bad_java": 'Java runtime error: "{}"', - } - - def __init__(self, tester, result, feedback_open=None): - 
self.class_name, _sep, self.method_name = result["name"].partition(".") - self.description = result.get("description") - self.status = MarkusJavaTest.JUnitStatus[result["status"]] - self.message = result.get("message") - super().__init__(tester, feedback_open) - - @property - def test_name(self): - name = f"{self.class_name}.{self.method_name}" - if self.description: - name += f" ({self.description})" - return name - - @MarkusTest.run_decorator - def run(self): - if self.status == MarkusJavaTest.JUnitStatus.SUCCESSFUL: - return self.passed() - elif self.status == MarkusJavaTest.JUnitStatus.FAILED: - return self.failed(message=self.message) - else: - return self.error(message=self.message) - - -class MarkusJavaTester(MarkusTester): - - JAVA_TESTER_CLASS = "edu.toronto.cs.teach.MarkusJavaTester" - - def __init__(self, specs, test_class=MarkusJavaTest): - super().__init__(specs, test_class) - self.java_classpath = f'.:{self.specs["install_data", "path_to_tester_jars"]}/*' - - def compile(self): - javac_command = ["javac", "-cp", self.java_classpath] - javac_command.extend(self.specs["test_data", "script_files"]) - # student files imported by tests will be compiled on cascade - subprocess.run( - javac_command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True, - check=True, - ) - - def run_junit(self): - java_command = [ - "java", - "-cp", - self.java_classpath, - MarkusJavaTester.JAVA_TESTER_CLASS, - ] - java_command.extend(self.specs["test_data", "script_files"]) - java = subprocess.run( - java_command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True, - ) - return java - - @MarkusTester.run_decorator - def run(self): - # check that the submission compiles against the tests - try: - self.compile() - except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS["bad_javac"].format(e.stdout) - raise MarkusTestError(msg) from e - # run the tests with junit - try: - results = self.run_junit() - if results.stderr: - raise MarkusTestError(results.stderr) - except subprocess.CalledProcessError as e: - msg = MarkusJavaTest.ERRORS["bad_java"].format(e.stdout + e.stderr) - raise MarkusTestError(msg) from e - with self.open_feedback() as feedback_open: - for result in json.loads(results.stdout): - test = self.test_class(self, result, feedback_open) - result_json = test.run() - print(result_json, flush=True) diff --git a/src/autotester/testers/py/bin/install.sh b/src/autotester/testers/py/bin/install.sh index 17ace2f2..8beebb42 100755 --- a/src/autotester/testers/py/bin/install.sh +++ b/src/autotester/testers/py/bin/install.sh @@ -2,14 +2,9 @@ set -e -install_packages() { - echo "[PYTHON-INSTALL] Installing system packages" - sudo apt-get install python3 -} - # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi @@ -19,5 +14,4 @@ THISDIR=$(dirname "${THISSCRIPT}") SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main -install_packages touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/py/lib/c_helper.py b/src/autotester/testers/py/lib/c_helper.py index 08fe7be9..320d5dc7 100644 --- a/src/autotester/testers/py/lib/c_helper.py +++ b/src/autotester/testers/py/lib/c_helper.py @@ -47,9 +47,7 @@ def setUpClass(cls) -> None: Use make if cls.make is True, and gcc otherwise. """ if not cls.make and not cls.source_files: - raise ValueError( - "ERROR: TestExecutable subclasses must specify source_files or set make=True." 
- ) + raise ValueError("ERROR: TestExecutable subclasses must specify source_files or set make=True.") cls.compile_out = "" cls.compile_err = "" @@ -65,13 +63,9 @@ def setUpClass(cls) -> None: try: if cls.make: # Tuple (stdoutdata, stderrdata) is returned - cls.compile_out, cls.compile_err, _ = _make( - cls.make_targets, cls.make_args - ) + cls.compile_out, cls.compile_err, _ = _make(cls.make_targets, cls.make_args) else: - cls.compile_out, cls.compile_err, _ = _compile( - cls.source_files, cls.executable_name - ) + cls.compile_out, cls.compile_err, _ = _compile(cls.source_files, cls.executable_name) except subprocess.CalledProcessError: cls.compiled = False else: @@ -135,9 +129,7 @@ def simple_test( """ def _t(self: "TestExecutable") -> None: - stdout, stderr, returncode = self._run_exec( - args=args, input_=input_, timeout=timeout, check=check - ) + stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) nonlocal expected_stderr nonlocal expected_stdout @@ -192,9 +184,7 @@ def substr_test( """ def _t(self: "TestExecutable") -> None: - stdout, stderr, returncode = self._run_exec( - args=args, input_=input_, timeout=timeout, check=check - ) + stdout, stderr, returncode = self._run_exec(args=args, input_=input_, timeout=timeout, check=check) nonlocal expected_stderr nonlocal expected_stdout @@ -227,9 +217,7 @@ class TestTrace(TestExecutable): call_types = [] # The only call types to watch out for (see ltrace man page) @classmethod - def _check_trace( - cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs - ): + def _check_trace(cls, args: Optional[List[str]] = None, ltrace_flags=None, **kwargs): if ltrace_flags is None: ltrace_flags = DEFAULT_LTRACE_FLAGS else: @@ -240,11 +228,7 @@ def _check_trace( "+".join(["__libc_start_main"] + cls.call_types), ] - return Trace( - [os.path.join(".", cls.executable_name)] + (args or []), - ltrace_flags, - **kwargs - ) + return Trace([os.path.join(".", cls.executable_name)] + (args or []), ltrace_flags, **kwargs) class Trace: @@ -275,9 +259,7 @@ class Trace: this can be confirmed examining the regex """ - def __init__( - self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs - ): + def __init__(self, command: List[str], ltrace_flags: Optional[List[str]] = None, **kwargs): ltrace_flags = ltrace_flags or [] try: _exec(["ltrace"] + ltrace_flags + command, **kwargs) @@ -426,16 +408,12 @@ def build_outputs(self, args=""): `arg`s is optionally a string containing the command-line arguments given to the executable. """ print(os.path.join(self.input_dir, "*." + self.input_extension)) - for file in glob.glob( - os.path.join(self.input_dir, "*." + self.input_extension) - ): + for file in glob.glob(os.path.join(self.input_dir, "*." + self.input_extension)): print(file) name = os.path.splitext(os.path.basename(file))[0] stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) - cmd = "{} {} < {} > {} 2> {}".format( - self.executable_path, args, file, stdout_file, stderr_file - ) + cmd = "{} {} < {} > {} 2> {}".format(self.executable_path, args, file, stdout_file, stderr_file) print("Running:", cmd) try: _exec_shell([cmd]) @@ -444,9 +422,7 @@ def build_outputs(self, args=""): def clean(self): """Remove generated test files.""" - for file in glob.glob( - os.path.join(self.input_dir, "*." + self.input_extension) - ): + for file in glob.glob(os.path.join(self.input_dir, "*." 
+ self.input_extension)): name = os.path.splitext(os.path.basename(file))[0] stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) @@ -459,9 +435,7 @@ def populate_tests(self, test_klass, args=None): This must be called *after* build_outputs has been called. """ args = args or [] - for file in glob.glob( - os.path.join(self.input_dir, "*." + self.input_extension) - ): + for file in glob.glob(os.path.join(self.input_dir, "*." + self.input_extension)): name = os.path.splitext(os.path.basename(file))[0] stdout_file = os.path.join(self.out_dir, name + "." + self.output_extension) stderr_file = os.path.join(self.out_dir, name + "." + self.error_extension) @@ -473,12 +447,7 @@ def populate_tests(self, test_klass, args=None): setattr( test_klass, "test_" + name, - simple_test( - args, - expected_stdout=test_out, - expected_stderr=test_err, - input_=test_in, - ), + simple_test(args, expected_stdout=test_out, expected_stderr=test_err, input_=test_in,), ) diff --git a/src/autotester/testers/py/lib/sql_helper.py b/src/autotester/testers/py/lib/sql_helper.py index 55611b4d..6adc5d0c 100644 --- a/src/autotester/testers/py/lib/sql_helper.py +++ b/src/autotester/testers/py/lib/sql_helper.py @@ -15,7 +15,7 @@ def _in_autotest_env() -> bool: """ - Return true iff this script is being run by the MarkUs autotester. + Return true iff this script is being run by the autotester. This function can be used to check whether the AUTOTESTENV environment variable has been set to 'true'. @@ -27,7 +27,7 @@ def connection(*args, **kwargs): """ Return a psycopg2 connection object - If this function is called while being run by the MarkUs autotester, + If this function is called while being run by the autotester, any arguments passed to this function will be ignored and a connection will be made to the correct database in the autotester's run environment. @@ -106,7 +106,7 @@ def execute_psql_file( $ psql -f [] - If this function is called while being run by the MarkUs autotester, + If this function is called while being run by the autotester, the , and arguments function will be ignored and a connection will be made to the correct database in the autotester's run environment instead. @@ -142,9 +142,7 @@ def execute_psql_file( "PGDATABASE": database or os.environ.get("PGDATABASE"), } env = {**os.environ, **db_vars} - return subprocess.run( - ["psql", "-f", filename] + list(args), env=env, capture_output=True - ) + return subprocess.run(["psql", "-f", filename] + list(args), env=env, capture_output=True) class PSQLTest: @@ -169,7 +167,7 @@ def create_connection(cls, *args, **kwargs) -> None: """ Set an open connection to a database as a class attribute. - If this function is called while being run by the MarkUs autotester, + If this function is called while being run by the autotester, any arguments passed to this function will be ignored and a connection will be made to the correct database in the autotester's run environment. @@ -233,11 +231,7 @@ def schema(cls, schema: str, persist: bool = False) -> ContextManager: @classmethod def copy_schema( - cls, - to_schema: str, - tables: Optional[List[str]] = None, - from_schema: str = "public", - overwrite: bool = True, + cls, to_schema: str, tables: Optional[List[str]] = None, from_schema: str = "public", overwrite: bool = True, ) -> None: """ Copies tables from to . 
is @@ -256,16 +250,12 @@ def copy_schema( curr.execute("CREATE SCHEMA IF NOT EXISTS %s;", [AsIs(to_schema)]) for table in tables: if overwrite: - curr.execute( - "DROP TABLE IF EXISTS %s.%s;", [AsIs(to_schema), AsIs(table)] - ) + curr.execute("DROP TABLE IF EXISTS %s.%s;", [AsIs(to_schema), AsIs(table)]) strs = {**strings, "table": AsIs(table)} curr.execute(cls.SCHEMA_COPY_STR, strs) @classmethod - def execute_files( - cls, files: List[str], *args, cursor: Optional[CursorType] = None, **kwargs - ) -> None: + def execute_files(cls, files: List[str], *args, cursor: Optional[CursorType] = None, **kwargs) -> None: """ Execute each file in by passing the content of each to cursor.execute. diff --git a/src/autotester/testers/py/markus_python_tester.py b/src/autotester/testers/py/python_tester.py similarity index 56% rename from src/autotester/testers/py/markus_python_tester.py rename to src/autotester/testers/py/python_tester.py index 5c16d6ab..ec231f0c 100644 --- a/src/autotester/testers/py/markus_python_tester.py +++ b/src/autotester/testers/py/python_tester.py @@ -1,34 +1,45 @@ import os import unittest +from typing import TextIO, Tuple, Optional, Type, Dict, IO, List +from types import TracebackType import pytest import sys -from testers.markus_tester import MarkusTester, MarkusTest +from testers.specs import TestSpecs +from testers.tester import Tester, Test -class MarkusTextTestResults(unittest.TextTestResult): + +class TextTestResults(unittest.TextTestResult): """ Custom unittest.TextTestResult that saves results as a hash to self.results so they can be more easily - parsed by the MarkusPythonTest.run function + parsed by the PythonTest.run function """ - def __init__(self, stream, descriptions, verbosity): + def __init__(self, stream: TextIO, descriptions: bool, verbosity: int) -> None: + """ + Extends TextTestResult.__init__ and adds additional attributes to keep track + of successes and additional result information. + """ super().__init__(stream, descriptions, verbosity) self.results = [] self.successes = [] - def addSuccess(self, test): - self.results.append( - { - "status": "success", - "name": test.id(), - "errors": "", - "description": test._testMethodDoc, - } - ) + def addSuccess(self, test: unittest.TestCase) -> None: + """ + Record that a test passed. + """ + self.results.append({"status": "success", "name": test.id(), "errors": "", "description": test._testMethodDoc}) self.successes.append(test) - def addFailure(self, test, err): + def addFailure( + self, + test: unittest.TestCase, + err: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]], + ) -> None: + """ + Record that a test failed. + """ super().addFailure(test, err) self.results.append( { @@ -39,29 +50,44 @@ def addFailure(self, test, err): } ) - def addError(self, test, err): + def addError( + self, + test: unittest.TestCase, + err: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]], + ) -> None: + """ + Record that a test failed with an error. + """ super().addError(test, err) self.results.append( - { - "status": "error", - "name": test.id(), - "errors": self.errors[-1][-1], - "description": test._testMethodDoc, - } + {"status": "error", "name": test.id(), "errors": self.errors[-1][-1], "description": test._testMethodDoc} ) -class MarkusPytestPlugin: +class PytestPlugin: """ Pytest plugin to collect and parse test results as well as any errors during the test collection process. 
""" - def __init__(self): + def __init__(self) -> None: + """ + Initialize a pytest plugin for collecting results + """ self.results = {} @pytest.hookimpl(hookwrapper=True, tryfirst=True) def pytest_runtest_makereport(self, item, call): + """ + Implement a pytest hook that is run when reporting the + results of a given test item. + + Records the results of each test in the self.results + attribute. + + See pytest documentation for a description of the parameter + types and the return value. + """ outcome = yield rep = outcome.get_result() if rep.failed or item.nodeid not in self.results: @@ -74,6 +100,16 @@ def pytest_runtest_makereport(self, item, call): return rep def pytest_collectreport(self, report): + """ + Implement a pytest hook that is run after the collector has + finished collecting all tests. + + Records a test failure in the self.results attribute if the + collection step failed. + + See pytest documentation for a description of the parameter + types and the return value. + """ if report.failed: self.results[report.nodeid] = { "status": "error", @@ -83,8 +119,16 @@ def pytest_collectreport(self, report): } -class MarkusPythonTest(MarkusTest): - def __init__(self, tester, test_file, result, feedback_open=None): +class PythonTest(Test): + def __init__( + self, tester: "PythonTester", test_file: str, result: Dict, feedback_open: Optional[IO] = None, + ): + """ + Initialize a Python test created by tester. + + The result was created after running some unittest or pytest tests. + Test feedback will be written to feedback_open. + """ self._test_name = result["name"] self._file_name = test_file self.description = result.get("description") @@ -93,13 +137,17 @@ def __init__(self, tester, test_file, result, feedback_open=None): super().__init__(tester, feedback_open) @property - def test_name(self): + def test_name(self) -> str: + """ The name of this test """ if self.description: return f"{self._test_name} ({self.description})" return self._test_name - @MarkusTest.run_decorator - def run(self): + @Test.run_decorator + def run(self) -> str: + """ + Return a json string containing all test result information. + """ if self.status == "success": return self.passed(message=self.message) elif self.status == "failure": @@ -108,12 +156,19 @@ def run(self): return self.error(message=self.message) -class MarkusPythonTester(MarkusTester): - def __init__(self, specs, test_class=MarkusPythonTest): +class PythonTester(Tester): + def __init__( + self, specs: TestSpecs, test_class: Type[PythonTest] = PythonTest, + ): + """ + Initialize a python tester using the specifications in specs. + + This tester will create tests of type test_class. 
+ """ super().__init__(specs, test_class) @staticmethod - def _load_unittest_tests(test_file): + def _load_unittest_tests(test_file: str) -> unittest.TestSuite: """ Discover unittest tests in test_file and return a unittest.TestSuite that contains these tests @@ -123,7 +178,7 @@ def _load_unittest_tests(test_file): discovered_tests = test_loader.discover(test_file_dir, test_file) return unittest.TestSuite(discovered_tests) - def _run_unittest_tests(self, test_file): + def _run_unittest_tests(self, test_file: str) -> List[Dict]: """ Run unittest tests in test_file and return the results of these tests @@ -131,14 +186,12 @@ def _run_unittest_tests(self, test_file): test_suite = self._load_unittest_tests(test_file) with open(os.devnull, "w") as nullstream: test_runner = unittest.TextTestRunner( - verbosity=self.specs["test_data", "output_verbosity"], - stream=nullstream, - resultclass=MarkusTextTestResults, + verbosity=self.specs["test_data", "output_verbosity"], stream=nullstream, resultclass=TextTestResults, ) test_result = test_runner.run(test_suite) return test_result.results - def _run_pytest_tests(self, test_file): + def _run_pytest_tests(self, test_file: str) -> List[Dict]: """ Run unittest tests in test_file and return the results of these tests @@ -148,14 +201,14 @@ def _run_pytest_tests(self, test_file): try: sys.stdout = null_out verbosity = self.specs["test_data", "output_verbosity"] - plugin = MarkusPytestPlugin() + plugin = PytestPlugin() pytest.main([test_file, f"--tb={verbosity}"], plugins=[plugin]) results.extend(plugin.results.values()) finally: sys.stdout = sys.__stdout__ return results - def run_python_tests(self): + def run_python_tests(self) -> Dict[str, List[Dict]]: """ Return a dict mapping each filename to its results """ @@ -168,8 +221,11 @@ def run_python_tests(self): results[test_file] = result return results - @MarkusTester.run_decorator - def run(self): + @Tester.run_decorator + def run(self) -> None: + """ + Runs all tests in this tester. 
+ """ results = self.run_python_tests() with self.open_feedback() as feedback_open: for test_file, result in results.items(): diff --git a/src/autotester/testers/pyta/bin/install.sh b/src/autotester/testers/pyta/bin/install.sh index 41a1b141..8beebb42 100755 --- a/src/autotester/testers/pyta/bin/install.sh +++ b/src/autotester/testers/pyta/bin/install.sh @@ -2,14 +2,9 @@ set -e -install_packages() { - echo "[PYTA-INSTALL] Installing system packages" - sudo apt-get install python3 -} - # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi @@ -19,5 +14,4 @@ THISDIR=$(dirname "${THISSCRIPT}") SPECSDIR=$(readlink -f "${THISDIR}/../specs") # main -install_packages touch "${SPECSDIR}/.installed" diff --git a/src/autotester/testers/pyta/bin/requirements.txt b/src/autotester/testers/pyta/bin/requirements.txt index 5659fd7c..8c452108 100644 --- a/src/autotester/testers/pyta/bin/requirements.txt +++ b/src/autotester/testers/pyta/bin/requirements.txt @@ -1,2 +1,3 @@ python-ta==1.4.2;python_version<"3.8" python-ta>=1.5.0;python_version>="3.8" +isort<5;python_version<"3.8" diff --git a/src/autotester/testers/pyta/markus_pyta_tester.py b/src/autotester/testers/pyta/pyta_tester.py similarity index 56% rename from src/autotester/testers/pyta/markus_pyta_tester.py rename to src/autotester/testers/pyta/pyta_tester.py index a3155f88..dd6b64bd 100644 --- a/src/autotester/testers/pyta/markus_pyta_tester.py +++ b/src/autotester/testers/pyta/pyta_tester.py @@ -1,50 +1,64 @@ import os import sys import json -from collections import defaultdict +from typing import Optional, IO, Type, Dict import python_ta from pylint.config import VALIDATORS from python_ta.reporters import PositionReporter, PlainReporter +from testers.specs import TestSpecs -from testers.markus_tester import MarkusTester, MarkusTest +from testers.tester import Tester, Test -class MarkusPyTAReporter(PositionReporter): - def __init__(self, *args, **kwargs): - super().__init__(self, *args, **kwargs) - self._sorted_error_messages = defaultdict(list) - +class PyTAReporter(PositionReporter): def print_messages(self, level="all"): - # print to feedback file, then reset and generate data for annotations + """ + Print error and warning messages to a feedback file + """ PlainReporter.print_messages(self, level) + self._sorted_error_messages.clear() + self._sorted_style_messages.clear() super().print_messages(level) - def output_blob(self): + def output_blob(self) -> None: + """ + Override this method so that the default json string report + doesn't get written to stdout. + """ pass -class MarkusPyTATest(MarkusTest): +class PyTATest(Test): ERROR_MSGS = {"reported": "{} error(s)"} - def __init__(self, tester, student_file_path, max_points, feedback_open=None): + def __init__( + self, tester: "PyTATester", student_file_path: str, max_points: int, feedback_open: Optional[IO] = None, + ) -> None: + """ + Initialize a Python TA test that checks the student_file_path file, + removes 1 point per error from a possible max_points total, and + writes results to feedback_open. 
+ """ self.student_file = student_file_path super().__init__(tester, feedback_open) self.points_total = max_points self.annotations = [] @property - def test_name(self): + def test_name(self) -> str: + """ The name of this test """ return f"PyTA {self.student_file}" - def add_annotations(self, reporter): + def add_annotations(self, reporter: PyTAReporter) -> None: + """ + Records annotations from the results extracted from reporter. + """ for result in reporter._output["results"]: if "filename" not in result: continue - for msg_group in result.get("msg_errors", []) + result.get( - "msg_styles", [] - ): + for msg_group in result.get("msg_errors", []) + result.get("msg_styles", []): for msg in msg_group["occurrences"]: self.annotations.append( { @@ -58,22 +72,20 @@ def add_annotations(self, reporter): } ) - def after_successful_test_run(self): + def after_successful_test_run(self) -> None: + """ Record all the annotations from this test in the tester object """ self.tester.annotations.extend(self.annotations) - @MarkusTest.run_decorator - def run(self): + @Test.run_decorator + def run(self) -> str: + """ + Return a json string containing all test result information. + """ try: # run PyTA and collect annotations - sys.stdout = ( - self.feedback_open - if self.feedback_open is not None - else self.tester.devnull - ) + sys.stdout = self.feedback_open if self.feedback_open is not None else self.tester.devnull sys.stderr = self.tester.devnull - reporter = python_ta.check_all( - self.student_file, config=self.tester.pyta_config - ) + reporter = python_ta.check_all(self.student_file, config=self.tester.pyta_config) if reporter.current_file_linted is None: # No files were checked. The mark is set to 0. num_messages = 0 @@ -83,11 +95,7 @@ def run(self): # deduct 1 point per message occurrence (not type) num_messages = len(self.annotations) points_earned = max(0, self.points_total - num_messages) - message = ( - self.ERROR_MSGS["reported"].format(num_messages) - if num_messages > 0 - else "" - ) + message = self.ERROR_MSGS["reported"].format(num_messages) if num_messages > 0 else "" return self.done(points_earned, message) except Exception as e: self.annotations = [] @@ -97,17 +105,30 @@ def run(self): sys.stdout = sys.__stdout__ -class MarkusPyTATester(MarkusTester): - def __init__(self, specs, test_class=MarkusPyTATest): +class PyTATester(Tester): + def __init__(self, specs: TestSpecs, test_class: Type[PyTATest] = PyTATest): + """ + Initialize a Python TA tester using the specifications in specs. + + This tester will create tests of type test_class. + """ super().__init__(specs, test_class) self.feedback_file = self.specs.get("test_data", "feedback_file_name") self.annotation_file = self.specs.get("test_data", "annotation_file") self.pyta_config = self.update_pyta_config() self.annotations = [] self.devnull = open(os.devnull, "w") - VALIDATORS[MarkusPyTAReporter.__name__] = MarkusPyTAReporter + VALIDATORS[PyTAReporter.__name__] = PyTAReporter + + def update_pyta_config(self) -> Dict: + """ + Return a dictionary containing the configuration options for + this tester. - def update_pyta_config(self): + This dictionary is updated to set the pyta-reporter option to + PyTAReporter and the pyta-output-file to this tester's + feedback file. 
+ """ config_file = self.specs.get("test_data", "config_file_name") if config_file: with open(config_file) as f: @@ -115,26 +136,31 @@ def update_pyta_config(self): else: config_dict = {} - config_dict["pyta-reporter"] = "MarkusPyTAReporter" + config_dict["pyta-reporter"] = "PyTAReporter" if self.feedback_file: config_dict["pyta-output-file"] = self.feedback_file return config_dict - def after_tester_run(self): + def after_tester_run(self) -> None: + """ + Write annotations extracted from the test results to the + annotation_file. + """ if self.annotation_file and self.annotations: with open(self.annotation_file, "w") as annotations_open: json.dump(self.annotations, annotations_open) if self.devnull: self.devnull.close() - @MarkusTester.run_decorator - def run(self): + @Tester.run_decorator + def run(self) -> None: + """ + Runs all tests in this tester. + """ with self.open_feedback(self.feedback_file) as feedback_open: for test_data in self.specs.get("test_data", "student_files", default=[]): student_file_path = test_data["file_path"] max_points = test_data.get("max_points", 10) - test = self.test_class( - self, student_file_path, max_points, feedback_open - ) + test = self.test_class(self, student_file_path, max_points, feedback_open) print(test.run()) diff --git a/src/autotester/testers/racket/bin/install.sh b/src/autotester/testers/racket/bin/install.sh index e21a6016..2b7aa017 100755 --- a/src/autotester/testers/racket/bin/install.sh +++ b/src/autotester/testers/racket/bin/install.sh @@ -4,12 +4,20 @@ set -e install_packages() { echo "[RACKET-INSTALL] Installing system packages" - sudo apt-get install racket python3 + local debian_frontend + local apt_opts + local apt_yes + if [ -n "${NON_INTERACTIVE}" ]; then + debian_frontend=noninteractive + apt_opts=(-o 'Dpkg::Options::=--force-confdef' -o 'Dpkg::Options::=--force-confold') + apt_yes='-y' + fi + sudo DEBIAN_FRONTEND=${debian_frontend} apt-get ${apt_yes} "${apt_opts[@]}" install racket } # script starts here -if [[ $# -ne 0 ]]; then - echo "Usage: $0" +if [[ $# -gt 1 ]]; then + echo "Usage: $0 [--non-interactive]" exit 1 fi @@ -17,6 +25,7 @@ fi THISSCRIPT=$(readlink -f "${BASH_SOURCE[0]}") THISDIR=$(dirname "${THISSCRIPT}") SPECSDIR=$(readlink -f "${THISDIR}/../specs") +NON_INTERACTIVE=$1 # main install_packages diff --git a/src/autotester/testers/racket/lib/markus.rkt b/src/autotester/testers/racket/lib/autotester.rkt similarity index 73% rename from src/autotester/testers/racket/lib/markus.rkt rename to src/autotester/testers/racket/lib/autotester.rkt index d8482914..cd4c7206 100755 --- a/src/autotester/testers/racket/lib/markus.rkt +++ b/src/autotester/testers/racket/lib/autotester.rkt @@ -17,43 +17,43 @@ ...)) ; struct containing the required values from running a single test -(struct markus-result (name status message)) +(struct autotester-result (name status message)) -; convert a markus-result struct to hash -(define (markus-result->hash r) - (hasheq 'name (markus-result-name r) - 'status (markus-result-status r) - 'message (markus-result-message r))) +; convert a autotester-result struct to hash +(define (autotester-result->hash r) + (hasheq 'name (autotester-result-name r) + 'status (autotester-result-status r) + 'message (autotester-result-message r))) ; convert test result info to hash (define (check-infos->hash stack) (make-immutable-hash (map (lambda (ci) (cons (check-info-name ci) (check-info-value ci))) stack))) -; create result hash from a successful markus test +; create result hash from a successful test (define 
(make-success test-case-name result) - (markus-result->hash - (markus-result test-case-name "pass" ""))) + (autotester-result->hash + (autotester-result test-case-name "pass" ""))) -; create result hash from a failed markus test +; create result hash from a failed autotester test (define (make-failure test-case-name result) (let* ( [failure-data (if (exn:test:check? result) (check-infos->hash (exn:test:check-stack result)) (hash))] [message (hash-ref failure-data 'message "")]) - (markus-result->hash - (markus-result + (autotester-result->hash + (autotester-result test-case-name "fail" (format "~s" message))))) -; create result hash from a markus test that caused an error +; create result hash from a test that caused an error (define (make-error test-case-name result) - (markus-result->hash - (markus-result test-case-name "error" (format "~s" (exn-message result))))) + (autotester-result->hash + (autotester-result test-case-name "error" (format "~s" (exn-message result))))) -; create result hash depending for a markus test depending on whether it was a +; create result hash depending for a test depending on whether it was a ; success, failure, or caused an error (see above) (define (show-test-result result) (match result @@ -62,7 +62,7 @@ [(test-error test-case-name result) (make-error test-case-name result)])) ; define a custom error type (currently not used) -(define (raise-markus-error message [error-type 'markus-error]) +(define (raise-autotester-error message [error-type 'autotester-error]) (raise (error error-type message))) ; main module: parses command line arguments, runs tests specified by these arguments diff --git a/src/autotester/testers/racket/markus_racket_tester.py b/src/autotester/testers/racket/markus_racket_tester.py deleted file mode 100644 index 91601b54..00000000 --- a/src/autotester/testers/racket/markus_racket_tester.py +++ /dev/null @@ -1,76 +0,0 @@ -import json -import subprocess -import os - -from testers.markus_tester import MarkusTester, MarkusTest, MarkusTestError - - -class MarkusRacketTest(MarkusTest): - def __init__(self, tester, feedback_open, result): - self._test_name = result["name"] - self.status = result["status"] - self.message = result["message"] - super().__init__(tester, feedback_open) - - @property - def test_name(self): - return self._test_name - - @MarkusTest.run_decorator - def run(self): - if self.status == "pass": - return self.passed() - elif self.status == "fail": - return self.failed(message=self.message) - else: - return self.error(message=self.message) - - -class MarkusRacketTester(MarkusTester): - - ERROR_MSGS = {"bad_json": "Unable to parse test results: {}"} - - def __init__(self, specs, test_class=MarkusRacketTest): - super().__init__(specs, test_class) - - def run_racket_test(self): - """ - Return the subprocess.CompletedProcess object for each test file run using the - markus.rkt tester. 
- """ - results = {} - markus_rkt = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "lib", "markus.rkt" - ) - for group in self.specs["test_data", "script_files"]: - test_file = group.get("script_file") - if test_file: - suite_name = group.get("test_suite_name", "all-tests") - cmd = [markus_rkt, "--test-suite", suite_name, test_file] - rkt = subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - check=True, - ) - results[test_file] = rkt.stdout - return results - - @MarkusTester.run_decorator - def run(self): - try: - results = self.run_racket_test() - except subprocess.CalledProcessError as e: - raise MarkusTestError(e.stderr) from e - with self.open_feedback() as feedback_open: - for test_file, result in results.items(): - if result.strip(): - try: - test_results = json.loads(result) - except json.JSONDecodeError as e: - msg = MarkusRacketTester.ERROR_MSGS["bad_json"].format(result) - raise MarkusTestError(msg) from e - for t_result in test_results: - test = self.test_class(self, feedback_open, t_result) - print(test.run(), flush=True) diff --git a/src/autotester/testers/racket/racket_tester.py b/src/autotester/testers/racket/racket_tester.py new file mode 100644 index 00000000..d0278e1c --- /dev/null +++ b/src/autotester/testers/racket/racket_tester.py @@ -0,0 +1,88 @@ +import json +import subprocess +import os +from typing import Dict, Optional, IO, Type + +from testers.tester import Tester, Test, TestError + + +class RacketTest(Test): + def __init__(self, tester: "RacketTester", result: Dict, feedback_open: Optional[IO] = None,) -> None: + """ + Initialize a racket test created by tester. + + The result was created after running the tests in test_file and test feedback + will be written to feedback_open. + """ + self._test_name = result["name"] + self.status = result["status"] + self.message = result["message"] + super().__init__(tester, feedback_open) + + @property + def test_name(self) -> None: + """ The name of this test """ + return self._test_name + + @Test.run_decorator + def run(self) -> str: + """ + Return a json string containing all test result information. + """ + if self.status == "pass": + return self.passed() + elif self.status == "fail": + return self.failed(message=self.message) + else: + return self.error(message=self.message) + + +class RacketTester(Tester): + + ERROR_MSGS = {"bad_json": "Unable to parse test results: {}"} + + def __init__(self, specs, test_class: Type[RacketTest] = RacketTest) -> None: + """ + Initialize a racket tester using the specifications in specs. + + This tester will create tests of type test_class. + """ + super().__init__(specs, test_class) + + def run_racket_test(self) -> Dict[str, str]: + """ + Return the stdout captured from running each test script file with autotester.rkt tester. + """ + results = {} + autotester_rkt = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib", "autotester.rkt") + for group in self.specs["test_data", "script_files"]: + test_file = group.get("script_file") + if test_file: + suite_name = group.get("test_suite_name", "all-tests") + cmd = [autotester_rkt, "--test-suite", suite_name, test_file] + rkt = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=True, + ) + results[test_file] = rkt.stdout + return results + + @Tester.run_decorator + def run(self) -> None: + """ + Runs all tests in this tester. 
+ """ + try: + results = self.run_racket_test() + except subprocess.CalledProcessError as e: + raise TestError(e.stderr) from e + with self.open_feedback() as feedback_open: + for test_file, result in results.items(): + if result.strip(): + try: + test_results = json.loads(result) + except json.JSONDecodeError as e: + msg = RacketTester.ERROR_MSGS["bad_json"].format(result) + raise TestError(msg) from e + for t_result in test_results: + test = self.test_class(self, t_result, feedback_open) + print(test.run(), flush=True) diff --git a/src/autotester/testers/markus_test_specs.py b/src/autotester/testers/specs.py similarity index 66% rename from src/autotester/testers/markus_test_specs.py rename to src/autotester/testers/specs.py index dcddca78..0ed0c591 100644 --- a/src/autotester/testers/markus_test_specs.py +++ b/src/autotester/testers/specs.py @@ -1,16 +1,19 @@ import json from collections.abc import Mapping +from typing import Any, Union, Tuple, Iterable, Optional -class MarkusTestSpecs(Mapping): - def __init__(self, *args, **kwargs): +class TestSpecs(Mapping): + def __init__(self, *args: Any, **kwargs: Any) -> None: + """ Initialize a TestSpecs instance """ self._specs = dict(*args, **kwargs) @classmethod - def from_json(cls, json_str): + def from_json(cls, json_str: str) -> "TestSpecs": + """ Return a TestSpecs instance created from a json string """ return cls(json.loads(json_str)) - def __getitem__(self, key): + def __getitem__(self, key: Union[str, Tuple]) -> Any: """ Behaves like a regular dict.__getitem__ except if the key is not found and the key is a tuple @@ -18,11 +21,11 @@ def __getitem__(self, key): using each element in the tuple as a key in the next nested dictionary - >>> my_dict = {'a':{'b':{'c':123}}} + >>> my_dict = TestSpecs({'a':{'b':{'c': 123}}}) >>> my_dict['a','b','c'] 123 >>> my_dict['a','b'] - {'c':123} + {'c': 123} """ try: return self._specs[key] @@ -34,13 +37,15 @@ def __getitem__(self, key): return d raise - def __iter__(self): + def __iter__(self) -> Iterable: + """ Return an iterator over self._specs """ return iter(self._specs) - def __len__(self): + def __len__(self) -> int: + """ Return the length of self._specs """ return len(self._specs) - def get(self, *keys, default=None): + def get(self, *keys: str, default: Optional[Any] = None) -> Any: """ Behaves like a regular dict.get except if there are multiple keys it will use __getitem__ to diff --git a/src/autotester/testers/markus_tester.py b/src/autotester/testers/tester.py similarity index 71% rename from src/autotester/testers/markus_tester.py rename to src/autotester/testers/tester.py index d9d61cbe..6004afed 100644 --- a/src/autotester/testers/markus_tester.py +++ b/src/autotester/testers/tester.py @@ -1,25 +1,27 @@ from contextlib import contextmanager -import enum import json from abc import ABC, abstractmethod from functools import wraps +from typing import Optional, IO, Callable, Any, Type, Generator +from testers.specs import TestSpecs import traceback -class MarkusTestError(Exception): - pass +class TestError(Exception): + """ Error raised when a test error occurs """ -class MarkusTest(ABC): - class Status(enum.Enum): - PASS = "pass" - PARTIAL = "partial" - FAIL = "fail" - ERROR = "error" - ERROR_ALL = "error_all" +class Test(ABC): + class Status: + PASS: str = "pass" + PARTIAL: str = "partial" + FAIL: str = "fail" + ERROR: str = "error" + ERROR_ALL: str = "error_all" @abstractmethod - def __init__(self, tester, feedback_open=None): + def __init__(self, tester: "Tester", feedback_open: 
Optional[IO] = None) -> None: + """ Initialize a Test """ self.tester = tester self.points_total = self.get_total_points() if self.points_total <= 0: @@ -28,23 +30,24 @@ def __init__(self, tester, feedback_open=None): @property @abstractmethod - def test_name(self): + def test_name(self) -> str: """ Returns a unique name for the test. """ pass - def get_total_points(self): + def get_total_points(self) -> int: + """ Return the total possible points for this test """ return self.tester.specs.get("points", default={}).get(self.test_name, 1) @staticmethod def format_result( - test_name, status, output, points_earned, points_total, time=None - ): + test_name: str, status: str, output: str, points_earned: int, points_total: int, time: Optional[int] = None, + ) -> str: """ - Formats a test result as expected by Markus. + Formats a test result. :param test_name: The test name - :param status: A member of MarkusTest.Status. + :param status: A member of Test.Status. :param output: The test output. :param points_earned: The points earned by the test, must be a float >= 0 (can be greater than the test total points when assigning bonus points). @@ -66,31 +69,33 @@ def format_result( "output": output, "marks_earned": points_earned, "marks_total": points_total, - "status": status.value, + "status": status, "time": time, } ) return result_json - def format(self, status, output, points_earned): + def format(self, status: str, output: str, points_earned: int) -> str: """ - Formats the result of this test as expected by Markus. - :param status: A member of MarkusTest.Status. + Formats the result of this test. + :param status: A member of Test.Status. :param output: The test output. :param points_earned: The points earned by the test, must be a float >= 0 (can be greater than the test total points when assigning bonus points). :return The formatted test result. """ - return MarkusTest.format_result( - self.test_name, status, output, points_earned, self.points_total - ) + return Test.format_result(self.test_name, status, output, points_earned, self.points_total) def add_feedback( - self, status, feedback="", oracle_solution=None, test_solution=None - ): + self, + status: str, + feedback: str = "", + oracle_solution: Optional[str] = None, + test_solution: Optional[str] = None, + ) -> None: """ Adds the feedback of this test to the feedback file. - :param status: A member of MarkusTest.Status. + :param status: A member of Test.Status. :param feedback: The feedback, can be None. :param oracle_solution: The expected solution, can be None. :param test_solution: The test solution, can be None. @@ -98,11 +103,7 @@ def add_feedback( # TODO Reconcile with format: return both, or print both if self.feedback_open is None: raise ValueError("No feedback file enabled") - self.feedback_open.write( - "========== {}: {} ==========\n\n".format( - self.test_name, status.value.upper() - ) - ) + self.feedback_open.write("========== {}: {} ==========\n\n".format(self.test_name, status.upper())) if feedback: self.feedback_open.write("## Feedback: {}\n\n".format(feedback)) if status != self.Status.PASS: @@ -114,44 +115,42 @@ def add_feedback( self.feedback_open.write(test_solution) self.feedback_open.write("\n") - def passed_with_bonus(self, points_bonus, message=""): + def passed_with_bonus(self, points_bonus: int, message: str = "") -> str: """ Passes this test earning bonus points in addition to the test total points. If a feedback file is enabled, adds feedback to it. - :param points_bonus: The bonus points, must be a float >= 0. 
+ :param points_bonus: The bonus points, must be an int >= 0. :param message: An optional message, will be shown as test output. :return The formatted passed test. """ if points_bonus < 0: raise ValueError("The test bonus points must be >= 0") - result = self.format( - status=self.Status.PASS, - output=message, - points_earned=self.points_total + points_bonus, - ) + result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total + points_bonus,) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result - def passed(self, message=""): + def passed(self, message: str = "") -> str: """ Passes this test earning the test total points. If a feedback file is enabled, adds feedback to it. :param message: An optional message, will be shown as test output. :return The formatted passed test. """ - result = self.format( - status=self.Status.PASS, output=message, points_earned=self.points_total - ) + result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total) if self.feedback_open: self.add_feedback(status=self.Status.PASS) return result def partially_passed( - self, points_earned, message, oracle_solution=None, test_solution=None - ): + self, + points_earned: int, + message: str, + oracle_solution: Optional[str] = None, + test_solution: Optional[str] = None, + ) -> str: """ Partially passes this test with some points earned. If a feedback file is enabled, adds feedback to it. - :param points_earned: The points earned by the test, must be a float > 0 and < the test total points. + :param points_earned: The points earned by the test, must be an int > 0 and < the test total points. :param message: The message explaining why the test was not fully passed, will be shown as test output. :param oracle_solution: The optional correct solution to be added to the feedback file. :param test_solution: The optional student solution to be added to the feedback file. @@ -161,9 +160,7 @@ def partially_passed( raise ValueError("The test points earned must be > 0") if points_earned >= self.points_total: raise ValueError("The test points earned must be < the test total points") - result = self.format( - status=self.Status.PARTIAL, output=message, points_earned=points_earned - ) + result = self.format(status=self.Status.PARTIAL, output=message, points_earned=points_earned) if self.feedback_open: self.add_feedback( status=self.Status.PARTIAL, @@ -173,7 +170,7 @@ def partially_passed( ) return result - def failed(self, message, oracle_solution=None, test_solution=None): + def failed(self, message: str, oracle_solution: Optional[str] = None, test_solution: Optional[str] = None,) -> str: """ Fails this test with 0 points earned. If a feedback file is enabled, adds feedback to it. :param message: The failure message, will be shown as test output. 
@@ -184,14 +181,17 @@ def failed(self, message, oracle_solution=None, test_solution=None): result = self.format(status=self.Status.FAIL, output=message, points_earned=0) if self.feedback_open: self.add_feedback( - status=self.Status.FAIL, - feedback=message, - oracle_solution=oracle_solution, - test_solution=test_solution, + status=self.Status.FAIL, feedback=message, oracle_solution=oracle_solution, test_solution=test_solution, ) return result - def done(self, points_earned, message="", oracle_solution=None, test_solution=None): + def done( + self, + points_earned: int, + message: str = "", + oracle_solution: Optional[str] = None, + test_solution: Optional[str] = None, + ) -> str: """ Passes, partially passes or fails this test depending on the points earned. If the points are <= 0 this test is failed with 0 points earned, if the points are >= test total points this test is passed earning the test total @@ -211,11 +211,9 @@ def done(self, points_earned, message="", oracle_solution=None, test_solution=No points_bonus = points_earned - self.points_total return self.passed_with_bonus(points_bonus, message) else: - return self.partially_passed( - points_earned, message, oracle_solution, test_solution - ) + return self.partially_passed(points_earned, message, oracle_solution, test_solution) - def error(self, message): + def error(self, message: str) -> str: """ Err this test. If a feedback file is enabled, adds feedback to it. :param message: The error message, will be shown as test output. @@ -226,38 +224,36 @@ def error(self, message): self.add_feedback(status=self.Status.ERROR, feedback=message) return result - def before_test_run(self): + def before_test_run(self) -> None: """ Callback invoked before running a test. Use this for test initialization steps that can fail, rather than using test_class.__init__(). """ - pass - def after_successful_test_run(self): + def after_successful_test_run(self) -> None: """ Callback invoked after successfully running a test. Use this to access test data in the tester. Don't use this for test cleanup steps, use test_class.run() instead. """ - pass @staticmethod - def run_decorator(run_func): + def run_decorator(run_func: Callable) -> Callable: """ Wrapper around a test.run method. Used to print error messages - in the correct json format. If it catches a MarkusTestError then + in the correct json format. If it catches a TestError then only the error message is sent in the description, otherwise the whole traceback is sent. """ @wraps(run_func) - def run_func_wrapper(self, *args, **kwargs): + def run_func_wrapper(self, *args: Any, **kwargs: Any) -> str: try: # if a test __init__ fails it should really stop the whole tester, we don't have enough # info to continue safely, e.g. the total points (which skews the student mark) self.before_test_run() result_json = run_func(self, *args, **kwargs) self.after_successful_test_run() - except MarkusTestError as e: + except TestError as e: result_json = self.error(message=str(e)) except Exception as e: result_json = self.error(message=f"{traceback.format_exc()}\n{e}") @@ -266,22 +262,21 @@ def run_func_wrapper(self, *args, **kwargs): return run_func_wrapper @abstractmethod - def run(self): + def run(self) -> None: """ Runs this test. :return The formatted test. 
""" - pass -class MarkusTester(ABC): +class Tester(ABC): @abstractmethod - def __init__(self, specs, test_class=MarkusTest): + def __init__(self, specs: TestSpecs, test_class: Optional[Type[Test]] = Test,) -> None: self.specs = specs self.test_class = test_class @staticmethod - def error_all(message, points_total=0, expected=False): + def error_all(message: str, points_total: int = 0, expected: bool = False) -> str: """ Err all tests of this tester with a single message. :param message: The error message. @@ -289,49 +284,42 @@ def error_all(message, points_total=0, expected=False): :param expected: Indicates whether this reports an expected or an unexpected tester error. :return The formatted erred tests. """ - status = MarkusTest.Status.ERROR if expected else MarkusTest.Status.ERROR_ALL - return MarkusTest.format_result( - test_name="All tests", - status=status, - output=message, - points_earned=0, - points_total=points_total, + status = Test.Status.ERROR if expected else Test.Status.ERROR_ALL + return Test.format_result( + test_name="All tests", status=status, output=message, points_earned=0, points_total=points_total, ) - def before_tester_run(self): + def before_tester_run(self) -> None: """ Callback invoked before running this tester. Use this for tester initialization steps that can fail, rather than using __init__. """ - pass - def after_tester_run(self): + def after_tester_run(self) -> None: """ Callback invoked after running this tester, including in case of exceptions. Use this for tester cleanup steps that should always be executed, regardless of errors. """ - pass @staticmethod - def run_decorator(run_func): + def run_decorator(run_func: Callable) -> Callable: """ Wrapper around a tester.run method. Used to print error messages - in the correct json format. If it catches a MarkusTestError then + in the correct json format. If it catches a TestError then only the error message is sent in the description, otherwise the whole traceback is sent. """ @wraps(run_func) - def run_func_wrapper(self, *args, **kwargs): + def run_func_wrapper(self, *args: Any, **kwargs: Any) -> None: try: self.before_tester_run() return run_func(self, *args, **kwargs) - except MarkusTestError as e: - print(MarkusTester.error_all(message=str(e), expected=True), flush=True) + except TestError as e: + print(Tester.error_all(message=str(e), expected=True), flush=True) except Exception as e: print( - MarkusTester.error_all(message=f"{traceback.format_exc()}\n{e}"), - flush=True, + Tester.error_all(message=f"{traceback.format_exc()}\n{e}"), flush=True, ) finally: self.after_tester_run() @@ -339,7 +327,7 @@ def run_func_wrapper(self, *args, **kwargs): return run_func_wrapper @contextmanager - def open_feedback(self, filename=None, mode="w"): + def open_feedback(self, filename: Optional[str] = None, mode: str = "w") -> Generator[Optional[IO], None, None]: """ Yields an open file object, opened in mode if it exists, otherwise it yields None. @@ -359,5 +347,7 @@ def open_feedback(self, filename=None, mode="w"): yield None @abstractmethod - def run(self): - pass + def run(self) -> None: + """ + Runs all tests in this tester. 
+ """ diff --git a/src/autotester/tests/cli_test.py b/src/autotester/tests/cli_test.py index 74e12a8e..c4b98a45 100644 --- a/src/autotester/tests/cli_test.py +++ b/src/autotester/tests/cli_test.py @@ -2,29 +2,20 @@ import json import re import pytest -import inspect -import tempfile import glob -from unittest.mock import patch, ANY, Mock -from contextlib import contextmanager -from fakeredis import FakeStrictRedis +import tempfile +from unittest.mock import patch, ANY from rq.exceptions import NoSuchJobError from autotester import cli - - -@pytest.fixture(autouse=True) -def redis(): - fake_redis = FakeStrictRedis() - with patch("autotester.cli.redis_connection", return_value=fake_redis): - with patch( - "autotester.server.utils.redis_management.redis_connection", - return_value=fake_redis, - ): - yield fake_redis +from contextlib import contextmanager @contextmanager def tmp_script_dir(settings_dict): + """ + Patches a script directory with a settings file that contains + as a json string + """ with tempfile.TemporaryDirectory() as tmp_dir: files_dir = os.path.join(tmp_dir, "files") os.mkdir(files_dir) @@ -36,271 +27,91 @@ def tmp_script_dir(settings_dict): yield tmp_dir -@pytest.fixture(autouse=True) -def empty_test_script_dir(redis): - empty_settings = {"testers": [{"test_data": []}]} - with tmp_script_dir(empty_settings) as tmp_dir: - yield tmp_dir - - -@pytest.fixture -def non_existant_test_script_dir(): - with patch("autotester.cli.test_script_directory", return_value=None): - yield - - -@pytest.fixture -def pop_interval(): - with patch( - "autotester.server.utils.redis_management.get_avg_pop_interval", - return_value=None, - ): - yield - - -@pytest.fixture(autouse=True) -def mock_rmtree(): - with patch("shutil.rmtree") as rm: - yield rm - - -@pytest.fixture(autouse=True) -def mock_enqueue_call(): - with patch("rq.Queue.enqueue_call") as enqueue_func: - yield enqueue_func - - -class DummyTestError(Exception): - pass - - class TestEnqueueTest: - @staticmethod - def get_kwargs(**kw): - param_kwargs = {k: "" for k in inspect.signature(cli.run_test).parameters} - return {**param_kwargs, **kw} - - def test_fails_missing_required_args(self): + def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir, enqueue_kwargs): + """ Should not enqueue if test files do not exist """ + with pytest.raises(cli.TestScriptFilesError): + cli.enqueue_tests(**enqueue_kwargs) + + def test_fails_if_test_data_is_empty(self, enqueue_kwargs): + """ Should not enqueue if there is no test data """ + enqueue_kwargs["test_data"] = [] + with pytest.raises(cli.TestParameterError): + cli.enqueue_tests(**enqueue_kwargs) + + def test_can_find_test_files(self, enqueue_kwargs): + """ Should be able to find test files """ try: - cli.enqueue_test("Admin", 1) - except cli.JobArgumentError: - return - except cli.MarkUsError as e: - pytest.fail( - f"should have failed because kwargs are missing but instead failed with: {e}" - ) - pytest.fail("should have failed because kwargs are missing") - - def test_accepts_same_kwargs_as_server_run_test_method(self): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) - except cli.JobArgumentError: - pytest.fail("should not have failed because kwargs are not missing") - except cli.MarkUsError: - pass - - def test_fails_if_cannot_find_valid_queue(self): - try: - cli.enqueue_test("Tim", None, **self.get_kwargs()) - except cli.InvalidQueueError: - return - except cli.MarkUsError as e: - pytest.fail( - f"should have failed because a valid queue is not found but instead 
failed with: {e}" - ) - pytest.fail("should have failed because a valid queue is not found") - - def test_can_find_valid_queue(self): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) - except cli.InvalidQueueError: - pytest.fail("should not have failed because a valid queue is available") - except cli.MarkUsError: - pass - - def test_fails_if_test_files_do_not_exist(self, non_existant_test_script_dir): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) + settings = {"testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}]} + with tmp_script_dir(settings): + cli.enqueue_tests(**enqueue_kwargs) except cli.TestScriptFilesError: - return - except cli.MarkUsError as e: - pytest.fail( - f"should have failed because no test scripts could be found but instead failed with: {e}" - ) - pytest.fail("should have failed because no test scripts could be found") + pytest.fail("should not have failed because test scripts do exist for this test") - def test_can_find_test_files(self): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) - except cli.TestScriptFilesError: - pytest.fail("should not have failed because no test scripts could be found") - except cli.MarkUsError: - pass - - def test_writes_queue_info_to_stdout(self, capfd, pop_interval): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) - except cli.MarkUsError: - pass + def test_writes_queue_info_to_stdout(self, capfd, pop_interval, enqueue_kwargs): + """ Should write info about the queue to stdout """ + settings = {"testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}]} + with tmp_script_dir(settings): + cli.enqueue_tests(**enqueue_kwargs) out, _err = capfd.readouterr() - assert re.search(r"^\d+$", out) + assert re.search(r"^\d+$", out) is not None - def test_fails_if_no_tests_groups(self): - try: - cli.enqueue_test("Admin", 1, **self.get_kwargs()) - except cli.TestParameterError: - return - except cli.MarkUsError: - pass - - def test_fails_if_no_groups_in_category(self): - settings = {"testers": [{"test_data": [{"category": ["admin"]}]}]} + def test_fails_if_no_tests_to_run(self, enqueue_kwargs): + """ Should not enqueue if there are no tests to run """ + settings = {"testers": [{"test_data": []}]} with tmp_script_dir(settings): - try: - cli.enqueue_test( - "Admin", 1, **self.get_kwargs(test_categories=["student"]) - ) - except cli.TestParameterError: - return - except cli.MarkUsError: - pass + with pytest.raises(cli.TestParameterError): + cli.enqueue_tests(**enqueue_kwargs) - def test_can_find_tests_in_given_category(self): - settings = { - "testers": [{"test_data": [{"category": ["admin"], "timeout": 30}]}] - } + def test_can_find_tests_in_given_category(self, enqueue_kwargs): + """ Should enqueue if there are tests to run """ + settings = {"testers": [{"test_data": [{"category": ["admin"], "timeout": 30}]}]} with tmp_script_dir(settings): - try: - cli.enqueue_test( - "Admin", 1, **self.get_kwargs(test_categories=["admin"]) - ) - except cli.TestParameterError: - pytest.fail("should not have failed to find an admin test") - except cli.MarkUsError: - pass + cli.enqueue_tests(**enqueue_kwargs) - def test_can_enqueue_test_with_timeout(self, mock_enqueue_call): - settings = { - "testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}] - } + def test_can_enqueue_test_with_timeout(self, mock_enqueue_call, enqueue_kwargs): + """ Should set the job timeout when enqueuing """ + settings = {"testers": [{"test_data": [{"category": ["admin"], "timeout": 10}]}]} with 
tmp_script_dir(settings): - cli.enqueue_test("Admin", 1, **self.get_kwargs(test_categories=["admin"])) - mock_enqueue_call.assert_called_with( - ANY, kwargs=ANY, job_id=ANY, timeout=15 - ) - - def test_cleans_up_files_on_error(self, mock_rmtree): - with pytest.raises(Exception): - cli.enqueue_test("Admin", 1, **self.get_kwargs(files_path="something")) - - -class TestUpdateSpecs: - @staticmethod - def get_kwargs(**kw): - param_kwargs = { - k: "" for k in inspect.signature(cli.update_test_specs).parameters - } - return {**param_kwargs, **kw} - - def test_fails_when_schema_is_invalid(self): - with patch( - "autotester.server.utils.form_validation.validate_with_defaults", - return_value=DummyTestError("error"), - ): - with patch("autotester.cli.update_test_specs"): - try: - cli.update_specs("", **self.get_kwargs(schema={})) - except DummyTestError: - return - pytest.fail("should have failed because the form is invalid") - - def test_succeeds_when_schema_is_valid(self): - with patch( - "autotester.server.utils.form_validation.validate_with_defaults", - return_value=[], - ): - with patch("autotester.cli.update_test_specs"): - try: - cli.update_specs("", **self.get_kwargs(schema={})) - except DummyTestError: - pytest.fail("should not have failed because the form is valid") - - def test_calls_update_test_specs(self): - with patch( - "autotester.server.utils.form_validation.validate_with_defaults", - return_value=[], - ): - with patch("autotester.cli.update_test_specs") as update_test_specs: - cli.update_specs("", **self.get_kwargs(schema={})) - update_test_specs.assert_called_once() - - def test_cleans_up_files_on_error(self, mock_rmtree): - with patch( - "autotester.server.utils.form_validation.validate_with_defaults", - return_value=DummyTestError("error"), - ): - with patch("autotester.cli.update_test_specs"): - with pytest.raises(Exception): - cli.update_specs( - **self.get_kwargs(schema={}, files_path="test_files") - ) - - -@pytest.fixture -def mock_rq_job(): - with patch("rq.job.Job") as job: - enqueued_job = Mock() - job.fetch.return_value = enqueued_job - yield job, enqueued_job + cli.enqueue_tests(**enqueue_kwargs) + mock_enqueue_call.assert_called_with(ANY, kwargs=ANY, job_id=ANY, timeout=15) class TestCancelTest: - def test_do_nothing_if_job_does_not_exist(self, mock_rq_job): + def test_do_nothing_if_job_does_not_exist(self, mock_rq_job, cancel_kwargs): + """ Should not cancel if the job does not exist """ job_class, mock_job = mock_rq_job job_class.fetch.side_effect = NoSuchJobError - cli.cancel_test("something", [1]) + cli.cancel_tests(**cancel_kwargs) mock_job.cancel.assert_not_called() - def test_do_nothing_if_job_not_enqueued(self, mock_rq_job): + def test_do_nothing_if_job_not_enqueued(self, mock_rq_job, cancel_kwargs): + """ Should not cancel if the job exists but is not enqueued """ _, mock_job = mock_rq_job mock_job.is_queued.return_value = False - cli.cancel_test("something", [1]) + cli.cancel_tests(**cancel_kwargs) mock_job.cancel.assert_not_called() - def test_cancel_job(self, mock_rq_job): + def test_cancel_job(self, mock_rq_job, cancel_kwargs): + """ Should call cancel for a single job """ _, mock_job = mock_rq_job mock_job.is_queued.return_value = True - mock_job.kwargs = {"files_path": None} - cli.cancel_test("something", [1]) + cli.cancel_tests(**cancel_kwargs) mock_job.cancel.assert_called_once() - def test_remove_files_when_cancelling(self, mock_rq_job, mock_rmtree): + def test_cancel_multiple_jobs(self, mock_rq_job, cancel_kwargs): + """ Should call cancel for a 
multiple job """ _, mock_job = mock_rq_job + cancel_kwargs["test_data"] += cancel_kwargs["test_data"] mock_job.is_queued.return_value = True - files_path = "something" - mock_job.kwargs = {"files_path": files_path} - cli.cancel_test("something", [1]) - mock_rmtree.assert_called_once_with(files_path, onerror=ANY) - - def test_cancel_multiple_jobs(self, mock_rq_job): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - mock_job.kwargs = {"files_path": None} - cli.cancel_test("something", [1, 2]) + cli.cancel_tests(**cancel_kwargs) assert mock_job.cancel.call_count == 2 - def test_remove_files_when_cancelling_multiple_jobs(self, mock_rq_job, mock_rmtree): - _, mock_job = mock_rq_job - mock_job.is_queued.return_value = True - files_path = "something" - mock_job.kwargs = {"files_path": files_path} - cli.cancel_test("something", [1, 2]) - assert mock_rmtree.call_count == 2 - class TestGetSchema: @staticmethod def fake_installed_testers(installed): + """ Fake that a tester is installed """ root_dir = os.path.dirname(os.path.abspath(cli.__file__)) paths = [] for tester in installed: @@ -310,6 +121,7 @@ def fake_installed_testers(installed): @staticmethod def assert_tester_in_schema(tester, schema): + """ Assert that a tester is in the schema """ assert tester in schema["definitions"]["installed_testers"]["enum"] installed = [] for option in schema["definitions"]["tester_schemas"]["oneOf"]: @@ -317,18 +129,18 @@ def assert_tester_in_schema(tester, schema): assert tester in installed def test_prints_skeleton_when_none_installed(self, capfd): + """ Should still print the schema skeleton when no tester is installed """ with patch("glob.glob", return_value=[]): cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) root_dir = os.path.dirname(os.path.abspath(cli.__file__)) - with open( - os.path.join(root_dir, "lib", "tester_schema_skeleton.json") - ) as f: + with open(os.path.join(root_dir, "lib", "tester_schema_skeleton.json")) as f: skeleton = json.load(f) assert schema == skeleton def test_prints_test_schema_when_one_installed(self, capfd): + """ Should print the schema when a single tester is installed """ with patch("glob.glob", return_value=self.fake_installed_testers(["custom"])): cli.get_schema() out, _err = capfd.readouterr() @@ -336,47 +148,10 @@ def test_prints_test_schema_when_one_installed(self, capfd): self.assert_tester_in_schema("custom", schema) def test_prints_test_schema_when_multiple_installed(self, capfd): - with patch( - "glob.glob", return_value=self.fake_installed_testers(["custom", "py"]) - ): + """ Shold print the schema when multiple testers are installed """ + with patch("glob.glob", return_value=self.fake_installed_testers(["custom", "py"])): cli.get_schema() out, _err = capfd.readouterr() schema = json.loads(out) self.assert_tester_in_schema("custom", schema) self.assert_tester_in_schema("py", schema) - - -class TestParseArgFile: - def test_loads_arg_file(self): - settings = {"some": "data"} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, "settings.json") - kwargs = cli.parse_arg_file(arg_file) - try: - kwargs.pop("files_path") - except KeyError: - pass - assert settings == kwargs - - def test_remove_arg_file(self): - settings = {"some": "data"} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, "settings.json") - cli.parse_arg_file(arg_file) - assert not os.path.isfile(arg_file) - - def test_adds_file_path_if_not_present(self): - settings = {"some": "data"} - with 
tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, "settings.json") - kwargs = cli.parse_arg_file(arg_file) - assert "files_path" in kwargs - assert os.path.realpath(kwargs["files_path"]) == os.path.realpath(tmp_dir) - - def test_does_not_add_file_path_if_present(self): - settings = {"some": "data", "files_path": "something"} - with tmp_script_dir(settings) as tmp_dir: - arg_file = os.path.join(tmp_dir, "settings.json") - kwargs = cli.parse_arg_file(arg_file) - assert "files_path" in kwargs - assert kwargs["files_path"] == "something" diff --git a/src/autotester/tests/config_test.py b/src/autotester/tests/config_test.py new file mode 100644 index 00000000..6c713f4c --- /dev/null +++ b/src/autotester/tests/config_test.py @@ -0,0 +1,89 @@ +import os +import tempfile +import yaml +import copy +import json +from autotester.config import _Config +from collections.abc import Mapping, Sequence + +CONFIG = { + "workspace": os.environ.get("AUTOTESTER_WORKSPACE", f'{os.environ["HOME"]}/.autotesting/'), + "redis": { + "url": os.environ.get("REDIS_URL", "redis://127.0.0.1:6379/0"), + "_prefix": "redis:", + "_current_test_script_hash": "current_test_scripts", + "_pop_interval_hash": "pop_interval", + }, + "server_user": os.environ.get("AUTOTESTER_SERVER_USER", os.environ["USER"]), + "supervisor": {"url": os.environ.get("AUTOTESTER_SUPERVISOR_URL", "127.0.0.1:9001")}, + "resources": { + "postgresql": { + "port": os.environ.get("PG_PORT", 5432), + "host": os.environ.get("PG_HOST", "localhost"), + "_prefix": "autotest_", + }, + "port": {"_redis_int": "port", "min": 50000, "max": 65535}, + }, + "workers": [{"users": [{"name": os.environ["USER"], "reaper": None}], "queues": ["high", "low", "batch"]}], + "rlimit_settings": {"nproc": [300, 300]}, + "_workspace_contents": { + "_scripts": "scripts", + "_results": "results", + "_specs": "specs", + "_logs": "logs", + "_workers": "workers", + "_default_venv_name": "defaultvenv", + "_settings_file": "settings.json", + "_files_dir": "files", + }, +} + + +def compare_config_settings(conf1, conf2): + """ Asserts equality recursively resulting in better error messages """ + assert type(conf1) == type(conf2) + if isinstance(conf1, Mapping): + assert conf1.keys() == conf2.keys() + for k in conf1.keys(): + compare_config_settings(conf1[k], conf2[k]) + elif isinstance(conf1, Sequence) and not isinstance(conf1, str): + for objs in zip(conf1, conf2): + compare_config_settings(*objs) + else: + assert conf1 == conf2 + + +class TestConfig: + def test_defaults(self): + """ Make sure defaults are loaded correctly including those set by env variables """ + compare_config_settings(_Config()._settings, CONFIG) + + def test_local_config(self): + """ Make sure a local config file overrides default settings """ + config = copy.deepcopy(CONFIG) + config["workspace"] = "some/path" + with tempfile.NamedTemporaryFile(mode="w") as f: + yaml.dump({"workspace": "some/path"}, f) + _Config._local_config = f.name + compare_config_settings(_Config()._settings, config) + + def test_to_json(self): + """ Should return a json object """ + settings = {"a": [{"1": 2}]} + config = _Config() + config._settings = settings + compare_config_settings(json.loads(config.to_json()), settings) + + def test_getitem_simple(self): + """ Should get a value at the top level of the dict """ + settings = {"a": [{"1": 2}]} + config = _Config() + config._settings = settings + compare_config_settings(config["a"], settings["a"]) + + def test_getitem_nested(self): + """ Should get a nested value """ + 
settings = {"a": [{"1": 2}]} + config = _Config() + config._settings = settings + assert config["a", 0, "1"] == 2 diff --git a/src/autotester/tests/conftest.py b/src/autotester/tests/conftest.py new file mode 100644 index 00000000..451dbc78 --- /dev/null +++ b/src/autotester/tests/conftest.py @@ -0,0 +1,81 @@ +import pytest +from unittest.mock import patch, Mock +from fakeredis import FakeStrictRedis +from autotester import cli + + +@pytest.fixture(autouse=True) +def redis(): + """ Patches the redis connection """ + fake_redis = FakeStrictRedis() + with patch("autotester.cli.redis_connection", return_value=fake_redis): + with patch( + "autotester.server.utils.redis_management.redis_connection", return_value=fake_redis, + ): + yield fake_redis + + +@pytest.fixture +def non_existant_test_script_dir(): + """ Patches the test script directory getter to return None""" + with patch("autotester.cli.test_script_directory", return_value=None): + yield + + +@pytest.fixture +def pop_interval(): + """ Patches the average pop interval getter to return None """ + with patch("autotester.server.utils.redis_management.get_avg_pop_interval", return_value=None): + yield + + +@pytest.fixture +def mock_enqueue_call(): + """ Patches the call to enqueue a rq job """ + with patch("rq.Queue.enqueue_call") as enqueue_func: + yield enqueue_func + + +@pytest.fixture(autouse=True) +def mock_client(): + """ Patches the call to get_client """ + mock_instance = Mock() + with patch("autotester.cli.get_client", return_value=mock_instance): + mock_instance.unique_run_str.return_value = "a" + mock_instance.unique_script_str.return_value = "a" + yield mock_instance + + +@pytest.fixture +def enqueue_kwargs(): + """ Keword arguments that can be passed to enqueue tests """ + yield { + "client_type": "test", + "client_data": {}, + "test_data": [{"test_categories": "admin"}], + "request_high_priority": False, + } + + +@pytest.fixture +def cancel_kwargs(): + """ Keword arguments that can be passed to cancel tests """ + yield {"client_type": "test", "client_data": {}, "test_data": [{}]} + + +@pytest.fixture(autouse=True) +def accept_generic_autotest_error(): + """ Fail silently for non-specific autotest errors """ + try: + yield + except cli.AutotestError: + pass + + +@pytest.fixture +def mock_rq_job(): + """ Patch rq's Job class """ + with patch("rq.job.Job") as job: + enqueued_job = Mock() + job.fetch.return_value = enqueued_job + yield job, enqueued_job