diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..964d1fda --- /dev/null +++ b/.flake8 @@ -0,0 +1,16 @@ +[flake8] +max-line-length = 120 +ignore = E266, BLK100 +# flake8 does not parse the .gitignore file so we need to re-specify what to ignore +exclude = + .git, + .idea, + __pycache__. + .DS_Store, + .hypothesis, + .pytest_cache, + *.egg-info, + .eggs, + venv, + venv2, + build \ No newline at end of file diff --git a/.flake8.ini b/.flake8.ini deleted file mode 100644 index 93298bf0..00000000 --- a/.flake8.ini +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 120 -ignore = E266, BLK100 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..b92e9455 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Initial pre-commit hooks: flake8 and black +3ed4c4c62ae4785cb3d38db49a223dec1f1fa41f \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..2bc60760 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,10 @@ +version: 2 +updates: + - package-ecosystem: pip + directory: "/client" + schedule: + interval: "monthly" + - package-ecosystem: pip + directory: "/server" + schedule: + interval: "monthly" diff --git a/.github/workflows/test_ci.yml b/.github/workflows/test_ci.yml index df2a8cc4..693a9017 100644 --- a/.github/workflows/test_ci.yml +++ b/.github/workflows/test_ci.yml @@ -5,14 +5,14 @@ on: jobs: test: if: github.event.pull_request.draft == false - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 strategy: matrix: python-version: - - 3.6 - 3.7 - 3.8 - 3.9 + - '3.10' test-dir: - client - server diff --git a/.hound.yml b/.hound.yml deleted file mode 100644 index 89747ff9..00000000 --- a/.hound.yml +++ /dev/null @@ -1,3 +0,0 @@ -flake8: - enabled: true - config_file: .flake8.ini diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..9f93c5ff --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +repos: + - repo: https://github.com/psf/black + rev: 22.6.0 + hooks: + - id: black + - repo: https://github.com/pycqa/flake8 + rev: 4.0.1 + hooks: + - id: flake8 \ No newline at end of file diff --git a/Changelog.md b/Changelog.md index 3d92cb3c..4c1efb66 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,6 +1,14 @@ # CHANGELOG All notable changes to this project will be documented here. +## [v2.2.0] +- Support dependencies on specific package versions and non-CRAN sources for R tester (#323) +- Testers no longer generate automated content for feedback files (#375) +- Multiple feedback files can now be created per test run (#375) +- PyTA plaintext reports are sent as part of the test result, not as a feedback file (#375) +- Allow client to define environment variables to pass to individual test runs (#370) +- Add ability to clean up test scripts that haven't been used for X days (#379) + ## [v2.1.2] - Support dependencies on specific package versions and non-CRAN sources for R tester (#323) diff --git a/Layerfile b/Layerfile deleted file mode 100644 index 8dd45877..00000000 --- a/Layerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM vm/ubuntu:18.04 - -RUN apt-get update && \ - apt-get -y install python3 python3-pip python3-venv && \ - rm -rf /var/lib/apt/lists/* - -WORKDIR /app -COPY . . 
- -RUN python3 -m venv /tmp/venv-server && \ - /tmp/venv-server/bin/pip install -r /app/server/requirements.txt && \ - /tmp/venv-server/bin/pip install pytest fakeredis - -RUN python3 -m venv /tmp/venv-client && \ - /tmp/venv-client/bin/pip install -r /app/client/requirements.txt && \ - /tmp/venv-client/bin/pip install pytest fakeredis - -RUN AUTOTESTER_CONFIG=/app/server/autotest_server/tests/fixtures/test_config.yml /tmp/venv-server/bin/pytest /app/server - -RUN /tmp/venv-client/bin/pytest /app/client diff --git a/README.md b/README.md index 5d036336..b20f5380 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,3 @@ -[![Acceptance tests](https://layerci.com/badge/github/MarkUsProject/markus-autotesting)](https://layerci.com/jobs/github/MarkUsProject/markus-autotesting) - Autotesting =========== @@ -20,7 +18,7 @@ Both the autotester and the API are designed to be run on Ubuntu 20.04 (or suffi #### Installing up the autotester -1. Make sure that your system has python3 installed (at least version 3.6, but we recommend the latest version if +1. Make sure that your system has python3 installed (at least version 3.7, but we recommend the latest version if possible). 2. Create or assign one user to run the autotester. For example, you may create a user named `autotest` for this purpose. 3. Create or assign at least one user to run test code. For example, you may create 3 users named `test1`, `test2`, `test3` @@ -68,7 +66,7 @@ Both the autotester and the API are designed to be run on Ubuntu 20.04 (or suffi 6. [Configure the autotester](#autotester-configuration-options) 7. Optionally install additional python versions. - The `py` (python3) and `pyta` testers can be run using any version of python between versions 3.6 and 3.10. When + The `py` (python3) and `pyta` testers can be run using any version of python between versions 3.7 and 3.10. When these testers are installed the autotester will search the PATH for available python executables. If you want users to be able to run tests with a specific python version, ensure that it is visible in the PATH of both the user running the autotester and all users who run tests. @@ -102,7 +100,7 @@ Both the autotester and the API are designed to be run on Ubuntu 20.04 (or suffi #### Installing the API -1. Make sure that your system has python3 installed (at least version 3.6, but we recommend the latest version if +1. Make sure that your system has python3 installed (at least version 3.7, but we recommend the latest version if possible). 2. Install the python requirements: @@ -293,3 +291,21 @@ where: - if `0 < points_earned < points_total` then `status == "partial"` - `status == "error"` if some error occurred that meant the number of points for this test could not be determined - `time` is optional (can be null) and is the amount of time it took to run the test (in ms) + +## Managing files on disk + +Test settings files and virtual environments are created in the `workspace` directory. These files can build up over +time and are not automatically cleaned up. In order to safely clean up these files, you can use the `clean` command +with the `start_stop.sh` script. By calling this script with the optional `--age` argument, settings files and virtual +environments older than X days will be deleted safely. For example + +```shell +autotest:/$ python3 markus-autotesting/server/start_stop.py clean --age 30 +``` + +will delete settings that have not been accessed (updated or used to run a test) in the last 30 days. 
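+
+Under the hood, each settings entry stores a `_last_access` timestamp that is refreshed whenever the settings are
+updated or used to run a test, and `clean` compares that timestamp against the `--age` threshold. Roughly, the check
+being applied looks like the sketch below (illustrative only; the `is_stale` helper name is hypothetical and not part
+of the codebase):
+
+```python
+import time
+
+SECONDS_PER_DAY = 86400
+
+
+def is_stale(last_access: int, age_days: int) -> bool:
+    """Return True when settings have not been accessed within the last `age_days` days."""
+    return (time.time() - last_access) > age_days * SECONDS_PER_DAY
+```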
+ +To see which settings *would be* deleted without actually deleting them, use the optional `--dry-run` flag as well. + +Users who try to run tests after the settings have been cleaned up in this manner will see an error message telling them +that the test settings have expired and prompting them to upload more. diff --git a/client/.dockerfiles/Dockerfile b/client/.dockerfiles/Dockerfile index 1791e00d..e6815ade 100644 --- a/client/.dockerfiles/Dockerfile +++ b/client/.dockerfiles/Dockerfile @@ -7,6 +7,7 @@ RUN apt-get update -y && apt-get install -y python3 python3-venv COPY ./requirements.txt /requirements.txt RUN python3 -m venv /markus_venv && \ + /markus_venv/bin/pip install wheel && \ /markus_venv/bin/pip install -r /requirements.txt WORKDIR /app diff --git a/client/.env b/client/.env index 8408a270..d997fcd3 100644 --- a/client/.env +++ b/client/.env @@ -3,4 +3,4 @@ FLASK_HOST=localhost FLASK_PORT=5000 ACCESS_LOG= ERROR_LOG= -SETTINGS_JOB_TIMEOUT=60 +SETTINGS_JOB_TIMEOUT=600 diff --git a/client/autotest_client/__init__.py b/client/autotest_client/__init__.py index 6cfeb5ba..3b996728 100644 --- a/client/autotest_client/__init__.py +++ b/client/autotest_client/__init__.py @@ -20,7 +20,7 @@ ERROR_LOG = os.environ.get("ERROR_LOG") ACCESS_LOG = os.environ.get("ACCESS_LOG") -SETTINGS_JOB_TIMEOUT = os.environ.get("SETTINGS_JOB_TIMEOUT", 60) +SETTINGS_JOB_TIMEOUT = os.environ.get("SETTINGS_JOB_TIMEOUT", 600) REDIS_URL = os.environ["REDIS_URL"] app = Flask(__name__) @@ -60,19 +60,21 @@ def _handle_error(e): code = e.code with _open_log(ERROR_LOG, fallback=sys.stderr) as f: try: - user = _authorize_user() + api_key = request.headers.get("Api-Key") except Exception: - user = "ERROR: user not found" - f.write(f"{datetime.now()}\n\tuser: {user}\n\t{traceback.format_exc()}\n") + api_key = "ERROR: user not found" + f.write(f"{datetime.now()}\n\tuser: {api_key}\n\t{traceback.format_exc()}\n") f.flush() + if not app.debug: + error = str(e).replace(api_key, "[client-api-key]") return jsonify(message=error), code -def _check_rate_limit(user_name): +def _check_rate_limit(api_key): conn = _redis_connection() - key = f"autotest:ratelimit:{user_name}:{datetime.now().minute}" + key = f"autotest:ratelimit:{api_key}:{datetime.now().minute}" n_requests = conn.get(key) or 0 - user_limit = conn.get(f"autotest:ratelimit:{user_name}:limit") or 20 # TODO: make default limit configurable + user_limit = conn.get(f"autotest:ratelimit:{api_key}:limit") or 20 # TODO: make default limit configurable if int(n_requests) > int(user_limit): abort(make_response(jsonify(message="Too many requests"), 429)) else: @@ -87,7 +89,7 @@ def _authorize_user(): user_name = (_redis_connection().hgetall("autotest:user_credentials") or {}).get(api_key) if user_name is None: abort(make_response(jsonify(message="Unauthorized"), 401)) - _check_rate_limit(user_name) + _check_rate_limit(api_key) return api_key @@ -228,24 +230,33 @@ def update_settings(settings_id, user): @app.route("/settings//test", methods=["PUT"]) @authorize def run_tests(settings_id, user): - file_urls = request.json["file_urls"] + test_data = request.json["test_data"] categories = request.json["categories"] high_priority = request.json.get("request_high_priority") - queue_name = "batch" if len(file_urls) > 1 else ("high" if high_priority else "low") + queue_name = "batch" if len(test_data) > 1 else ("high" if high_priority else "low") queue = rq.Queue(queue_name, connection=_rq_connection()) timeout = 0 for settings_ in settings(settings_id)["testers"]: - for test_data 
in settings_["test_data"]: - timeout += test_data["timeout"] + for data in settings_["test_data"]: + timeout += data["timeout"] ids = [] - for url in file_urls: + for data in test_data: + url = data["file_url"] + test_env_vars = data.get("env_vars", {}) id_ = _redis_connection().incr("autotest:tests_id") _redis_connection().hset("autotest:tests", key=id_, value=settings_id) ids.append(id_) - data = {"settings_id": settings_id, "test_id": id_, "files_url": url, "categories": categories, "user": user} + data = { + "settings_id": settings_id, + "test_id": id_, + "files_url": url, + "categories": categories, + "user": user, + "test_env_vars": test_env_vars, + } queue.enqueue_call( "autotest_server.run_test", kwargs=data, @@ -285,9 +296,7 @@ def get_feedback_file(settings_id, tests_id, feedback_id, **_kw): if data is None: abort(make_response(jsonify(message="File doesn't exist"), 404)) _redis_connection().delete(key) - return send_file( - io.BytesIO(data), mimetype="application/gzip", as_attachment=True, attachment_filename=str(feedback_id) - ) + return send_file(io.BytesIO(data), mimetype="application/gzip", as_attachment=True, download_name=str(feedback_id)) @app.route("/settings//tests/status", methods=["GET"]) diff --git a/client/autotest_client/tests/test_flask_app.py b/client/autotest_client/tests/test_flask_app.py index 44dcc8b8..e0dd5430 100644 --- a/client/autotest_client/tests/test_flask_app.py +++ b/client/autotest_client/tests/test_flask_app.py @@ -28,10 +28,9 @@ def fake_redis_db(monkeypatch, fake_redis_conn): class TestRegister: - @pytest.fixture def credentials(self): - return {'auth_type': 'test', 'credentials': '12345'} + return {"auth_type": "test", "credentials": "12345"} @pytest.fixture def response(self, client, credentials): @@ -41,7 +40,7 @@ def test_status(self, response): assert response.status_code == 200 def test_api_key_set(self, response): - assert response.json['api_key'] + assert response.json["api_key"] def test_credentials_set(self, response, fake_redis_conn, credentials): - assert json.loads(fake_redis_conn.hget('autotest:user_credentials', response.json['api_key'])) == credentials + assert json.loads(fake_redis_conn.hget("autotest:user_credentials", response.json["api_key"])) == credentials diff --git a/client/requirements.txt b/client/requirements.txt index c4b135c8..56d0effd 100644 --- a/client/requirements.txt +++ b/client/requirements.txt @@ -1,6 +1,5 @@ -flask==1.1.4 -markupsafe==2.0.1 -python-dotenv==0.15.0 -rq==1.7.0 -redis==3.5.3 -jsonschema==3.2.0 +flask==2.1.2 +python-dotenv==0.20.0 +rq==1.10.1 +redis==4.3.4 +jsonschema==4.7.2 diff --git a/docker-compose.yml b/docker-compose.yml index c5fc9fc4..88b76836 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,10 +6,10 @@ services: context: ./server dockerfile: ./.dockerfiles/Dockerfile args: - UBUNTU_VERSION: '18.04' + UBUNTU_VERSION: '20.04' LOGIN_USER: 'docker' WORKSPACE: '/home/docker/.autotesting' - image: markus-autotest-server-dev:1.0.1 + image: markus-autotest-server-dev:1.1.0 volumes: - ./server:/app:cached - workspace:/home/docker/.autotesting:rw @@ -27,8 +27,8 @@ services: context: ./client dockerfile: ./.dockerfiles/Dockerfile args: - UBUNTU_VERSION: '18.04' - image: markus-autotest-client-dev:1.0.0 + UBUNTU_VERSION: '20.04' + image: markus-autotest-client-dev:1.1.0 container_name: 'autotest-client' volumes: - ./client:/app:cached @@ -64,7 +64,7 @@ services: rq_dashboard: image: eoranged/rq-dashboard environment: - - RQ_DASHBOARD_REDIS_URL=redis://redis_autotest:6379/ + - 
RQ_DASHBOARD_REDIS_URL=redis://redis:6379/ ports: - '9181:9181' depends_on: diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..d6791328 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,4 @@ +[tool.black] +line-length = 120 +include = '\.pyi?$' +extend-exclude = '\.git' diff --git a/server/.dockerfiles/Dockerfile b/server/.dockerfiles/Dockerfile index 82a09dee..315b3aea 100644 --- a/server/.dockerfiles/Dockerfile +++ b/server/.dockerfiles/Dockerfile @@ -10,14 +10,14 @@ ARG WORKSPACE RUN apt-get update -y && \ apt-get -y install software-properties-common && \ add-apt-repository -y ppa:deadsnakes/ppa && \ - apt-get -y install python3.6 \ - python3.6-venv \ - python3.7 \ + apt-get -y install python3.7 \ python3.7-venv \ python3.8 \ python3.8-venv \ python3.9 \ python3.9-venv \ + python3.10 \ + python3.10-venv \ redis-server \ postgresql-client \ libpq-dev \ @@ -34,7 +34,8 @@ RUN useradd -ms /bin/bash $LOGIN_USER && \ COPY . /app -RUN python3.9 -m venv /markus_venv && \ +RUN python3.10 -m venv /markus_venv && \ + /markus_venv/bin/pip install wheel && \ /markus_venv/bin/pip install -r /app/requirements.txt && \ find /app/autotest_server/testers -name requirements.system -exec {} \; && \ rm -rf /app/* diff --git a/server/autotest_server/__init__.py b/server/autotest_server/__init__.py index 66066924..9206c3e8 100644 --- a/server/autotest_server/__init__.py +++ b/server/autotest_server/__init__.py @@ -51,27 +51,30 @@ def run_test_command(test_username: Optional[str] = None) -> str: def _create_test_group_result( - stdout: str, - stderr: str, - run_time: int, - extra_info: Dict, - feedback: Dict, - timeout: Optional[int] = None, + stdout: str, stderr: str, run_time: int, extra_info: Dict, feedback: List, timeout: Optional[int] = None ) -> ResultData: """ Return the arguments passed to this function in a dictionary. If stderr is falsy, change it to None. Load the json string in stdout as a dictionary. 
""" - test_results, malformed = loads_partial_json(stdout, dict) - return { + all_results, malformed = loads_partial_json(stdout, dict) + result = { "time": run_time, "timeout": timeout, - "tests": test_results, + "tests": [], "stderr": stderr or None, "malformed": stdout if malformed else None, "extra_info": extra_info or {}, - **feedback, + "annotations": None, + "feedback": feedback, } + for res in all_results: + if "annotations" in res: + result["annotations"] = res["annotations"] + else: + result["tests"].append(res) + + return result def _kill_user_processes(test_username: str) -> None: @@ -93,7 +96,7 @@ def _create_test_script_command(tester_type: str) -> str: f'sys.path.append("{os.path.dirname(os.path.abspath(__file__))}")', import_line, "from testers.specs import TestSpecs", - f"Tester(specs=TestSpecs.from_json(sys.stdin.read())).run()", + "Tester(specs=TestSpecs.from_json(sys.stdin.read())).run()", ] python_str = "; ".join(python_lines) return f"\"${{PYTHON}}\" -c '{python_str}'" @@ -132,38 +135,53 @@ def _get_env_vars(test_username: str) -> Dict[str, str]: def _get_feedback(test_data, tests_path, test_id): - feedback_file = test_data.get("feedback_file_name") - annotation_file = test_data.get("annotation_file") - result = {"feedback": None, "annotations": None} - if feedback_file: + feedback_files = test_data.get("feedback_file_names", []) + feedback = [] + for feedback_file in feedback_files: feedback_path = os.path.join(tests_path, feedback_file) if os.path.isfile(feedback_path): with open(feedback_path, "rb") as f: - now = int(time.time()) - key = f"autotest:feedback_file:{test_id}:{now}" conn = redis_connection() + id_ = conn.incr("autotest:feedback_files_id") + key = f"autotest:feedback_file:{test_id}:{id_}" conn.set(key, gzip.compress(f.read())) conn.expire(key, 3600) # TODO: make this configurable - result["feedback"] = { - "filename": feedback_file, - "mime_type": mimetypes.guess_type(feedback_path)[0], - "compression": "gzip", - "id": now, - } - if annotation_file and test_data.get("upload_annotations"): - annotation_path = os.path.join(tests_path, annotation_file) - if os.path.isfile(annotation_path): - with open(annotation_path, "rb") as f: - try: - result["annotations"] = json.load(f) - except json.JSONDecodeError as e: - f.seek(0) - raise Exception(f"Invalid annotation json: {f.read()}") from e - return result + feedback.append( + { + "filename": feedback_file, + "mime_type": mimetypes.guess_type(feedback_path)[0] or "text/plain", + "compression": "gzip", + "id": id_, + } + ) + else: + raise Exception(f"Cannot find feedback file at '{feedback_path}'.") + return feedback + + +def _update_env_vars(base_env: Dict, test_env: Dict) -> Dict: + """ + Update base_env with the key/value pairs in test_env. + If any keys in test_env also occur in base_env, raise an error. + Since, the content of test_env is defined by the client, this ensures that the client cannot overwrite environment + variables set by this autotester. 
+ """ + conflict = base_env.keys() & test_env.keys() + if conflict: + raise Exception( + f"The following environment variables cannot be overwritten for this test: {', '.join(conflict)}" + ) + return {**base_env, **test_env} def _run_test_specs( - cmd: str, test_settings: dict, categories: List[str], tests_path: str, test_username: str, test_id: Union[int, str] + cmd: str, + test_settings: dict, + categories: List[str], + tests_path: str, + test_username: str, + test_id: Union[int, str], + test_env_vars: Dict[str, str], ) -> List[ResultData]: """ Run each test script in test_scripts in the tests_path directory using the @@ -185,7 +203,8 @@ def _run_test_specs( timeout_expired = None timeout = test_data.get("timeout") try: - env_vars = _get_env_vars(test_username) + env_vars = {**os.environ, **_get_env_vars(test_username), **settings["_env"]} + env_vars = _update_env_vars(env_vars, test_env_vars) proc = subprocess.Popen( args, start_new_session=True, @@ -300,16 +319,18 @@ def tester_user() -> Tuple[str, str]: return user_name, user_workspace -def run_test(settings_id, test_id, files_url, categories, user): +def run_test(settings_id, test_id, files_url, categories, user, test_env_vars): results = [] error = None try: settings = json.loads(redis_connection().hget("autotest:settings", key=settings_id)) + settings["_last_access"] = int(time.time()) + redis_connection().hset("autotest:settings", key=settings_id, value=json.dumps(settings)) test_username, tests_path = tester_user() try: _setup_files(settings_id, user, files_url, tests_path, test_username) cmd = run_test_command(test_username=test_username) - results = _run_test_specs(cmd, settings, categories, tests_path, test_username, test_id) + results = _run_test_specs(cmd, settings, categories, tests_path, test_username, test_id, test_env_vars) finally: _stop_tester_processes(test_username) _clear_working_directory(tests_path, test_username) @@ -345,7 +366,7 @@ def update_test_settings(user, settings_id, test_settings, file_url): os.makedirs(files_dir, exist_ok=True) creds = json.loads(redis_connection().hget("autotest:user_credentials", key=user)) r = requests.get(file_url, headers={"Authorization": f"{creds['auth_type']} {creds['credentials']}"}) - extract_zip_stream(r.content, files_dir, ignore_root_dirs=1) + extract_zip_stream(r.content, files_dir, ignore_root_dirs=0) schema = json.loads(redis_connection().get("autotest:schema")) installed_testers = schema["definitions"]["installed_testers"]["enum"] @@ -371,4 +392,5 @@ def update_test_settings(user, settings_id, test_settings, file_url): raise finally: test_settings["_user"] = user + test_settings["_last_access"] = int(time.time()) redis_connection().hset("autotest:settings", key=settings_id, value=json.dumps(test_settings)) diff --git a/server/autotest_server/testers/custom/settings_schema.json b/server/autotest_server/testers/custom/settings_schema.json index 7901379b..bec7ec1c 100644 --- a/server/autotest_server/testers/custom/settings_schema.json +++ b/server/autotest_server/testers/custom/settings_schema.json @@ -40,24 +40,16 @@ "type": "integer", "default": 30 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": 
false - } - } - } } } } diff --git a/server/autotest_server/testers/custom/setup.py b/server/autotest_server/testers/custom/setup.py index 82749d0e..59c8ea18 100644 --- a/server/autotest_server/testers/custom/setup.py +++ b/server/autotest_server/testers/custom/setup.py @@ -3,7 +3,7 @@ def create_environment(_settings, _env_dir, default_env_dir): - return {"PYTHON": os.path.join(default_env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(default_env_dir, "bin", "python3")} def install(): diff --git a/server/autotest_server/testers/haskell/haskell_tester.py b/server/autotest_server/testers/haskell/haskell_tester.py index 64b43188..6ab9c7d3 100644 --- a/server/autotest_server/testers/haskell/haskell_tester.py +++ b/server/autotest_server/testers/haskell/haskell_tester.py @@ -2,7 +2,7 @@ import os import tempfile import csv -from typing import Dict, Optional, IO, Type, List, Iterator, Union +from typing import Dict, Type, List, Iterator, Union from ..tester import Tester, Test, TestError from ..specs import TestSpecs @@ -14,19 +14,17 @@ def __init__( tester: "HaskellTester", test_file: str, result: Dict, - feedback_open: Optional[IO] = None, ) -> None: """ Initialize a Haskell test created by tester. - The result was created after running the tests in test_file and test feedback - will be written to feedback_open. + The result was created after running the tests in test_file. """ self._test_name = result.get("name") self._file_name = test_file self.status = result["status"] self.message = result["description"] - super().__init__(tester, feedback_open) + super().__init__(tester) @property def test_name(self) -> str: @@ -123,8 +121,7 @@ def run(self) -> None: results = self.run_haskell_tests() except subprocess.CalledProcessError as e: raise TestError(e.stderr) from e - with self.open_feedback() as feedback_open: - for test_file, result in results.items(): - for res in result: - test = self.test_class(self, test_file, res, feedback_open) - print(test.run(), flush=True) + for test_file, result in results.items(): + for res in result: + test = self.test_class(self, test_file, res) + print(test.run(), flush=True) diff --git a/server/autotest_server/testers/haskell/settings_schema.json b/server/autotest_server/testers/haskell/settings_schema.json index 16293837..f034c97c 100644 --- a/server/autotest_server/testers/haskell/settings_schema.json +++ b/server/autotest_server/testers/haskell/settings_schema.json @@ -52,24 +52,16 @@ "type": "integer", "default": 100 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - } } } } diff --git a/server/autotest_server/testers/haskell/setup.py b/server/autotest_server/testers/haskell/setup.py index 08d83579..72f1889d 100644 --- a/server/autotest_server/testers/haskell/setup.py +++ b/server/autotest_server/testers/haskell/setup.py @@ -4,7 +4,7 @@ def create_environment(_settings, _env_dir, default_env_dir): - return {"PYTHON": os.path.join(default_env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(default_env_dir, "bin", "python3")} def install(): diff --git a/server/autotest_server/testers/java/java_tester.py b/server/autotest_server/testers/java/java_tester.py 
index 680c3d82..5e5697e7 100644 --- a/server/autotest_server/testers/java/java_tester.py +++ b/server/autotest_server/testers/java/java_tester.py @@ -9,11 +9,11 @@ class JavaTest(Test): - def __init__(self, tester, result, feedback_open=None): + def __init__(self, tester, result): self._test_name = result["name"] self.status = result["status"] self.message = result["message"] - super().__init__(tester, feedback_open) + super().__init__(tester) @property def test_name(self): @@ -141,8 +141,7 @@ def run(self) -> None: raise TestError(results.stderr) except subprocess.CalledProcessError as e: raise TestError(e) - with self.open_feedback() as feedback_open: - for result in self._parse_junitxml(): - test = self.test_class(self, result, feedback_open) - result_json = test.run() - print(result_json, flush=True) + for result in self._parse_junitxml(): + test = self.test_class(self, result) + result_json = test.run() + print(result_json, flush=True) diff --git a/server/autotest_server/testers/java/settings_schema.json b/server/autotest_server/testers/java/settings_schema.json index 23db8730..073a729a 100644 --- a/server/autotest_server/testers/java/settings_schema.json +++ b/server/autotest_server/testers/java/settings_schema.json @@ -48,24 +48,16 @@ "type": "integer", "default": 30 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - } } } } diff --git a/server/autotest_server/testers/java/setup.py b/server/autotest_server/testers/java/setup.py index e3324442..26c8a018 100644 --- a/server/autotest_server/testers/java/setup.py +++ b/server/autotest_server/testers/java/setup.py @@ -5,7 +5,7 @@ def create_environment(_settings, _env_dir, default_env_dir): - return {"PYTHON": os.path.join(default_env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(default_env_dir, "bin", "python3")} def install(): diff --git a/server/autotest_server/testers/jupyter/jupyter_tester.py b/server/autotest_server/testers/jupyter/jupyter_tester.py index df987ed4..969d774d 100644 --- a/server/autotest_server/testers/jupyter/jupyter_tester.py +++ b/server/autotest_server/testers/jupyter/jupyter_tester.py @@ -1,7 +1,7 @@ import os import sys import re -from typing import Optional, Type, Dict, IO, List, ContextManager +from typing import Type, Dict, List, ContextManager import pytest import nbformat import tempfile @@ -20,20 +20,18 @@ def __init__( test_file: str, test_file_name: str, result: Dict, - feedback_open: Optional[IO] = None, ): """ Initialize a Jupyter test created by tester. The result was created after running some pytest tests. - Test feedback will be written to feedback_open. 
""" self._test_name = re.sub(r"^.*?(?=::)", test_file_name, result["name"]) self._file_name = test_file self.description = result.get("description") self.status = result["status"] self.message = result["errors"] - super().__init__(tester, feedback_open) + super().__init__(tester) @property def test_name(self) -> str: @@ -75,7 +73,7 @@ def _run_jupyter_tests(test_file: str) -> List[Dict]: try: sys.stdout = null_out plugin = JupyterPlugin() - pytest.main([test_file], plugins=[plugin]) + pytest.main([test_file], plugins=["notebook_helper.pytest.notebook_collector_plugin", plugin]) results.extend(plugin.results.values()) finally: sys.stdout = sys.__stdout__ @@ -92,11 +90,7 @@ def _merge_ipynb_files(self, test_file: str, submission_file: str) -> ContextMan finally: os.unlink(tempf.name) - def test_merge(self, - test_file: str, - submission_file: str, - feedback_open: Optional[IO], - make_test: bool = False) -> None: + def test_merge(self, test_file: str, submission_file: str, make_test: bool = False) -> None: error = None try: merger.check(test_file, submission_file) @@ -107,7 +101,7 @@ def test_merge(self, result = {"status": "success", "name": "merge_check", "errors": ""} else: result = {"status": "failure", "name": "merge_check", "errors": error} - test = self.test_class(self, test_file, f"{test_file}:{submission_file}", result, feedback_open) + test = self.test_class(self, test_file, f"{test_file}:{submission_file}", result) print(test.run(), flush=True) elif error: sys.stderr.write(error) @@ -118,12 +112,11 @@ def run(self) -> None: """ Runs all tests in this tester. """ - with self.open_feedback() as feedback_open: - for script_files in self.specs["test_data", "script_files"]: - test_file = script_files["test_file"] - submission_file = script_files["student_file"] - self.test_merge(test_file, submission_file, feedback_open, script_files["test_merge"]) - with self._merge_ipynb_files(test_file, submission_file) as merged_file: - for res in self._run_jupyter_tests(merged_file): - test = self.test_class(self, merged_file, f"{test_file}:{submission_file}", res, feedback_open) - print(test.run(), flush=True) + for script_files in self.specs["test_data", "script_files"]: + test_file = script_files["test_file"] + submission_file = script_files["student_file"] + self.test_merge(test_file, submission_file, script_files["test_merge"]) + with self._merge_ipynb_files(test_file, submission_file) as merged_file: + for res in self._run_jupyter_tests(merged_file): + test = self.test_class(self, merged_file, f"{test_file}:{submission_file}", res) + print(test.run(), flush=True) diff --git a/server/autotest_server/testers/jupyter/lib/jupyter_pytest_plugin.py b/server/autotest_server/testers/jupyter/lib/jupyter_pytest_plugin.py index f46b1a15..1824f969 100644 --- a/server/autotest_server/testers/jupyter/lib/jupyter_pytest_plugin.py +++ b/server/autotest_server/testers/jupyter/lib/jupyter_pytest_plugin.py @@ -1,6 +1,4 @@ import pytest -from notebook_helper import importer -import re class JupyterPlugin: @@ -56,57 +54,3 @@ def pytest_collectreport(self, report): "errors": str(report.longrepr), "description": None, } - - def pytest_collect_file(self, parent, path): - if path.ext == ".ipynb": - return IpynbFile.from_parent(parent, fspath=path) - - -class IpynbFile(pytest.File): - TEST_PATTERN = re.compile(r"(?i)^\s*#+\s*(test.*?)\s*$") - - def collect(self): - mod = importer.import_from_path(self.fspath) - setup_cells = [] - for cell in importer.get_cells(mod): - lines = cell.source.splitlines() or [""] # dummy 
list so the next line works - match = re.match(self.TEST_PATTERN, lines[0]) - if match and match.group(1): - yield IpynbItem.from_parent(self, name=match.group(1), test_cell=cell, setup_cells=setup_cells, mod=mod) - setup_cells = [] - else: - setup_cells.append(cell) - - -class IpynbItem(pytest.Item): - def __init__(self, name, parent, test_cell, setup_cells, mod): - super().__init__(name, parent) - self.test_cell = test_cell - self.setup_cells = setup_cells - self.mod = mod - self._last_cell = None - - def runtest(self) -> None: - for cell in self.setup_cells: - self._last_cell = cell - cell.run() - self._last_cell = self.test_cell - self.test_cell.run() - - @property - def obj(self): - return self.test_cell - - def repr_failure(self, excinfo, style=None): - for tb in reversed(excinfo.traceback): - if excinfo.typename == "SyntaxError" or tb.frame.code.path.startswith(self.mod.__file__): - err_line = tb.lineno - cell_lines = [ - f"-> {l}" if i == err_line else f" {l}" - for i, l in enumerate("".join(self._last_cell.source).splitlines()) - ] - lines = "\n".join(cell_lines) - return f"{lines}\n\n{excinfo.exconly()}" - - def reportinfo(self): - return self.fspath, 0, self.name diff --git a/server/autotest_server/testers/jupyter/requirements.txt b/server/autotest_server/testers/jupyter/requirements.txt index cf477c74..8e31b5a0 100644 --- a/server/autotest_server/testers/jupyter/requirements.txt +++ b/server/autotest_server/testers/jupyter/requirements.txt @@ -1,3 +1,3 @@ -pytest==6.2.1 -psycopg2-binary==2.8.6 +pytest==7.1.2 +psycopg2-binary==2.9.3 git+https://github.com/MarkUsProject/autotest-helpers.git#subdirectory=notebook_helper diff --git a/server/autotest_server/testers/jupyter/settings_schema.json b/server/autotest_server/testers/jupyter/settings_schema.json index 8ce9fa2d..74b98425 100644 --- a/server/autotest_server/testers/jupyter/settings_schema.json +++ b/server/autotest_server/testers/jupyter/settings_schema.json @@ -78,24 +78,16 @@ "type": "integer", "default": 30 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - } } } } diff --git a/server/autotest_server/testers/jupyter/setup.py b/server/autotest_server/testers/jupyter/setup.py index 2e3173a5..d4db8b94 100644 --- a/server/autotest_server/testers/jupyter/setup.py +++ b/server/autotest_server/testers/jupyter/setup.py @@ -12,7 +12,7 @@ def create_environment(settings_, env_dir, _default_env_dir): pip = os.path.join(env_dir, "bin", "pip") subprocess.run([f"python{python_version}", "-m", "venv", "--clear", env_dir], check=True) subprocess.run([pip, "install", "-r", requirements, *pip_requirements], check=True) - return {"PYTHON": os.path.join(env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(env_dir, "bin", "python3")} def settings(): diff --git a/server/autotest_server/testers/py/py_tester.py b/server/autotest_server/testers/py/py_tester.py index d1be465a..d6777577 100644 --- a/server/autotest_server/testers/py/py_tester.py +++ b/server/autotest_server/testers/py/py_tester.py @@ -1,6 +1,6 @@ import os import unittest -from typing import TextIO, Tuple, Optional, Type, Dict, IO, List +from typing import TextIO, Tuple, Optional, Type, 
Dict, List from types import TracebackType import pytest import sys @@ -124,20 +124,18 @@ def __init__( tester: "PyTester", test_file: str, result: Dict, - feedback_open: Optional[IO] = None, ): """ Initialize a Python test created by tester. The result was created after running some unittest or pytest tests. - Test feedback will be written to feedback_open. """ self._test_name = result["name"] self._file_name = test_file self.description = result.get("description") self.status = result["status"] self.message = result["errors"] - super().__init__(tester, feedback_open) + super().__init__(tester) @property def test_name(self) -> str: @@ -234,8 +232,7 @@ def run(self) -> None: Runs all tests in this tester. """ results = self.run_python_tests() - with self.open_feedback() as feedback_open: - for test_file, result in results.items(): - for res in result: - test = self.test_class(self, test_file, res, feedback_open) - print(test.run(), flush=True) + for test_file, result in results.items(): + for res in result: + test = self.test_class(self, test_file, res) + print(test.run(), flush=True) diff --git a/server/autotest_server/testers/py/requirements.txt b/server/autotest_server/testers/py/requirements.txt index 5364e879..bd888cdf 100644 --- a/server/autotest_server/testers/py/requirements.txt +++ b/server/autotest_server/testers/py/requirements.txt @@ -1,2 +1,2 @@ -pytest==6.2.1 -psycopg2-binary==2.8.6 \ No newline at end of file +pytest==7.1.2 +psycopg2-binary==2.9.3 diff --git a/server/autotest_server/testers/py/settings_schema.json b/server/autotest_server/testers/py/settings_schema.json index 9600e48b..dfb9bb2b 100644 --- a/server/autotest_server/testers/py/settings_schema.json +++ b/server/autotest_server/testers/py/settings_schema.json @@ -72,24 +72,18 @@ ], "default": "pytest" }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } }, "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - }, "tester": { "oneOf": [ { diff --git a/server/autotest_server/testers/py/setup.py b/server/autotest_server/testers/py/setup.py index 2e3173a5..d4db8b94 100644 --- a/server/autotest_server/testers/py/setup.py +++ b/server/autotest_server/testers/py/setup.py @@ -12,7 +12,7 @@ def create_environment(settings_, env_dir, _default_env_dir): pip = os.path.join(env_dir, "bin", "pip") subprocess.run([f"python{python_version}", "-m", "venv", "--clear", env_dir], check=True) subprocess.run([pip, "install", "-r", requirements, *pip_requirements], check=True) - return {"PYTHON": os.path.join(env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(env_dir, "bin", "python3")} def settings(): diff --git a/server/autotest_server/testers/pyta/pyta_tester.py b/server/autotest_server/testers/pyta/pyta_tester.py index 41cdb7c9..c22ce109 100644 --- a/server/autotest_server/testers/pyta/pyta_tester.py +++ b/server/autotest_server/testers/pyta/pyta_tester.py @@ -2,15 +2,14 @@ import sys import io import json -from typing import Optional, IO, Type, Dict, List +from typing import Type, Dict, List import python_ta from ..tester import Tester, Test from ..specs import TestSpecs -class PytaReporter(python_ta.reporters.json_reporter.JSONReporter, - python_ta.reporters.plain_reporter.PlainReporter): +class 
PytaReporter(python_ta.reporters.json_reporter.JSONReporter, python_ta.reporters.plain_reporter.PlainReporter): pass @@ -23,15 +22,13 @@ def __init__( tester: "PytaTester", student_file_path: str, max_points: int, - feedback_open: Optional[IO] = None, ) -> None: """ Initialize a Python TA test that checks the student_file_path file, - removes 1 point per error from a possible max_points total, and - writes results to feedback_open. + removes 1 point per error from a possible max_points total. """ self.student_file = student_file_path - super().__init__(tester, feedback_open) + super().__init__(tester) self.points_total = max_points self.annotations = [] @@ -45,8 +42,8 @@ def add_annotations(self, data: List[Dict]) -> None: Records annotations from the results extracted from reporter. """ for result in data: - if result.get('filename') == self.student_file: - for msg in result['msgs']: + if result.get("filename") == self.student_file: + for msg in result["msgs"]: self.annotations.append( { "filename": result["filename"], @@ -67,35 +64,39 @@ def run(self) -> str: """ Return a json string containing all test result information. """ + if not os.path.exists(self.student_file): + return self.error(message=f"File does not exist: {self.student_file}") tmp_stderr = io.StringIO() try: sys.stderr = tmp_stderr - with open(os.devnull, 'w') as devnull: + with open(os.devnull, "w") as devnull: sys.stdout = devnull reporter = python_ta.check_all(self.student_file, config=self.tester.pyta_config) tmp_stdout = io.StringIO() reporter.out = tmp_stdout reporter.display_messages(None) - if self.feedback_open: - reporter.out = self.feedback_open - reporter.print_messages() + + report_stdout = io.StringIO() + reporter.out = report_stdout + reporter.print_messages() finally: sys.stderr = sys.__stderr__ sys.stdout = sys.__stdout__ tmp_stdout.seek(0) + report_stdout.seek(0) try: data = json.load(tmp_stdout) except json.JSONDecodeError: tmp_stderr.seek(0) tmp_stdout.seek(0) self.annotations = [] - return self.error(message=f'{tmp_stderr.read()}\n\n{tmp_stdout.read()}') + return self.error(message=f"{tmp_stderr.read()}\n\n{tmp_stdout.read()}") self.add_annotations(data) num_messages = len(self.annotations) points_earned = max(0, self.points_total - num_messages) - message = self.ERROR_MSGS["reported"].format(num_messages) if num_messages > 0 else "" + message = report_stdout.read() return self.done(points_earned, message) @@ -109,8 +110,7 @@ def __init__(self, specs: TestSpecs, test_class: Type[PytaTest] = PytaTest): This tester will create tests of type test_class. """ super().__init__(specs, test_class) - self.feedback_file = self.specs.get("test_data", "feedback_file_name") - self.annotation_file = self.specs.get("test_data", "annotation_file") + self.upload_annotations = self.specs.get("test_data", "upload_annotations") self.pyta_config = self.update_pyta_config() self.annotations = [] @@ -136,21 +136,18 @@ def update_pyta_config(self) -> Dict: def after_tester_run(self) -> None: """ - Write annotations extracted from the test results to the - annotation_file. + Write annotations extracted from the test results to stdout. """ - if self.annotation_file and self.annotations: - with open(self.annotation_file, "w") as annotations_open: - json.dump(self.annotations, annotations_open) + if self.upload_annotations: + print(self.test_class.format_annotations(self.annotations)) @Tester.run_decorator def run(self) -> None: """ Runs all tests in this tester. 
""" - with self.open_feedback(self.feedback_file) as feedback_open: - for test_data in self.specs.get("test_data", "student_files", default=[]): - student_file_path = test_data["file_path"] - max_points = test_data.get("max_points", 10) - test = self.test_class(self, student_file_path, max_points, feedback_open) - print(test.run()) + for test_data in self.specs.get("test_data", "student_files", default=[]): + student_file_path = test_data["file_path"] + max_points = test_data.get("max_points", 10) + test = self.test_class(self, student_file_path, max_points) + print(test.run()) diff --git a/server/autotest_server/testers/pyta/settings_schema.json b/server/autotest_server/testers/pyta/settings_schema.json index 9330bee9..816b1716 100644 --- a/server/autotest_server/testers/pyta/settings_schema.json +++ b/server/autotest_server/testers/pyta/settings_schema.json @@ -76,38 +76,20 @@ "type": "integer", "default": 90 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "upload_annotations": { - "title": "Upload annotations", - "type": "boolean", - "default": false + "title": "Upload Annotations", + "type": "boolean" }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - }, - "upload_annotations": { - "properties": { - "annotation_file": { - "title": "Annotation file", - "type": "string", - "default": "annotations.yml" - } - } - } } } } diff --git a/server/autotest_server/testers/pyta/setup.py b/server/autotest_server/testers/pyta/setup.py index 2e3173a5..d4db8b94 100644 --- a/server/autotest_server/testers/pyta/setup.py +++ b/server/autotest_server/testers/pyta/setup.py @@ -12,7 +12,7 @@ def create_environment(settings_, env_dir, _default_env_dir): pip = os.path.join(env_dir, "bin", "pip") subprocess.run([f"python{python_version}", "-m", "venv", "--clear", env_dir], check=True) subprocess.run([pip, "install", "-r", requirements, *pip_requirements], check=True) - return {"PYTHON": os.path.join(env_dir, 'bin', 'python3')} + return {"PYTHON": os.path.join(env_dir, "bin", "python3")} def settings(): diff --git a/server/autotest_server/testers/r/lib/r_tester_setup.R b/server/autotest_server/testers/r/lib/r_tester_setup.R index 230c54d6..e5d8ab8f 100644 --- a/server/autotest_server/testers/r/lib/r_tester_setup.R +++ b/server/autotest_server/testers/r/lib/r_tester_setup.R @@ -49,7 +49,7 @@ install_dep <- function(row) { if (!is.na(remote_type)) { install_func <- getFromNamespace(paste("install_", remote_type, sep = ""), "remotes") - install_func(name) + name <- install_func(name) # install_func returns the package name } else if (!is.na(version)) { install_version(name, version = paste(compare, version, sep =" ")) } else { diff --git a/server/autotest_server/testers/r/r_tester.py b/server/autotest_server/testers/r/r_tester.py index f3721310..641ca76b 100644 --- a/server/autotest_server/testers/r/r_tester.py +++ b/server/autotest_server/testers/r/r_tester.py @@ -1,7 +1,7 @@ import subprocess import os import json -from typing import Dict, Optional, IO, Type, List, Union +from typing import Dict, Type, List, Union from ..tester import Tester, Test, TestError from ..specs import TestSpecs @@ -13,17 +13,15 @@ def __init__( tester: "RTester", test_file: str, result: Dict, - 
feedback_open: Optional[IO] = None, ) -> None: """ Initialize a R test created by tester. - The result was created after running the tests in test_file and test feedback - will be written to feedback_open. + The result was created after running the tests in test_file. """ - self._test_name = ':'.join(info for info in [test_file, result.get('context'), result['test']] if info) + self._test_name = ":".join(info for info in [test_file, result.get("context"), result["test"]] if info) self.result = result["results"] - super().__init__(tester, feedback_open) + super().__init__(tester) self.points_total = 0 @property @@ -45,7 +43,7 @@ def run(self): elif result["type"] == "expectation_error": error = True self.points_total += 1 - messages.append('\n'.join(result["trace"])) + messages.append("\n".join(result["trace"])) message = "\n\n".join(messages) if error: @@ -76,14 +74,16 @@ def run_r_tests(self) -> Dict[str, List[Dict[str, Union[int, str]]]]: Return test results for each test file. Results contain a list of parsed test results. """ results = {} - r_tester = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', 'r_tester.R') + r_tester = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib", "r_tester.R") for test_file in self.specs["test_data", "script_files"]: - proc = subprocess.run(['Rscript', r_tester, test_file], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - # NO_COLOR is used to ensure R tracebacks are printed without ANSI color codes - env={**os.environ, 'NO_COLOR': '1'}) + proc = subprocess.run( + ["Rscript", r_tester, test_file], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + # NO_COLOR is used to ensure R tracebacks are printed without ANSI color codes + env={**os.environ, "NO_COLOR": "1"}, + ) if not results.get(test_file): results[test_file] = [] if proc.returncode == 0: @@ -101,8 +101,7 @@ def run(self) -> None: results = self.run_r_tests() except subprocess.CalledProcessError as e: raise TestError(e.stderr) from e - with self.open_feedback() as feedback_open: - for test_file, result in results.items(): - for res in result: - test = self.test_class(self, test_file, res, feedback_open) - print(test.run(), flush=True) + for test_file, result in results.items(): + for res in result: + test = self.test_class(self, test_file, res) + print(test.run(), flush=True) diff --git a/server/autotest_server/testers/r/settings_schema.json b/server/autotest_server/testers/r/settings_schema.json index 42561fc3..a1810d60 100644 --- a/server/autotest_server/testers/r/settings_schema.json +++ b/server/autotest_server/testers/r/settings_schema.json @@ -53,24 +53,16 @@ "type": "integer", "default": 30 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - } } } } diff --git a/server/autotest_server/testers/r/setup.py b/server/autotest_server/testers/r/setup.py index b3e11f4b..729808ca 100644 --- a/server/autotest_server/testers/r/setup.py +++ b/server/autotest_server/testers/r/setup.py @@ -9,10 +9,10 @@ def create_environment(settings_, env_dir, default_env_dir): os.makedirs(env_dir, exist_ok=True) env = {"R_LIBS_SITE": env_dir, 
"R_LIBS_USER": env_dir} - r_tester_setup = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib', 'r_tester_setup.R') - subprocess.run(['Rscript', r_tester_setup, req_string], env={**os.environ, **env}, check=True) + r_tester_setup = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib", "r_tester_setup.R") + subprocess.run(["Rscript", r_tester_setup, req_string], env={**os.environ, **env}, check=True) - return {**env, "PYTHON": os.path.join(default_env_dir, 'bin', 'python3')} + return {**env, "PYTHON": os.path.join(default_env_dir, "bin", "python3")} def settings(): diff --git a/server/autotest_server/testers/racket/racket_tester.py b/server/autotest_server/testers/racket/racket_tester.py index 2b045d1b..d579baa6 100644 --- a/server/autotest_server/testers/racket/racket_tester.py +++ b/server/autotest_server/testers/racket/racket_tester.py @@ -1,22 +1,21 @@ import json import subprocess import os -from typing import Dict, Optional, IO, Type +from typing import Dict, Type from ..tester import Tester, Test, TestError class RacketTest(Test): - def __init__(self, tester: "RacketTester", result: Dict, feedback_open: Optional[IO] = None) -> None: + def __init__(self, tester: "RacketTester", result: Dict) -> None: """ Initialize a racket test created by tester. - The result was created after running the tests in test_file and test feedback - will be written to feedback_open. + The result was created after running the tests in test_file. """ self._test_name = result["name"] self.status = result["status"] self.message = result["message"] - super().__init__(tester, feedback_open) + super().__init__(tester) @property def test_name(self) -> None: @@ -78,14 +77,13 @@ def run(self) -> None: results = self.run_racket_test() except subprocess.CalledProcessError as e: raise TestError(e.stderr) from e - with self.open_feedback() as feedback_open: - for test_file, result in results.items(): - if result.strip(): - try: - test_results = json.loads(result) - except json.JSONDecodeError as e: - msg = RacketTester.ERROR_MSGS["bad_json"].format(result) - raise TestError(msg) from e - for t_result in test_results: - test = self.test_class(self, t_result, feedback_open) - print(test.run(), flush=True) + for test_file, result in results.items(): + if result.strip(): + try: + test_results = json.loads(result) + except json.JSONDecodeError as e: + msg = RacketTester.ERROR_MSGS["bad_json"].format(result) + raise TestError(msg) from e + for t_result in test_results: + test = self.test_class(self, t_result) + print(test.run(), flush=True) diff --git a/server/autotest_server/testers/racket/settings_schema.json b/server/autotest_server/testers/racket/settings_schema.json index 6bdc1790..eaa9b94a 100644 --- a/server/autotest_server/testers/racket/settings_schema.json +++ b/server/autotest_server/testers/racket/settings_schema.json @@ -51,24 +51,16 @@ "type": "integer", "default": 30 }, - "feedback_file_name": { - "title": "Feedback file", - "type": "string" + "feedback_file_names": { + "title": "Feedback files", + "type": "array", + "items": { + "type": "string" + } }, "extra_info": { "$ref": "#/definitions/extra_group_data" } - }, - "dependencies": { - "feedback_file_name": { - "properties": { - "upload_feedback_file": { - "title": "Upload feedback file for grading", - "type": "boolean", - "default": false - } - } - } } } } diff --git a/server/autotest_server/testers/racket/setup.py b/server/autotest_server/testers/racket/setup.py index 16ee50a2..a80bcc6a 100644 --- 
+++ b/server/autotest_server/testers/racket/setup.py
@@ -4,7 +4,7 @@
 
 
 def create_environment(_settings, _env_dir, default_env_dir):
-    return {"PYTHON": os.path.join(default_env_dir, 'bin', 'python3')}
+    return {"PYTHON": os.path.join(default_env_dir, "bin", "python3")}
 
 
 def settings():
diff --git a/server/autotest_server/testers/tester.py b/server/autotest_server/testers/tester.py
index 53c910e0..6658d68f 100644
--- a/server/autotest_server/testers/tester.py
+++ b/server/autotest_server/testers/tester.py
@@ -1,8 +1,7 @@
-from contextlib import contextmanager
 import json
 from abc import ABC, abstractmethod
 from functools import wraps
-from typing import Optional, IO, Callable, Any, Type, Generator
+from typing import Optional, Callable, Any, Type, Dict, List
 
 from testers.specs import TestSpecs
 import traceback
@@ -20,13 +19,12 @@ class Status:
         ERROR_ALL: str = "error_all"
 
     @abstractmethod
-    def __init__(self, tester: "Tester", feedback_open: Optional[IO] = None) -> None:
+    def __init__(self, tester: "Tester") -> None:
         """Initialize a Test"""
         self.tester = tester
         self.points_total = self.get_total_points()
         if self.points_total <= 0:
             raise ValueError("The test total points must be > 0")
-        self.feedback_open = feedback_open
 
     @property
     @abstractmethod
@@ -91,34 +89,14 @@ def format(self, status: str, output: str, points_earned: int) -> str:
         """
         return Test.format_result(self.test_name, status, output, points_earned, self.points_total)
 
-    def add_feedback(
-        self,
-        status: str,
-        feedback: str = "",
-        oracle_solution: Optional[str] = None,
-        test_solution: Optional[str] = None,
-    ) -> None:
+    @staticmethod
+    def format_annotations(annotation_data: List[Dict[str, Any]]) -> str:
         """
-        Adds the feedback of this test to the feedback file.
-        :param status: A member of Test.Status.
-        :param feedback: The feedback, can be None.
-        :param oracle_solution: The expected solution, can be None.
-        :param test_solution: The test solution, can be None.
-        """
-        # TODO Reconcile with format: return both, or print both
-        if self.feedback_open is None:
-            raise ValueError("No feedback file enabled")
-        self.feedback_open.write("========== {}: {} ==========\n\n".format(self.test_name, status.upper()))
-        if feedback:
-            self.feedback_open.write("## Feedback: {}\n\n".format(feedback))
-        if status != self.Status.PASS:
-            if oracle_solution:
-                self.feedback_open.write("## Expected Solution:\n\n")
-                self.feedback_open.write(oracle_solution)
-            if test_solution:
-                self.feedback_open.write("## Your Solution:\n\n")
-                self.feedback_open.write(test_solution)
-        self.feedback_open.write("\n")
+        Formats annotation data.
+        :param annotation_data: a list of dictionaries containing annotation data
+        :return a JSON string representation of the annotation data.
+        """
+        return json.dumps({"annotations": annotation_data})
 
     def passed_with_bonus(self, points_bonus: int, message: str = "") -> str:
         """
@@ -135,8 +113,6 @@ def passed_with_bonus(self, points_bonus: int, message: str = "") -> str:
             output=message,
             points_earned=self.points_total + points_bonus,
         )
-        if self.feedback_open:
-            self.add_feedback(status=self.Status.PASS)
         return result
 
     def passed(self, message: str = "") -> str:
         """
@@ -146,23 +122,13 @@ def passed(self, message: str = "") -> str:
         :return The formatted passed test.
         """
         result = self.format(status=self.Status.PASS, output=message, points_earned=self.points_total)
-        if self.feedback_open:
-            self.add_feedback(status=self.Status.PASS)
         return result
 
-    def partially_passed(
-        self,
-        points_earned: int,
-        message: str,
-        oracle_solution: Optional[str] = None,
-        test_solution: Optional[str] = None,
-    ) -> str:
+    def partially_passed(self, points_earned: int, message: str) -> str:
         """
         Partially passes this test with some points earned. If a feedback file is enabled, adds feedback to it.
         :param points_earned: The points earned by the test, must be an int > 0 and < the test total points.
         :param message: The message explaining why the test was not fully passed, will be shown as test output.
-        :param oracle_solution: The optional correct solution to be added to the feedback file.
-        :param test_solution: The optional student solution to be added to the feedback file.
         :return The formatted partially passed test.
         """
         if points_earned <= 0:
@@ -170,45 +136,21 @@ def partially_passed(
         if points_earned >= self.points_total:
             raise ValueError("The test points earned must be < the test total points")
         result = self.format(status=self.Status.PARTIAL, output=message, points_earned=points_earned)
-        if self.feedback_open:
-            self.add_feedback(
-                status=self.Status.PARTIAL,
-                feedback=message,
-                oracle_solution=oracle_solution,
-                test_solution=test_solution,
-            )
         return result
 
     def failed(
         self,
         message: str,
-        oracle_solution: Optional[str] = None,
-        test_solution: Optional[str] = None,
     ) -> str:
         """
         Fails this test with 0 points earned. If a feedback file is enabled, adds feedback to it.
         :param message: The failure message, will be shown as test output.
-        :param oracle_solution: The optional correct solution to be added to the feedback file.
-        :param test_solution: The optional student solution to be added to the feedback file.
         :return The formatted failed test.
         """
         result = self.format(status=self.Status.FAIL, output=message, points_earned=0)
-        if self.feedback_open:
-            self.add_feedback(
-                status=self.Status.FAIL,
-                feedback=message,
-                oracle_solution=oracle_solution,
-                test_solution=test_solution,
-            )
         return result
 
-    def done(
-        self,
-        points_earned: int,
-        message: str = "",
-        oracle_solution: Optional[str] = None,
-        test_solution: Optional[str] = None,
-    ) -> str:
+    def done(self, points_earned: int, message: str = "") -> str:
         """
         Passes, partially passes or fails this test depending on the points earned. If the points are <= 0 this test is
         failed with 0 points earned, if the points are >= test total points this test is passed earning the test total
@@ -216,19 +158,17 @@ def done(
         feedback to it.
         :param points_earned: The points earned by the test.
         :param message: The optional message explaining the test outcome, will be shown as test output.
-        :param oracle_solution: The optional correct solution to be added to the feedback file.
-        :param test_solution: The optional student solution to be added to the feedback file.
        :return The formatted test.
         """
        if points_earned <= 0:
-            return self.failed(message, oracle_solution, test_solution)
+            return self.failed(message)
         elif points_earned == self.points_total:
             return self.passed(message)
         elif points_earned > self.points_total:
             points_bonus = points_earned - self.points_total
             return self.passed_with_bonus(points_bonus, message)
         else:
-            return self.partially_passed(points_earned, message, oracle_solution, test_solution)
+            return self.partially_passed(points_earned, message)
 
     def error(self, message: str) -> str:
         """
@@ -237,8 +177,6 @@ def error(self, message: str) -> str:
         :return The formatted erred test.
         """
         result = self.format(status=self.Status.ERROR, output=message, points_earned=0)
-        if self.feedback_open:
-            self.add_feedback(status=self.Status.ERROR, feedback=message)
         return result
 
     def before_test_run(self) -> None:
@@ -352,26 +290,6 @@ def run_func_wrapper(self, *args: Any, **kwargs: Any) -> None:
 
         return run_func_wrapper
 
-    @contextmanager
-    def open_feedback(self, filename: Optional[str] = None, mode: str = "w") -> Generator[Optional[IO], None, None]:
-        """
-        Yields an open file object, opened in <mode> if it exists,
-        otherwise it yields None.
-
-        If <filename> is None, the feedback_file_name from self.specs is
-        used.
-        """
-        if filename is None:
-            filename = self.specs.get("test_data", "feedback_file_name")
-        if filename:
-            feedback_open = open(filename, mode)
-            try:
-                yield feedback_open
-            finally:
-                feedback_open.close()
-        else:
-            yield None
-
     @abstractmethod
     def run(self) -> None:
         """
diff --git a/server/requirements.txt b/server/requirements.txt
index 47d1759e..2b5a108c 100644
--- a/server/requirements.txt
+++ b/server/requirements.txt
@@ -1,8 +1,8 @@
-rq==1.8.0
-click<8.0
-redis==3.5.3
-pyyaml==5.4.1
-jsonschema==3.2.0
-requests==2.25.1
-psycopg2-binary==2.8.6
-supervisor==4.2.1
+rq==1.10.1
+click==8.1.3
+redis==4.3.4
+pyyaml==6.0
+jsonschema==4.7.2
+requests==2.28.1
+psycopg2-binary==2.9.3
+supervisor==4.2.4
diff --git a/server/start_stop.py b/server/start_stop.py
index c72e1a0e..b5a09f52 100644
--- a/server/start_stop.py
+++ b/server/start_stop.py
@@ -1,5 +1,9 @@
 import argparse
+import json
 import os
+import shutil
+import time
+import redis
 import sys
 import signal
 import subprocess
@@ -11,6 +15,8 @@
 _SUPERVISORD = os.path.join(os.path.dirname(sys.executable), "supervisord")
 _RQ = os.path.join(os.path.dirname(sys.executable), "rq")
 
+SECONDS_PER_DAY = 86400
+
 HEADER = f"""[supervisord]
 
 [supervisorctl]
@@ -38,6 +44,10 @@
 """
 
 
+def redis_connection() -> redis.Redis:
+    return redis.Redis.from_url(config["redis_url"], decode_responses=True)
+
+
 def create_enqueuer_wrapper():
     with open(_CONF_FILE, "w") as f:
         f.write(HEADER)
@@ -71,6 +81,23 @@ def stat(extra_args):
     subprocess.run([_RQ, "info", "--url", config["redis_url"], *extra_args], check=True)
 
+
+def clean(age, dry_run):
+    for settings_id, settings in dict(redis_connection().hgetall("autotest:settings") or {}).items():
+        settings = json.loads(settings)
+        last_access_timestamp = settings.get("_last_access")
+        access = int(time.time() - (last_access_timestamp or 0))
+        if last_access_timestamp is None or (access > (age * SECONDS_PER_DAY)):
+            dir_path = os.path.join(config["workspace"], "scripts", str(settings_id))
+            if dry_run and os.path.isdir(dir_path):
+                last_access = "UNKNOWN" if last_access_timestamp is None else access // SECONDS_PER_DAY
+                print(f"{dir_path} -> last accessed {last_access or '< 1'} days ago")
+            elif not dry_run:
+                settings["_error"] = "the settings for this test have expired, please re-upload the settings."
+                redis_connection().hset("autotest:settings", key=settings_id, value=json.dumps(settings))
+                if os.path.isdir(dir_path):
+                    shutil.rmtree(dir_path)
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     subparsers = parser.add_subparsers(dest="command")
@@ -79,6 +106,14 @@ def stat(extra_args):
     subparsers.add_parser("stop", help="stop the autotester")
     subparsers.add_parser("restart", help="restart the autotester")
     subparsers.add_parser("stat", help="display current status of the autotester queues")
+    clean_parser = subparsers.add_parser("clean", help="clean up old/unused test scripts")
+
+    clean_parser.add_argument(
+        "-a", "--age", default=0, type=int, help="clean up tests older than <age> days. Default=0"
+    )
+    clean_parser.add_argument(
+        "-d", "--dry-run", action="store_true", help="list files that will be deleted without actually removing them"
+    )
 
     args, remainder = parser.parse_known_args()
     if args.command == "start":
@@ -90,3 +125,5 @@ def stat(extra_args):
         start(remainder)
     elif args.command == "stat":
         stat(remainder)
+    elif args.command == "clean":
+        clean(args.age, args.dry_run)
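
For reference, a minimal standalone sketch (not part of the patch) of the payload produced by the new Test.format_annotations helper, which testers can emit as part of the test result now that automated feedback-file content is gone. The annotation keys shown below are assumptions for illustration only; this patch does not define them.

    import json
    from typing import Any, Dict, List


    def format_annotations(annotation_data: List[Dict[str, Any]]) -> str:
        # Same behaviour as Test.format_annotations above: wrap the list of
        # annotation dictionaries in a single JSON object under "annotations".
        return json.dumps({"annotations": annotation_data})


    # Hypothetical annotation entries; the exact keys expected by the client
    # are an assumption here, not something this patch specifies.
    annotations = [{"filename": "submission.py", "content": "possible off-by-one", "line_start": 10, "line_end": 10}]
    print(format_annotations(annotations))  # {"annotations": [{"filename": "submission.py", ...}]}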