feat: support testing shell scripts using a configuration file (#15)
Support testing shell scripts using test_config.toml and test.py.

The configuration file specifies:
- The target shell script to be tested
- The `diff` command for output comparison
- Test cases, each with a name, args, and a test directory containing the expected output
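
A minimal sketch of the expected shape, with purely hypothetical names and values (the real file added by this commit, test_config.toml, appears at the end of the diff):

```toml
# Hypothetical example; each top-level table describes one target script.
[my_feature]
diff_command = "diff -r data actual"
test_target = "my_feature.sh"

[[my_feature.test_cases]]
name = "Run my_feature with default arguments"
args = "--dry-run"
directory = "test/my_feature/"
```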

The test.py program runs all defined tests and provides a summarized result.
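For illustration only, the summary for two hypothetical test cases might look roughly like this (hyphen padding depends on the terminal width):

```text
--------------------------Test Results--------------------------
✅ Test Sync scaffold from a commit passed successfully
❌ Test Sync scaffold from a branch failed
Tests passed: 1 of 2 8.52s
-----------------------------------------------------------------
```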

Refactor the test framework (test_util.py):
- Outputs are now logged directly to a file
- execute(), verify(), and _execute_command(): change the return value from None to bool
- _execute_command(): print the log file path when an error is detected
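
A sketch of how the refactored helper behaves from a caller's perspective (all paths and arguments here are placeholders, not part of the commit):

```python
from test_util import DocSitePreviewTest

# test_dir holds data/ (expected output); feature_dir holds the target script.
test = DocSitePreviewTest(
    test_dir="/abs/path/to/test/my_feature",
    feature_dir="my_feature",
    script_name="my_feature.sh",
)

# execute() and verify() now return True/False instead of raising;
# on failure, the command output is kept in a timestamped *.log file
# and its path is printed.
ok = test.execute(args="--dry-run") and test.verify("diff -r data actual")
print("passed" if ok else "failed")
```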

Usage:

1. Define test cases in test_config.toml
2. Run `test/test.py`
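
Environment variables required by the target script are read from a `.env` file (git-ignored, see the .gitignore change below) in the directory from which the tests are run, and merged into the environment passed to the script. Note that `_load_env` splits every line on "=", so the file should contain only KEY=VALUE lines, with no comments or blank lines. A hypothetical example:

```text
SCAFFOLD_REPO=https://github.com/example/scaffold.git
```
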
Oreoxmt authored Sep 21, 2023
1 parent f992c91 commit 48c6350
Showing 6 changed files with 173 additions and 46 deletions.
6 changes: 6 additions & 0 deletions .gitignore
@@ -1,2 +1,8 @@
+# Temporary files
+temp
+
+# Environment variables used in tests
+.env
+
.DS_Store
.idea
1 change: 1 addition & 0 deletions test/.gitignore
@@ -1,2 +1,3 @@
__pycache__
actual
+*.log
21 changes: 0 additions & 21 deletions test/sync_scaffold/test_sync_scaffold.py

This file was deleted.

125 changes: 125 additions & 0 deletions test/test.py
@@ -0,0 +1,125 @@
import os
import time
import tomllib

from dataclasses import dataclass
from typing import Dict, List

from tqdm import tqdm

from test_util import DocSitePreviewTest

ENV_FILE: str = ".env"
CONFIG_FILE: str = "test_config.toml"


@dataclass
class TestReport:
    start_time: float
    end_time: float
    success_tests: List[str]
    failed_tests: List[str]


@dataclass
class TestCase:
    name: str
    args: str
    directory: str


@dataclass
class TestConfig:
    diff_command: str
    test_target: str
    test_cases: List[TestCase]


class TestRunner:
    def __init__(self):
        self.tests = self._load_config()
        self.report = TestReport(
            start_time=time.time(), end_time=time.time(),
            success_tests=[], failed_tests=[])
        self._env = self._load_env()

    @staticmethod
    def _load_config() -> List[TestConfig]:
        """
        Load test config from test_config.toml.
        """
        with open(CONFIG_FILE, "rb") as f:
            data = tomllib.load(f)
        config = []
        for _, test in data.items():
            test_cases = [TestCase(**case) for case in test["test_cases"]]
            config.append(TestConfig(
                diff_command=test["diff_command"],
                test_target=test["test_target"],
                test_cases=test_cases))
        return config

    @staticmethod
    def _load_env() -> Dict[str, str]:
        """
        Load environment variables from .env file.
        """
        env = os.environ.copy()
        with open(ENV_FILE, "rb") as f:
            for line in f:
                key, value = line.decode("utf-8").strip().split("=")
                env[key] = value
        return env

    def run(self) -> None:
        """
        Run test cases based on given configuration and environment variables.
        """
        print(f"Running Tests...")

        for config in self.tests:
            script_name = config.test_target
            diff_command = config.diff_command

            for case in tqdm(config.test_cases):
                case_name = case.name
                feature_dir = os.path.dirname(case_name)
                test_dir = os.path.abspath(case.directory)
                script_args = case.args

                test = DocSitePreviewTest(test_dir, feature_dir, script_name)

                if test.execute(args=script_args, env=self._env) and test.verify(diff_command):
                    self.report.success_tests.append(case_name)
                else:
                    self.report.failed_tests.append(case_name)

        self.report.end_time = time.time()

    def analyze(self) -> str:
        """
        Analyze test results and generate a report.
        """
        terminal_width = os.get_terminal_size().columns
        hyphens = "-" * ((terminal_width - len("Test Results")) // 2)
        duration = self.report.end_time - self.report.start_time

        success_count = len(self.report.success_tests)
        failed_count = len(self.report.failed_tests)
        total_count = success_count + failed_count

        result = f"{hyphens}Test Results{hyphens}\n"
        for test in self.report.success_tests:
            result += f"✅ Test {test} passed successfully\n"
        for test in self.report.failed_tests:
            result += f"❌ Test {test} failed\n"
        result += f"Tests passed: {success_count} of {total_count} {duration:.2f}s\n"
        result += "-" * terminal_width
        return result


if __name__ == "__main__":
    runner = TestRunner()
    runner.run()
    conclusion = runner.analyze()
    print(conclusion)
56 changes: 31 additions & 25 deletions test/test_util.py
@@ -2,72 +2,78 @@
import shlex
import shutil
import subprocess
+import time
+
+from typing import Dict, List


class DocSitePreviewTest:

    def __init__(self, test_dir: str, feature_dir: str, script_name: str):
-        self.test_dir = test_dir
        self.feature_dir = feature_dir
        self.script_name = script_name
+        self.test_dir = test_dir

        self.test_output = os.path.join(self.test_dir, "actual")
-        self.test_feature_path = os.path.join(self.test_output, self.script_name)
+        self.test_script = os.path.join(self.test_output, self.script_name)

        self._setup_test_env()

-    def _setup_test_env(self):
+    def _setup_test_env(self) -> None:
        """
        Generate the test environment for execution.
        1. Clean up the test environment.
        2. Copy the target script to the test environment.
        """
-        self._clean_up()
-        self._copy_and_setup_script()
+        self._clean()
+        self._copy_setup_script()

-    def _clean_up(self):
+    def _clean(self) -> None:
        """
        Clean up the test environment.
        """
        if os.path.exists(self.test_output):
            shutil.rmtree(self.test_output)
        os.makedirs(self.test_output, exist_ok=True)

-    def _copy_and_setup_script(self):
+    def _copy_setup_script(self) -> None:
        """
        Copy the script to the test environment.
        """
-        shutil.copy(os.path.join(self.feature_dir, self.script_name), self.test_feature_path)
-        self._make_script_executable(self.test_feature_path)
+        shutil.copy(os.path.join(self.feature_dir, self.script_name), self.test_script)
+        self._make_executable(self.test_script)

    @staticmethod
-    def _make_script_executable(script: str):
+    def _make_executable(script: str) -> None:
        """
        Make the script executable (chmod +x).
        """
        os.chmod(script, 0o755)

-    def execute(self, args: str = "", env: dict | None = None):
+    def execute(self, args: str = "", env: Dict[str, str] | None = None) -> bool:
        """
        Execute the feature command.
        """
-        command_str = self.test_feature_path + " " + args
-        command_list = shlex.split(command_str)
-        self._execute_command(command_list, self.test_output, env)
+        command = f"{self.test_script} {args}"
+        return self._execute_command(shlex.split(command), self.test_output, "execute", env)

    @staticmethod
-    def _execute_command(command, cwd, env=None):
+    def _execute_command(command: List[str], cwd: str, task: str, env: Dict[str, str] | None = None) -> bool:
        """
-        Execute a command and check its exit code.
-        Raise an exception if the command does not return 0.
+        Execute a command and log the output to *.log.
+        Returns:
+            bool: True if the command is executed successfully, False otherwise.
        """
-        process = subprocess.Popen(command, cwd=cwd, env=env)
-        code = process.wait()
-        if code != 0:
-            raise Exception("Error: command returned code {}".format(code))
+        log_path = os.path.join(cwd, f"{task}_{int(time.time())}.log")
+        with open(log_path, "w") as f:
+            result = subprocess.run(command, stdout=f, stderr=f, text=True, cwd=cwd, env=env)
+        if result.returncode != 0:
+            print(f"🐛 Error detected! Log available at: {log_path}")
+            return False
+        return True

-    def verify(self, command: str = "diff -r data actual"):
+    def verify(self, command: str = "diff -r data actual") -> bool:
        """
        Use diff command to compare the expected output (data) and the actual output.
        """
-        args = shlex.split(command)
-        self._execute_command(args, self.test_dir)
-        print("Test {} passed successfully".format(self.script_name))
+        return self._execute_command(shlex.split(command), self.test_dir, "verify")
10 changes: 10 additions & 0 deletions test_config.toml
@@ -0,0 +1,10 @@
[sync_scaffold]

diff_command = "diff -qrs data actual --exclude temp --exclude '*.log' --exclude sync_scaffold.sh"
test_target = "sync_scaffold.sh"

[[sync_scaffold.test_cases]]

name = "Sync scaffold from a commit"
args = "265874160aec258f9c725b0e940bc803ca558bda"
directory = "test/sync_scaffold/"
