Splitting test class into unit and integration tests
pseewald committed May 4, 2024
1 parent 1576631 commit 1303fe9
Showing 5 changed files with 110 additions and 108 deletions.
2 changes: 1 addition & 1 deletion fortran_tests/testsuites.config
@@ -2,7 +2,7 @@
suite: builtin
options:

[UnitTests]
[Examples]
obtain: shutil.copytree('../../examples/in', 'examples', dirs_exist_ok=True)
path: examples
suite: builtin
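The renamed [Examples] section keeps the keys shown above (obtain, path, suite) and is read by generate_suite() in fortrantests.py below via configparser. A minimal sketch of loading that section, assuming the script runs from the repository root:

    import configparser

    # Load the suite definitions; section and key names are the ones shown in
    # the hunk above, and the relative path assumes the repository root as CWD.
    config = configparser.ConfigParser()
    config.read('fortran_tests/testsuites.config')

    examples = config['Examples']
    print(examples['path'])    # "examples"
    print(examples['suite'])   # "builtin"
    print(examples['obtain'])  # the shutil.copytree(...) expression string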
108 changes: 97 additions & 11 deletions fprettify/tests/fortrantests.py
@@ -19,6 +19,7 @@
###############################################################################


import sys
import hashlib
import logging
import io
@@ -28,12 +29,97 @@
import configparser
import shutil
import shlex
from datetime import datetime
import fprettify
from fprettify.tests.test_common import TEST_MAIN_DIR, TEST_EXT_DIR, BACKUP_DIR, RESULT_DIR, RESULT_FILE, FAILED_FILE, FprettifyTestCase, joinpath
from fprettify.tests.test_common import _MYPATH, FprettifyTestCase, joinpath

_TIMESTAMP = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

# main directory for running tests
TEST_MAIN_DIR = joinpath(_MYPATH, r'../../fortran_tests')

# directory for external Fortran code
TEST_EXT_DIR = joinpath(TEST_MAIN_DIR, r'test_code')

# directory containing Fortran examples
EXAMPLE_DIR = joinpath(_MYPATH, r'../../examples/in')

# backup directory
BACKUP_DIR = joinpath(TEST_MAIN_DIR, r'test_code_in_' + _TIMESTAMP)

# where to store summarized results
RESULT_DIR = joinpath(TEST_MAIN_DIR, r'test_results')

# expected hash-sums
RESULT_FILE = joinpath(RESULT_DIR, r'expected_results')

# test failures
FAILED_FILE = joinpath(RESULT_DIR, r'failed_results')


fprettify.set_fprettify_logger(logging.ERROR)

class FprettifyIntegrationTestCase(FprettifyTestCase):
    def shortDescription(self):
        """don't print doc string of testmethod"""
        return None

    def setUp(self):
        """
        setUp to be recognized by unittest.
        We have large files to compare, raise the limit
        """
        self.maxDiff = None

    @classmethod
    def setUpClass(cls):
        """
        setUpClass to be recognized by unittest.
        """

        cls.n_success = 0
        cls.n_parsefail = 0
        cls.n_internalfail = 0
        cls.n_unexpectedfail = 0

        FprettifyIntegrationTestCase.eprint("-" * 70)
        FprettifyIntegrationTestCase.eprint("recognized Fortran files")
        FprettifyIntegrationTestCase.eprint(", ".join(fprettify.FORTRAN_EXTENSIONS))
        FprettifyIntegrationTestCase.eprint("-" * 70)
        FprettifyIntegrationTestCase.eprint("Applying fprettify to Fortran files in " + TEST_EXT_DIR)
        FprettifyIntegrationTestCase.eprint("Writing backup of original files to " + BACKUP_DIR)
        FprettifyIntegrationTestCase.eprint("Storing expected results in " + RESULT_FILE)
        FprettifyIntegrationTestCase.eprint("Storing failed results in " + FAILED_FILE)
        FprettifyIntegrationTestCase.eprint("-" * 70)

    @classmethod
    def tearDownClass(cls):
        """
        tearDownClass to be recognized by unittest. Used for test summary
        output.
        """
        if cls.n_parsefail + cls.n_internalfail > 0:
            format = "{:<20}{:<6}"
            FprettifyIntegrationTestCase.eprint('\n' + "=" * 70)
            FprettifyIntegrationTestCase.eprint("IGNORED errors: invalid or old Fortran")
            FprettifyIntegrationTestCase.eprint("-" * 70)
            FprettifyIntegrationTestCase.eprint(format.format("parse errors: ", cls.n_parsefail))
            FprettifyIntegrationTestCase.eprint(format.format("internal errors: ", cls.n_internalfail))

    @staticmethod
    def write_result(filename, content, sep_str): # pragma: no cover
        with io.open(filename, 'a', encoding='utf-8') as outfile:
            outfile.write(sep_str.join(content) + '\n')

    @staticmethod
    def eprint(*args, **kwargs):
        """
        Print to stderr - to print output compatible with default unittest output.
        """

        print(*args, file=sys.stderr, flush=True, **kwargs)


def generate_suite(suite=None, name=None):
    import git
    config = configparser.ConfigParser()
@@ -56,15 +142,15 @@ def generate_suite(suite=None, name=None):
os.chdir(orig)

addtestcode(code['path'], code['options'])
return FprettifyTestCase
return FprettifyIntegrationTestCase

def addtestcode(code_path, options):
    print(f"creating test cases from {code_path} ...")
    # dynamically create test cases from fortran files in test directory
    for dirpath, _, filenames in os.walk(joinpath(TEST_EXT_DIR, code_path)):
        for example in [f for f in filenames if any(f.endswith(_) for _ in fprettify.FORTRAN_EXTENSIONS)]:
            rel_dirpath = os.path.relpath(dirpath, start=TEST_EXT_DIR)
            addtestmethod(FprettifyTestCase, rel_dirpath, example, options)
            addtestmethod(FprettifyIntegrationTestCase, rel_dirpath, example, options)

def addtestmethod(testcase, fpath, ffile, options):
    """add a test method for each example."""
@@ -107,19 +193,19 @@ def test_result(path, info):
test_info = "checksum"
test_content = test_result(example, m.hexdigest())

FprettifyTestCase.n_success += 1
FprettifyIntegrationTestCase.n_success += 1
except fprettify.FprettifyParseException as e:
test_info = "parse error"
fprettify.log_exception(e, test_info, level="warning")
test_content = test_result(example, test_info)
FprettifyTestCase.n_parsefail += 1
FprettifyIntegrationTestCase.n_parsefail += 1
except fprettify.FprettifyInternalException as e:
test_info = "internal error"
fprettify.log_exception(e, test_info, level="warning")
test_content = test_result(example, test_info)
FprettifyTestCase.n_internalfail += 1
FprettifyIntegrationTestCase.n_internalfail += 1
except: # pragma: no cover
FprettifyTestCase.n_unexpectedfail += 1
FprettifyIntegrationTestCase.n_unexpectedfail += 1
raise

# overwrite example
@@ -141,7 +227,7 @@ def test_result(path, info):
line_content = line.strip().split(sep_str)
if line_content[0] == test_content[0]:
found = True
FprettifyTestCase.eprint(test_info, end=" ")
FprettifyIntegrationTestCase.eprint(test_info, end=" ")
msg = '{} (old) != {} (new)'.format(
line_content[1], test_content[1])
if test_info == "checksum" and outstring.count('\n') < 10000:
@@ -153,14 +239,14 @@
testcase.assertEqual(
line_content[1], test_content[1], msg)
except AssertionError: # pragma: no cover
FprettifyTestCase.write_result(
FprettifyIntegrationTestCase.write_result(
FAILED_FILE, test_content, sep_str)
raise
break

if not found: # pragma: no cover
FprettifyTestCase.eprint(test_info + " new", end=" ")
FprettifyTestCase.write_result(RESULT_FILE, test_content, sep_str)
FprettifyIntegrationTestCase.eprint(test_info + " new", end=" ")
FprettifyIntegrationTestCase.write_result(RESULT_FILE, test_content, sep_str)

# not sure why this even works, using "test something" (with a space) as function name...
# however it gives optimal test output
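addtestmethod() above attaches one generated test per Fortran example file, and the closing comment about using "test something" (with a space) as the function name refers to this dynamic attribute assignment: unittest's TestLoader accepts any callable attribute whose name starts with "test", even if the rest is not a valid identifier, and prints that name verbatim. A minimal, self-contained sketch of the pattern — the helper name and placeholder test body are illustrative, not the collapsed parts of addtestmethod():

    import unittest

    class ExampleIntegrationCase(unittest.TestCase):
        pass

    def add_dynamic_test(testcase, fpath, ffile):
        def testmethod(self):
            # placeholder for the real reformat-and-compare logic
            self.assertTrue(ffile.endswith('.f90'))
        # Attribute names with spaces cannot be written as def statements,
        # but setattr accepts them and TestLoader discovers them by prefix.
        setattr(testcase, "test " + fpath + "/" + ffile, testmethod)

    add_dynamic_test(ExampleIntegrationCase, 'some_dir', 'example.f90')
    unittest.TextTestRunner(verbosity=2).run(
        unittest.TestLoader().loadTestsFromTestCase(ExampleIntegrationCase))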
87 changes: 1 addition & 86 deletions fprettify/tests/test_common.py
@@ -19,8 +19,6 @@
###############################################################################
import os, sys, io
import inspect
import hashlib
from datetime import datetime
import unittest
import fprettify

@@ -31,94 +29,11 @@ def joinpath(path1, path2):
_MYPATH = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))

_TIMESTAMP = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

# main directory for running tests
TEST_MAIN_DIR = joinpath(_MYPATH, r'../../fortran_tests')

# directory for external Fortran code
TEST_EXT_DIR = joinpath(TEST_MAIN_DIR, r'test_code')

# directory containing Fortran examples
EXAMPLE_DIR = joinpath(_MYPATH, r'../../examples/in')

# backup directory
BACKUP_DIR = joinpath(TEST_MAIN_DIR, r'test_code_in_' + _TIMESTAMP)

# where to store summarized results
RESULT_DIR = joinpath(TEST_MAIN_DIR, r'test_results')

# expected hash-sums
RESULT_FILE = joinpath(RESULT_DIR, r'expected_results')

# test failures
FAILED_FILE = joinpath(RESULT_DIR, r'failed_results')

# path to fprettify
RUNSCRIPT = joinpath(_MYPATH, r"../../fprettify.py")


class FprettifyTestCase(unittest.TestCase):
    """
    test class to be recognized by unittest.
    test class to be recognized by unittest, specialized for fprettify tests.
    """

    def shortDescription(self):
        """don't print doc string of testmethod"""
        return None

    def setUp(self):
        """
        setUp to be recognized by unittest.
        We have large files to compare, raise the limit
        """
        self.maxDiff = None

    @classmethod
    def setUpClass(cls):
        """
        setUpClass to be recognized by unittest.
        """

        cls.n_success = 0
        cls.n_parsefail = 0
        cls.n_internalfail = 0
        cls.n_unexpectedfail = 0

        FprettifyTestCase.eprint("-" * 70)
        FprettifyTestCase.eprint("recognized Fortran files")
        FprettifyTestCase.eprint(", ".join(fprettify.FORTRAN_EXTENSIONS))
        FprettifyTestCase.eprint("-" * 70)
        FprettifyTestCase.eprint("Applying fprettify to Fortran files in " + TEST_EXT_DIR)
        FprettifyTestCase.eprint("Writing backup of original files to " + BACKUP_DIR)
        FprettifyTestCase.eprint("Storing expected results in " + RESULT_FILE)
        FprettifyTestCase.eprint("Storing failed results in " + FAILED_FILE)
        FprettifyTestCase.eprint("-" * 70)

    @classmethod
    def tearDownClass(cls):
        """
        tearDownClass to be recognized by unittest. Used for test summary
        output.
        """
        if cls.n_parsefail + cls.n_internalfail > 0:
            format = "{:<20}{:<6}"
            FprettifyTestCase.eprint('\n' + "=" * 70)
            FprettifyTestCase.eprint("IGNORED errors: invalid or old Fortran")
            FprettifyTestCase.eprint("-" * 70)
            FprettifyTestCase.eprint(format.format("parse errors: ", cls.n_parsefail))
            FprettifyTestCase.eprint(format.format("internal errors: ", cls.n_internalfail))

    @staticmethod
    def write_result(filename, content, sep_str): # pragma: no cover
        with io.open(filename, 'a', encoding='utf-8') as outfile:
            outfile.write(sep_str.join(content) + '\n')

    @staticmethod
    def eprint(*args, **kwargs):
        """
        Print to stderr - to print output compatible with default unittest output.
        """

        print(*args, file=sys.stderr, flush=True, **kwargs)

1 change: 0 additions & 1 deletion fprettify/tests/unittests.py
@@ -26,7 +26,6 @@
import logging
import io
import subprocess
import shlex

sys.stderr = io.TextIOWrapper(
    sys.stderr.detach(), encoding='UTF-8', line_buffering=True)
20 changes: 11 additions & 9 deletions run_tests.py
@@ -20,8 +20,7 @@

import unittest
from fprettify.tests.unittests import FprettifyUnitTestCase
from fprettify.tests.fortrantests import generate_suite
from fprettify.tests.test_common import FAILED_FILE, RESULT_FILE, FprettifyTestCase
from fprettify.tests.fortrantests import generate_suite, FAILED_FILE, RESULT_FILE
import fileinput
import io
import os
@@ -44,18 +43,21 @@
if args.suite[:2] == suite_default and len(args.suite) > 2:
args.suite = args.suite[2:]

test_cases = []

if args.name:
testCase = generate_suite(name=args.name)
test_cases.append(generate_suite(name=args.name))
else:
test_suite = unittest.TestSuite()
for suite in args.suite:
if suite == "unittests":
test_loaded = unittest.TestLoader().loadTestsFromTestCase(FprettifyUnitTestCase)
test_suite.addTest(test_loaded)
test_cases.append(FprettifyUnitTestCase)
else:
testCase = generate_suite(suite=suite)
test_loaded = unittest.TestLoader().loadTestsFromTestCase(testCase)
test_suite.addTest(test_loaded)
test_cases.append(generate_suite(suite=suite))

test_suite = unittest.TestSuite()
for test_case in test_cases:
test_loaded = unittest.TestLoader().loadTestsFromTestCase(test_case)
test_suite.addTest(test_loaded)

unittest.TextTestRunner(verbosity=2).run(test_suite)

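With this split, run_tests.py collects the selected test cases first and then builds a single unittest.TestSuite from them, so the fast unit tests and the repository-based integration tests can be run separately. A minimal sketch of running only the unit tests programmatically, mirroring the loop above (the integration suites additionally require the generate_suite() call from fortrantests.py):

    import unittest
    from fprettify.tests.unittests import FprettifyUnitTestCase

    # Build and run a suite containing only the unit tests.
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(FprettifyUnitTestCase))
    unittest.TextTestRunner(verbosity=2).run(suite)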
