Add algorithm for testing Project Euler solutions (#2471)

* Add file for testing Project Euler solutions

* Remove the importlib import

* Update project_euler/solution_test.py

Co-authored-by: Christian Clauss <cclauss@me.com>

* Small tweaks to project_euler/solution_test.py

* Test Project Euler solutions through Travis

* Improved testing for Project Euler solutions:

- Renamed file so that it isn't picked up by pytest
- Fail fast on validating solutions through Travis CI

* Update validate_solutions.py

* Use namedtuple for input parameters and answer

- Remove logging
- Remove unnecessary checks for PROJECT_EULER_PATH as Travis CI
  picks up the same path

* Fix flake8 errors: line too long

* Small tweaks to validate_solutions.py

* Add all answers & back to using dictionary

* Using pytest for testing Project Euler solutions

- As we want to fail fast on wrong solutions, this script is tested first,
  before the tests written by the solution authors.
- As pytest stops testing a function as soon as it receives a traceback, we
  use pytest-subtests so that every iteration of the parametrized loop is
  still checked (see the sketch below).
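
A minimal, hypothetical illustration of the pytest-subtests behaviour described
above (not part of this commit; it assumes the pytest-subtests plugin is
installed, which is what provides the subtests fixture):

# Hypothetical example, not part of this commit: with pytest-subtests installed,
# the `subtests` fixture reports every loop iteration separately, so one failing
# case no longer hides the remaining ones.
def test_squares(subtests):
    cases = [(2, 4), (3, 9), (4, 15)]  # the last pair is deliberately wrong
    for base, expected in cases:
        with subtests.test(msg=f"square of {base}"):
            assert base * base == expected  # only this subtest fails; the rest still run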

* Print error messages in a one-line format

* Separated the answers into their own file:

- Add a custom print function to print all the error messages at the
  end of all the tests
- Let Travis continue even if this step fails ("|| true" in .travis.yml)
Co-authored-by: Christian Clauss <cclauss@me.com>
Dhruv 2020-09-28 11:48:19 +05:30 committed by GitHub
parent 187e8ccc95
commit ceacfc6079
3 changed files with 2972 additions and 1 deletion

.travis.yml

@@ -17,7 +17,9 @@ jobs:
        - mypy --ignore-missing-imports . || true # https://github.com/python/mypy/issues/7907
        - pytest --doctest-modules --ignore=project_euler/ --durations=10 --cov-report=term-missing:skip-covered --cov=. .
    - name: Project Euler
-      before_script: pip install pytest-cov
+      before_script:
+        - pip install pytest-cov pytest-subtests
+        - pytest --tb=no --no-summary --capture=no project_euler/validate_solutions.py || true # fail fast on wrong solution
      script:
        - pytest --doctest-modules --durations=10 --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/
after_success:
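
For reference, a rough local equivalent of the two-stage job above (a sketch
only; it assumes pytest, pytest-cov and pytest-subtests are installed and that
the working directory is the repository root):

# Hypothetical local mirror of the Travis job above: run the fail-fast validator
# first and tolerate its failure (the "|| true" in .travis.yml), then let the
# regular doctest/coverage run decide the overall exit status.
import subprocess
import sys

subprocess.run(
    ["pytest", "--tb=no", "--no-summary", "--capture=no",
     "project_euler/validate_solutions.py"],
    check=False,  # do not abort on a wrong solution, matching "|| true"
)
result = subprocess.run(
    ["pytest", "--doctest-modules", "--durations=10",
     "--cov-report=term-missing:skip-covered", "--cov=project_euler/",
     "project_euler/"],
    check=False,
)
sys.exit(result.returncode)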

project_euler/project_euler_answers.json (file diff suppressed because it is too large)

project_euler/validate_solutions.py

@@ -0,0 +1,67 @@
#!/usr/bin/env python3
import importlib.util
import json
import pathlib
from types import ModuleType
from typing import Generator

import pytest

PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler")
PROJECT_EULER_ANSWERS_PATH = PROJECT_EULER_DIR_PATH.joinpath(
    "project_euler_answers.json"
)

with open(PROJECT_EULER_ANSWERS_PATH) as file_handle:
    PROBLEM_ANSWERS = json.load(file_handle)

error_msgs = []


def generate_solution_modules(
    dir_path: pathlib.Path,
) -> Generator[ModuleType, None, None]:
    # Iterate over every Python file in the problem directory, skipping private
    # modules and the solution authors' own test files
    for file_path in dir_path.iterdir():
        if file_path.suffix != ".py" or file_path.name.startswith(("_", "test")):
            continue
        # Import the source file directly through the given path
        # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
        spec = importlib.util.spec_from_file_location(file_path.name, str(file_path))
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        yield module


@pytest.mark.parametrize("problem_number, expected", PROBLEM_ANSWERS)
def test_project_euler(subtests, problem_number: int, expected: str):
    problem_dir = PROJECT_EULER_DIR_PATH.joinpath(f"problem_{problem_number:02}")
    # Check if the problem directory exists. If not, then skip.
    if problem_dir.is_dir():
        for solution_module in generate_solution_modules(problem_dir):
            # All the iterations of this loop are collected as a single test by
            # pytest, so use subtests to report each solution module separately.
            with subtests.test(
                msg=f"Problem {problem_number} tests", solution_module=solution_module
            ):
                try:
                    answer = str(solution_module.solution())
                    assert answer == expected, f"Expected {expected} but got {answer}"
                except (AssertionError, AttributeError, TypeError) as err:
                    error_msgs.append(
                        f"problem_{problem_number}/{solution_module.__name__}: {err}"
                    )
                    raise  # We still want pytest to know that this test failed
    else:
        pytest.skip(f"Solution {problem_number} does not exist yet.")


# Run this function at the end of all the tests
# https://stackoverflow.com/a/52873379
@pytest.fixture(scope="session", autouse=True)
def custom_print_message(request):
    def print_error_messages():
        if error_msgs:
            print("\n" + "\n".join(error_msgs))

    request.addfinalizer(print_error_messages)
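
The answers file itself is not reproduced above because its diff is suppressed.
For the parametrize call to unpack two arguments per test case, the loaded JSON
has to yield two-item entries; the snippet below is a purely hypothetical
illustration of that assumed shape (only Problem 1's well-known answer, 233168,
is used) and is not the real content of project_euler_answers.json:

# Hypothetical illustration only; NOT the real project_euler_answers.json.
# For pytest.mark.parametrize("problem_number, expected", PROBLEM_ANSWERS) to
# work, PROBLEM_ANSWERS must yield (problem_number, expected_answer) pairs.
import json

example_answers = json.loads('[[1, "233168"]]')  # Problem 1's answer is well known
for problem_number, expected in example_answers:  # how pytest unpacks each case
    print(f"problem_{problem_number:02} should return {expected}")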