grader_tests.py
"""
Grader tests that give points and feedback for successful tests.
Random data generation is handled by a property based testing library called Hypothesis:
https://hypothesis.readthedocs.io/en/latest/
"""
import unittest
# Specify test method points and test method level feedback with the points decorator
from graderutils.graderunittest import points
# Strategies for generating random data, more info:
from hypothesis import strategies, given, settings, PrintSettings, Verbosity
# Use a model solution model.py to check the correct behaviour for the submitted file primes.py
from model import is_prime as model_is_prime
from primes import is_prime
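
# Neither model.py nor primes.py is shown in this file. Both are assumed to expose a function
# with the same signature as below; a minimal sketch of what the model solution might look like:
#
#   def is_prime(n):
#       """Return True if n is a prime number, False otherwise."""
#       if n < 2:
#           return False
#       for k in range(2, int(n ** 0.5) + 1):
#           if n % k == 0:
#               return False
#       return True
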
# The only requirement for graderutils test classes is that they inherit the standard library unittest.TestCase
class TestPrimes(unittest.TestCase):
    """
    Compare the output of a simple prime number checker to the corresponding model solution.
    """

    @points(20)
    # Use Hypothesis to generate random integers and pass them to the test method as the argument x.
    # Hypothesis uses internal heuristics to choose data distributions and known edge cases depending on the data type:
    # https://hypothesis.readthedocs.io/en/latest/data.html
    @given(x=strategies.integers(min_value=0, max_value=10**6))
    # Hypothesis attempts to make this test method fail with different values of x.
    # The falsification process ends once max_examples (here 100) values of x have been tried without failing the test.
    # We also disable the Hypothesis database, i.e. the cache, because we are not interested in
    # remembering how the tests behaved for data generated on previous test executions:
    # https://hypothesis.readthedocs.io/en/latest/settings.html#available-settings
    @settings(max_examples=100, database=None, print_blob=PrintSettings.NEVER, verbosity=Verbosity.quiet)
    def test3_large_positive_random_integers(self, x):
        """Randomly picked integers in the range [0, 1 000 000]."""
        if model_is_prime(x):
            self.assertTrue(is_prime(x), "{} is a prime number but your function says it is not.".format(x))
        else:
            self.assertFalse(is_prime(x), "{} is not a prime number but your function says it is.".format(x))
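
    # For comparison, the points decorator also works on plain unittest test methods that do not
    # use Hypothesis. The method below is an illustrative sketch only; its name, point value and
    # test data are not part of the original file.
    @points(5)
    def test_small_known_values(self):
        """A few hand-picked values with a known expected result."""
        for x, expected in [(2, True), (3, True), (4, False), (17, True), (18, False)]:
            self.assertEqual(is_prime(x), expected,
                             "is_prime({}) should return {} but it does not.".format(x, expected))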

    # Optionally, if you need setUp and tearDown:
    # If hypothesis.given is used on a test method, Hypothesis runs several iterations of the method.
    # Then, using TestCase.setUp and TestCase.tearDown might appear to behave inconsistently.
    # To solve this, Hypothesis provides setup_example and teardown_example for finer granularity:

    def setUp(self):
        # From unittest, runs once before each test method,
        # e.g. once for test3_large_positive_random_integers
        pass

    def setup_example(self):
        # From Hypothesis, runs once before each Hypothesis example,
        # e.g. 100 times for test3_large_positive_random_integers
        pass

    def tearDown(self):
        # From unittest, runs once after each test method,
        # e.g. once for test3_large_positive_random_integers
        pass

    def teardown_example(self, example):
        # From Hypothesis, runs once after each Hypothesis example,
        # e.g. 100 times for test3_large_positive_random_integers
        pass
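

# A minimal, hedged sketch of running this module locally with the standard unittest runner.
# In a real grading setup graderutils drives the tests and collects the points; this block is
# only for quick local debugging and is not part of the original file.
if __name__ == "__main__":
    unittest.main()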