Skip to content

Commit

Permalink
Support for tests (pytest-based) and added a few tests
Browse files Browse the repository at this point in the history
  • Loading branch information
MSeifert04 committed Jun 26, 2019
1 parent c722fd9 commit b285ebb
Show file tree
Hide file tree
Showing 9 changed files with 236 additions and 7 deletions.
30 changes: 26 additions & 4 deletions docs/source/development.rst
Original file line number Diff line number Diff line change
@@ -1,19 +1,25 @@
Development
===========

Prerequisites:

- Cloned or downloaded source repository. For example ``git clone https://github.com/MSeifert04/simple_benchmark.git``.
- You're in the root directory of the cloned (or downloaded) repository.
- Have an installed Python with pip and setuptools (the following will assume that the Python executable is in your path!).

Building the package locally
----------------------------

Navigate to the root directory of the repository (the directory where the
``setup.py`` file is) and then run one of these commands::
Navigate to the root directory of the repository (the directory where the ``setup.py`` file is) and then run one of
these commands::

python setup.py develop

or::

python -m pip install -e .

In case you want to install all the optional dependencies automatically::
In case you want to install all the optional dependencies automatically (**recommended**)::

python -m pip install -e .[optional]

Expand All @@ -30,7 +36,23 @@ Then just run::
python setup.py build_sphinx

The generated HTML documentation should then be available in the
``build/sphinx/html`` folder.
``./build/sphinx/html`` folder.


Running the tests locally
-------------------------

This requires that the package was installed with all development dependencies::

python -m pip install -e .[development]

Then use ``pytest``::

python -m pytest tests

Or to exclude the tests marked as slow::

python -m pytest tests -m "not slow"


Publishing the package to PyPI
Expand Down
3 changes: 3 additions & 0 deletions docs/source/extended.rst
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,9 @@ An example showing both in action::
)

b.plot()
# To save the plotted benchmark as a PNG file.
import matplotlib.pyplot as plt
plt.savefig('time_example.png')

.. image:: ./time_example.png

Expand Down
4 changes: 4 additions & 0 deletions setup.cfg
Original file line number Diff line number Diff line change
@@ -1,2 +1,6 @@
[build_sphinx]
project = 'simple_benchmark'

[tool:pytest]
markers =
slow: marks tests as slow
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
package_name = "simple_benchmark"

optional_dependencies = ["numpy", "matplotlib", "pandas"]
development_dependencies = ["sphinx"]
development_dependencies = ["sphinx", "pytest"]
maintainer_dependencies = ["twine"]


Expand Down Expand Up @@ -57,6 +57,7 @@ def version():

packages=find_packages(exclude=['ez_setup']),

tests_require=["pytest"],
extras_require={
'optional': optional_dependencies,
'development': optional_dependencies + development_dependencies,
Expand Down
3 changes: 1 addition & 2 deletions simple_benchmark/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -758,9 +758,8 @@ def use_random_lists_as_arguments(self, sizes):
sizes : iterable of int
An iterable containing the sizes for the lists (should be sorted).
"""
random_func = random.random

def provide_random_lists():
random_func = random.random
for size in sizes:
yield size, [random_func() for _ in itertools.repeat(None, times=size)]

Expand Down
45 changes: 45 additions & 0 deletions tests/test_assertions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import simple_benchmark

import operator

import numpy as np
import pytest


def sort_in_place(l):
    """Sort *l* in place and return the very same (mutated) list object."""
    list.sort(l)
    return l


def test_assert_same_results_work():
    """min and np.min agree on every input, so the check must pass silently."""
    sizes = [2**exp for exp in range(2, 5)]
    args = {size: list(range(size)) for size in sizes}
    simple_benchmark.assert_same_results(
        funcs=[min, np.min],
        arguments=args,
        equality_func=operator.eq,
    )


def test_assert_same_results_work_when_not_equal():
    """min and max disagree on non-trivial input, so the check must raise."""
    args = {size: list(range(size)) for size in (4, 8, 16)}
    with pytest.raises(AssertionError):
        simple_benchmark.assert_same_results(
            funcs=[min, max],
            arguments=args,
            equality_func=operator.eq,
        )


def test_assert_not_mutating_input_work():
    """Neither min nor np.min mutates its input, so the check must pass."""
    args = {2**exp: list(range(2**exp)) for exp in range(2, 5)}
    simple_benchmark.assert_not_mutating_input(
        funcs=[min, np.min],
        arguments=args,
        equality_func=operator.eq,
    )


def test_assert_not_mutating_input_work_when_modifies():
    """sort_in_place mutates its argument, so the mutation check must raise."""
    args = {}
    for exp in range(2, 5):
        size = 2**exp
        args[size] = list(reversed(range(size)))
    with pytest.raises(AssertionError):
        simple_benchmark.assert_not_mutating_input(
            funcs=[sorted, sort_in_place],
            arguments=args,
            equality_func=operator.eq,
        )
10 changes: 10 additions & 0 deletions tests/test_benchmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
import simple_benchmark

import collections


def test_simple():
    """Smoke test: benchmark() runs two builtins over a few small list sizes."""
    args = collections.OrderedDict((size, [1] * size) for size in (3, 4, 5, 6))
    simple_benchmark.benchmark(funcs=[min, max], arguments=args)
8 changes: 8 additions & 0 deletions tests/test_benchmarkbuilder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import simple_benchmark


def test_simple():
    """Smoke test: BenchmarkBuilder end-to-end with random list arguments."""
    builder = simple_benchmark.BenchmarkBuilder()
    builder.add_functions([min, max])
    builder.use_random_lists_as_arguments([2, 3, 4])
    builder.run()
137 changes: 137 additions & 0 deletions tests/test_doc_examples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
import pytest


@pytest.mark.slow
def test_readme():
    """The README example runs end-to-end (benchmark, DataFrame, plot)."""
    import numpy as np
    from simple_benchmark import benchmark

    funcs = [sum, np.sum]
    arguments = {size: [1] * size for size in [1, 10, 100, 1000, 10000, 100000]}
    aliases = {sum: 'Python sum', np.sum: 'NumPy sum'}
    result = benchmark(funcs, arguments, 'list size', function_aliases=aliases)
    result.to_pandas_dataframe()
    result.plot()


@pytest.mark.slow
def test_extended_benchmarkbuilder():
    """The extended-docs BenchmarkBuilder example runs end-to-end.

    The decorated function names are kept unchanged because the builder uses
    them as the benchmark labels.
    """
    import math
    from simple_benchmark import BenchmarkBuilder

    bench = BenchmarkBuilder()

    @bench.add_function()
    def sum_using_loop(lst):
        # Manual accumulation over the elements.
        total = 0
        for value in lst:
            total += value
        return total

    @bench.add_function()
    def sum_using_range_loop(lst):
        # Manual accumulation via index access.
        total = 0
        for position in range(len(lst)):
            total += lst[position]
        return total

    bench.use_random_lists_as_arguments(sizes=[2**exp for exp in range(2, 15)])
    bench.add_functions([sum, math.fsum])

    result = bench.run()
    result.plot()


@pytest.mark.slow
def test_extended_multiargument():
    """The MultiArgument docs example runs end-to-end.

    The decorated function names are kept unchanged because the builder uses
    them as the benchmark labels.
    """
    from itertools import starmap
    from operator import add
    from random import random

    from simple_benchmark import BenchmarkBuilder, MultiArgument

    bench = BenchmarkBuilder()

    @bench.add_function()
    def list_addition_zip(list1, list2):
        # Pairwise addition using zip.
        return [first + second for first, second in zip(list1, list2)]

    @bench.add_function()
    def list_addition_index(list1, list2):
        # Pairwise addition using explicit indexing.
        summed = []
        for position in range(len(list1)):
            summed.append(list1[position] + list2[position])
        return summed

    @bench.add_function()
    def list_addition_map_zip(list1, list2):
        # Pairwise addition pushed into C via starmap.
        return list(starmap(add, zip(list1, list2)))

    @bench.add_arguments(name='list sizes')
    def benchmark_arguments():
        # Yield (label, MultiArgument) pairs of two equally-sized random lists.
        for exponent in range(2, 15):
            size = 2**exponent
            yield size, MultiArgument([
                [random() for _ in range(size)],
                [random() for _ in range(size)],
            ])

    b = bench.run()
    b.plot()


def test_extended_assert_1():
    """min and max produce different results, so assert_same_results raises."""
    import operator
    import random
    from simple_benchmark import assert_same_results

    arguments = {}
    for exponent in range(2, 10):
        size = 2**exponent
        arguments[size] = [random.random() for _ in range(size)]
    with pytest.raises(AssertionError):
        assert_same_results([min, max], arguments, equality_func=operator.eq)


def test_extended_assert_2():
    """A helper that sorts its input in place must trip assert_not_mutating_input."""
    import operator
    import random
    from simple_benchmark import assert_not_mutating_input

    def sort(l):
        # Deliberately mutates the argument to trigger the assertion.
        l.sort()
        return l

    sizes = (2**exponent for exponent in range(2, 10))
    arguments = {size: [random.random() for _ in range(size)] for size in sizes}
    with pytest.raises(AssertionError):
        assert_not_mutating_input([sorted, sort], arguments, equality_func=operator.eq)


@pytest.mark.slow
def test_extended_time_and_max():
    """The docs example with per-benchmark and maximum time limits runs."""
    from datetime import timedelta
    from simple_benchmark import benchmark

    def O_n(n):
        # Linear amount of work.
        for _ in range(n):
            pass

    def O_n_squared(n):
        # Quadratic amount of work.
        for _ in range(n ** 2):
            pass

    def O_n_cube(n):
        # Cubic amount of work.
        for _ in range(n ** 3):
            pass

    half_second = timedelta(milliseconds=500)
    b = benchmark(
        [O_n, O_n_squared, O_n_cube],
        {2**exp: 2**exp for exp in range(2, 15)},
        time_per_benchmark=half_second,
        maximum_time=half_second,
    )

    b.plot()

0 comments on commit b285ebb

Please sign in to comment.