diff --git a/.coveragerc b/.coveragerc index 40c661b7..124c7c86 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,5 +1,9 @@ [run] -omit = */tests/* +omit = + */tests/* + */utils/toys/* + */utils/log.py + [report] exclude_lines = _log diff --git a/.gitignore b/.gitignore index c22f5005..75d1a1a4 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,7 @@ dist/ *.egg-info* .tox/ venv* -.coverage +.coverage* .idea *env* .venv* diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..122da47b --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include hypernetx/utils/toys/HarryPotter_Characters.csv diff --git a/Makefile b/Makefile index 0c7be1a9..83b59381 100644 --- a/Makefile +++ b/Makefile @@ -11,27 +11,22 @@ test: test-deps @$(PYTHON3) -m tox test-ci: test-deps - @$(PYTHON3) -m pip install 'pytest-github-actions-annotate-failures>=0.1.7' pre-commit install pre-commit run --all-files - @$(PYTHON3) -m tox -e py38 -r + @$(PYTHON3) -m tox test-ci-github: test-deps @$(PYTHON3) -m pip install 'pytest-github-actions-annotate-failures>=0.1.7' @$(PYTHON3) -m tox -test-coverage: test-deps - coverage run --source=hypernetx -m pytest - coverage html - -.PHONY: test, test-ci, test-ci-github, test-coverage +.PHONY: test, test-ci, test-ci-github ## Continuous Deployment ## Assumes that scripts are run on a container or test server VM ### Publish to PyPi publish-deps: - @$(PYTHON3) -m pip install -e .'[packaging]' + @$(PYTHON3) -m pip install -e .'[packaging]' --use-pep517 build-dist: publish-deps clean @$(PYTHON3) -m build --wheel --sdist @@ -48,7 +43,7 @@ publish-to-pypi: publish-deps build-dist ### Update version version-deps: - @$(PYTHON3) -m pip install .'[releases]' + @$(PYTHON3) -m pip install .'[releases]' --use-pep517 .PHONY: version-deps diff --git a/hypernetx/classes/tests/conftest.py b/hypernetx/classes/tests/conftest.py index 25ba8294..dca99432 100644 --- a/hypernetx/classes/tests/conftest.py +++ b/hypernetx/classes/tests/conftest.py @@ -6,6 +6,8 @@ import numpy as np from hypernetx import Hypergraph, HarryPotter, EntitySet, LesMis as LM +from hypernetx.classes.helpers import create_dataframe + from collections import OrderedDict, defaultdict @@ -40,8 +42,8 @@ def __init__(self, static=False): ) self.labels = OrderedDict( [ - ("edges", ["P", "R", "S", "L", "O", "I"]), - ("nodes", ["A", "C", "E", "K", "T1", "T2", "V"]), + ("edges", [p, r, s, l, o, i]), + ("nodes", [a, c, e, k, t1, t2, v]), ] ) @@ -49,22 +51,24 @@ def __init__(self, static=False): [ [0, 0], [0, 1], - [0, 2], + [0, 3], + [1, 0], [1, 2], - [1, 3], [2, 0], - [2, 2], - [2, 4], + [2, 3], [2, 5], + [2, 6], [3, 1], - [3, 3], + [3, 2], + [4, 4], [4, 5], - [4, 6], - [5, 0], + [5, 3], [5, 5], ] ) + self.dataframe = create_dataframe(self.edgedict) + class TriLoop: """Example hypergraph with 2 two 1-cells and 1 2-cell forming a loop""" @@ -100,6 +104,8 @@ def __init__(self): ] ) + self.dataframe = create_dataframe(self.edgedict) + class LesMis: def __init__(self): @@ -146,11 +152,66 @@ def __init__(self, n1, n2): self.left, self.right = nx.bipartite.sets(self.g) +@pytest.fixture +def props_dataframe(): + multi_index = pd.MultiIndex.from_tuples([(0, "P")], names=["level", "id"]) + data = { + "properties": [{"prop1": "propval1", "prop2": "propval2"}], + } + return pd.DataFrame(data, index=multi_index) + + +@pytest.fixture +def cell_props_dataframe_multidx(): + multi_index = pd.MultiIndex.from_tuples([("P", "A"), ("P", "C")], names=[0, 1]) + data = { + "cell_properties": [ + {"prop1": "propval1", "prop2": "propval2"}, + {"prop1": 
"propval1", "prop2": "propval2"}, + ] + } + + return pd.DataFrame(data, index=multi_index) + + +@pytest.fixture +def cell_props_dataframe(): + data = { + 0: ["P", "P"], + 1: ["A", "C"], + "cell_properties": [ + {"prop1": "propval1", "prop2": "propval2"}, + {"prop1": "propval1", "prop2": "propval2"}, + ], + } + return pd.DataFrame(data) + + @pytest.fixture def sbs(): return SevenBySix() +@pytest.fixture +def sbs_dataframe(sbs): + return sbs.dataframe + + +@pytest.fixture +def sbs_dict(sbs): + return sbs.edgedict + + +@pytest.fixture +def sbs_data(sbs): + return np.asarray(sbs.data) + + +@pytest.fixture +def sbs_labels(sbs): + return sbs.labels + + @pytest.fixture def triloop(): return TriLoop() @@ -176,6 +237,11 @@ def sbs_graph(sbs): return G +@pytest.fixture +def sbsd(): + return SBSDupes() + + @pytest.fixture def sbsd_hypergraph(): sbsd = SBSDupes() @@ -217,6 +283,7 @@ def dataframe(): @pytest.fixture def dataframe_example(): + """NOTE: Do not use this dataframe as an input for 'entity' when creating an EntitySet object""" M = np.array([[1, 1, 0, 0], [0, 1, 1, 0], [1, 0, 1, 0]]) index = ["A", "B", "C"] columns = ["a", "b", "c", "d"] diff --git a/hypernetx/classes/tests/test_entityset.py b/hypernetx/classes/tests/test_entityset.py deleted file mode 100644 index ff9e1f37..00000000 --- a/hypernetx/classes/tests/test_entityset.py +++ /dev/null @@ -1,371 +0,0 @@ -import numpy as np -import pytest - -from collections.abc import Iterable -from collections import UserList -from hypernetx.classes import EntitySet -from hypernetx.classes.entityset import restrict_to_two_columns - -from pandas import DataFrame, Series - - -def test_empty_entityset(): - es = EntitySet() - assert es.empty - assert len(es.elements) == 0 - assert es.elements == {} - assert es.dimsize == 0 - - -def test_entityset_from_dataframe(): - data_dict = { - 1: ["A", "D"], - 2: ["A", "C", "D"], - 3: ["D"], - 4: ["A", "B"], - 5: ["B", "C"], - } - - all_edge_pairs = Series(data_dict).explode() - - entity = DataFrame( - {"edges": all_edge_pairs.index.to_list(), "nodes": all_edge_pairs.values} - ) - - es = EntitySet(entity=entity) - - assert not es.empty - assert len(es.elements) == 5 - assert es.dimsize == 2 - assert es.uid is None - - -class TestEntitySetOnSevenBySixDataset: - # Tests on different inputs for entity and data - def test_entityset_from_dictionary(self, sbs): - ent = EntitySet(entity=sbs.edgedict) - assert len(ent.elements) == 6 - - def test_entityset_from_ndarray_sbs(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - - assert ent_sbs.size() == 6 - assert len(ent_sbs.uidset) == 6 - assert len(ent_sbs.children) == 7 - assert isinstance(ent_sbs.incidence_dict["I"], list) - assert "I" in ent_sbs - assert "K" in ent_sbs - - # Tests for properties - @pytest.mark.skip(reason="TODO: implement") - def test_cell_properties(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_cell_weights(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_children(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_data(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_dataframe(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_dimensions(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_dimsize(self): - pass - - def test_dimensions_equal_dimsize(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.dimsize == len(ent_sbs.dimensions) - - 
@pytest.mark.skip(reason="TODO: implement") - def test_elements(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_empty(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_incidence_dict(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_isstatic(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_labels(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_memberships(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_properties(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_uid(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_uidset(self): - pass - - # Tests for methods - @pytest.mark.skip(reason="TODO: implement") - def test_add(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_add_element(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_add_elements_from(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_assign_properties(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_collapse_identitical_elements(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_elements_by_column(self): - pass - - def test_elements_by_level(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.elements_by_level(0, 1) - - @pytest.mark.skip(reason="TODO: implement") - def test_encode(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_get_cell_properties(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_get_cell_property(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_get_properties(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_get_property(self): - pass - - def test_incidence_matrix(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.incidence_matrix(1, 0).todense().shape == (6, 7) - - def test_index(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.index("nodes") == 1 - assert ent_sbs.index("nodes", "K") == (1, 3) - - def test_indices(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.indices("nodes", "K") == [3] - assert ent_sbs.indices("nodes", ["K", "T1"]) == [3, 4] - - @pytest.mark.skip(reason="TODO: implement") - def test_is_empty(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_level(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_remove(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_remove_elements(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_restrict_to(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_restrict_to_indices(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_restrict_to_levels(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_set_cell_property(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_set_property(self): - pass - - @pytest.mark.skip(reason="TODO: implement") - def test_size(self): - pass - - def test_translate(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.translate(0, 0) == "P" - assert ent_sbs.translate(1, [3, 4]) == ["K", "T1"] - - def test_translate_arr(self, sbs): - ent_sbs = 
EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.translate_arr((0, 0)) == ["P", "A"] - - @pytest.mark.skip(reason="TODO: implement") - def test_uidset_by_column(self): - pass - - def test_uidset_by_level(self, sbs): - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - - assert ent_sbs.uidset_by_level(0) == {"I", "L", "O", "P", "R", "S"} - assert ent_sbs.uidset_by_level(1) == {"A", "C", "E", "K", "T1", "T2", "V"} - - -class TestEntitySetOnHarryPotterDataSet: - def test_entityset_from_ndarray(self, harry_potter): - ent_hp = EntitySet( - data=np.asarray(harry_potter.data), labels=harry_potter.labels - ) - assert len(ent_hp.uidset) == 7 - assert len(ent_hp.elements) == 7 - assert isinstance(ent_hp.elements["Hufflepuff"], UserList) - assert not ent_hp.is_empty() - assert len(ent_hp.incidence_dict["Gryffindor"]) == 6 - - def test_custom_attributes(self, harry_potter): - ent_hp = EntitySet( - data=np.asarray(harry_potter.data), labels=harry_potter.labels - ) - assert ent_hp.__len__() == 7 - assert isinstance(ent_hp.__str__(), str) - assert isinstance(ent_hp.__repr__(), str) - assert isinstance(ent_hp.__contains__("Muggle"), bool) - assert ent_hp.__contains__("Muggle") is True - assert ent_hp.__getitem__("Slytherin") == [ - "Half-blood", - "Pure-blood", - "Pure-blood or half-blood", - ] - assert isinstance(ent_hp.__iter__(), Iterable) - assert isinstance(ent_hp.__call__(), Iterable) - assert ent_hp.__call__().__next__() == "Unknown House" - - def test_restrict_to_levels(self, harry_potter): - ent_hp = EntitySet( - data=np.asarray(harry_potter.data), labels=harry_potter.labels - ) - assert len(ent_hp.restrict_to_levels([0]).uidset) == 7 - - def test_restrict_to_indices(self, harry_potter): - ent_hp = EntitySet( - data=np.asarray(harry_potter.data), labels=harry_potter.labels - ) - assert ent_hp.restrict_to_indices([1, 2]).uidset == { - "Gryffindor", - "Ravenclaw", - } - - -# testing entityset helpers - - -def test_restrict_to_two_columns_on_ndarray(harry_potter): - data = np.asarray(harry_potter.data) - labels = harry_potter.labels - expected_num_cols = 2 - expected_ndarray_first_row = np.array([1, 1]) - - entity, data, labels = restrict_to_two_columns( - entity=None, - data=data, - labels=labels, - cell_properties=None, - weight_col="cell_weights", - weights=1, - level1=0, - level2=1, - misc_cell_props_col="properties", - ) - - assert entity is None - assert len(labels) == 2 - assert 0 in labels - assert 1 in labels - - print(data) - print(type(data[0])) - - assert data.shape[1] == expected_num_cols - assert np.array_equal(data[0], expected_ndarray_first_row) - - -@pytest.mark.skip(reason="TODO: implement") -def test_restrict_to_two_columns_on_dataframe(sbs): - pass - - -@pytest.mark.skip(reason="TODO: implement") -def build_dataframe_from_entity_on_dataframe(sbs): - pass - - -@pytest.mark.xfail( - reason="at some point we are casting out and back to categorical dtype without preserving categories ordering from `labels` provided to constructor" -) -def test_level(sbs): - # TODO: at some point we are casting out and back to categorical dtype without - # preserving categories ordering from `labels` provided to constructor - ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) - assert ent_sbs.level("I") == (0, 5) # fails - assert ent_sbs.level("K") == (1, 3) - assert ent_sbs.level("K", max_level=0) is None - - -@pytest.mark.xfail( - reason="Entity does not remove row duplicates from self._data if constructed from np.ndarray, defaults to first two 
cols as data cols" -) -def test_attributes(ent_hp): - assert isinstance(ent_hp.data, np.ndarray) - # TODO: Entity does not remove row duplicates from self._data if constructed from np.ndarray - assert ent_hp.data.shape == ent_hp.dataframe[ent_hp._data_cols].shape # fails - assert isinstance(ent_hp.labels, dict) - # TODO: Entity defaults to first two cols as data cols - assert ent_hp.dimensions == (7, 11, 10, 36, 26) # fails - assert ent_hp.dimsize == 5 # fails - df = ent_hp.dataframe[ent_hp._data_cols] - assert list(df.columns) == [ # fails - "House", - "Blood status", - "Species", - "Hair colour", - "Eye colour", - ] - assert ent_hp.dimensions == tuple(df.nunique()) - assert set(ent_hp.labels["House"]) == set(df["House"].unique()) diff --git a/hypernetx/classes/tests/test_entityset_empty.py b/hypernetx/classes/tests/test_entityset_empty.py new file mode 100644 index 00000000..67271c21 --- /dev/null +++ b/hypernetx/classes/tests/test_entityset_empty.py @@ -0,0 +1,37 @@ +import numpy as np +import pytest + +from hypernetx.classes import EntitySet + + +def test_empty_entityset(): + es = EntitySet() + assert es.empty + assert len(es.elements) == 0 + assert es.elements == {} + assert es.dimsize == 0 + + assert isinstance(es.data, np.ndarray) + assert es.data.shape == (0, 0) + + assert es.labels == {} + assert es.cell_weights == {} + assert es.isstatic + assert es.incidence_dict == {} + assert "foo" not in es + assert es.incidence_matrix() is None + + assert es.size() == 0 + + with pytest.raises(AttributeError): + es.get_cell_property("foo", "bar", "roma") + with pytest.raises(AttributeError): + es.get_cell_properties("foo", "bar") + with pytest.raises(KeyError): + es.set_cell_property("foo", "bar", "roma", "ff") + with pytest.raises(KeyError): + es.get_properties("foo") + with pytest.raises(KeyError): + es.get_property("foo", "bar") + with pytest.raises(ValueError): + es.set_property("foo", "bar", "roma") diff --git a/hypernetx/classes/tests/test_entityset_harry_potter_data.py b/hypernetx/classes/tests/test_entityset_harry_potter_data.py new file mode 100644 index 00000000..63bdb684 --- /dev/null +++ b/hypernetx/classes/tests/test_entityset_harry_potter_data.py @@ -0,0 +1,75 @@ +import numpy as np +import pytest + +from collections.abc import Iterable +from collections import UserList +from hypernetx.classes import EntitySet + + +@pytest.mark.xfail( + reason="Entity does not remove row duplicates from self._data if constructed from np.ndarray, defaults to first two cols as data cols" +) +def test_attributes(harry_potter): + assert isinstance(harry_potter.data, np.ndarray) + ent_hp = EntitySet(data=np.asarray(harry_potter.data), labels=harry_potter.labels) + # TODO: Entity does not remove row duplicates from self._data if constructed from np.ndarray + assert ent_hp.data.shape == ent_hp.dataframe[ent_hp._data_cols].shape # fails + assert isinstance(ent_hp.labels, dict) + # TODO: Entity defaults to first two cols as data cols + assert ent_hp.dimensions == (7, 11, 10, 36, 26) # fails + assert ent_hp.dimsize == 5 # fails + df = ent_hp.dataframe[ent_hp._data_cols] + assert list(df.columns) == [ # fails + "House", + "Blood status", + "Species", + "Hair colour", + "Eye colour", + ] + assert ent_hp.dimensions == tuple(df.nunique()) + assert set(ent_hp.labels["House"]) == set(df["House"].unique()) + + +class TestEntitySetOnHarryPotterDataSet: + def test_entityset_from_ndarray(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert 
len(ent_hp.uidset) == 7 + assert len(ent_hp.elements) == 7 + assert isinstance(ent_hp.elements["Hufflepuff"], UserList) + assert not ent_hp.is_empty() + assert len(ent_hp.incidence_dict["Gryffindor"]) == 6 + + def test_custom_attributes(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert ent_hp.__len__() == 7 + assert isinstance(ent_hp.__str__(), str) + assert isinstance(ent_hp.__repr__(), str) + assert isinstance(ent_hp.__contains__("Muggle"), bool) + assert ent_hp.__contains__("Muggle") is True + assert ent_hp.__getitem__("Slytherin") == [ + "Half-blood", + "Pure-blood", + "Pure-blood or half-blood", + ] + assert isinstance(ent_hp.__iter__(), Iterable) + assert isinstance(ent_hp.__call__(), Iterable) + assert ent_hp.__call__().__next__() == "Unknown House" + + def test_restrict_to_levels(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert len(ent_hp.restrict_to_levels([0]).uidset) == 7 + + def test_restrict_to_indices(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert ent_hp.restrict_to_indices([1, 2]).uidset == { + "Gryffindor", + "Ravenclaw", + } diff --git a/hypernetx/classes/tests/test_entityset_on_dataframe.py b/hypernetx/classes/tests/test_entityset_on_dataframe.py new file mode 100644 index 00000000..d49ee408 --- /dev/null +++ b/hypernetx/classes/tests/test_entityset_on_dataframe.py @@ -0,0 +1,412 @@ +import pytest + +import pandas as pd +import numpy as np + +from pytest_lazyfixture import lazy_fixture + +from hypernetx import EntitySet + + +class TestEntitySetOnSBSDataframe: + @pytest.fixture + def es_from_df(self, sbs): + return EntitySet(entity=sbs.dataframe) + + @pytest.fixture + def es_from_dupe_df(self, sbsd): + return EntitySet(entity=sbsd.dataframe) + + # check all methods + @pytest.mark.parametrize( + "data", + [ + pd.DataFrame({0: ["P"], 1: ["E"]}), + {0: ["P"], 1: ["E"]}, + EntitySet(entity={"P": ["E"]}), + ], + ) + def test_add(self, es_from_df, data): + assert es_from_df.data.shape == (15, 2) + assert es_from_df.dataframe.size == 45 + + es_from_df.add(data) + + assert es_from_df.data.shape == (16, 2) + assert es_from_df.dataframe.size == 48 + + def test_remove(self, es_from_df): + assert es_from_df.data.shape == (15, 2) + assert es_from_df.dataframe.size == 45 + + es_from_df.remove("P") + + assert es_from_df.data.shape == (12, 2) + assert es_from_df.dataframe.size == 36 + assert "P" not in es_from_df.elements + + @pytest.mark.parametrize( + "props, multidx, expected_props", + [ + ( + lazy_fixture("props_dataframe"), + (0, "P"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ( + {0: {"P": {"prop1": "propval1", "prop2": "propval2"}}}, + (0, "P"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ( + {1: {"A": {"prop1": "propval1", "prop2": "propval2"}}}, + (1, "A"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ], + ) + def test_assign_properties(self, es_from_df, props, multidx, expected_props): + original_prop = es_from_df.properties.loc[multidx] + assert original_prop.properties == {} + + es_from_df.assign_properties(props) + + updated_prop = es_from_df.properties.loc[multidx] + assert updated_prop.properties == expected_props + + @pytest.mark.parametrize( + "cell_props, multidx, expected_cell_properties", + [ + ( + lazy_fixture("cell_props_dataframe"), + ("P", "A"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ( + 
lazy_fixture("cell_props_dataframe_multidx"), + ("P", "A"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ( + {"P": {"A": {"prop1": "propval1", "prop2": "propval2"}}}, + ("P", "A"), + {"prop1": "propval1", "prop2": "propval2"}, + ), + ], + ) + def test_assign_cell_properties_on_default_cell_properties( + self, es_from_df, cell_props, multidx, expected_cell_properties + ): + es_from_df.assign_cell_properties(cell_props=cell_props) + + updated_cell_prop = es_from_df.cell_properties.loc[multidx] + + assert updated_cell_prop.cell_properties == expected_cell_properties + + def test_assign_cell_properties_on_multiple_properties(self, es_from_df): + multidx = ("P", "A") + + es_from_df.assign_cell_properties( + cell_props={"P": {"A": {"prop1": "propval1", "prop2": "propval2"}}} + ) + + updated_cell_prop = es_from_df.cell_properties.loc[multidx] + assert updated_cell_prop.cell_properties == { + "prop1": "propval1", + "prop2": "propval2", + } + + es_from_df.assign_cell_properties( + cell_props={ + "P": { + "A": {"prop1": "propval1", "prop2": "propval2", "prop3": "propval3"} + } + } + ) + + updated_cell_prop = es_from_df.cell_properties.loc[multidx] + assert updated_cell_prop.cell_properties == { + "prop1": "propval1", + "prop2": "propval2", + "prop3": "propval3", + } + + def test_set_cell_property_on_cell_weights(self, es_from_df): + item1 = "P" + item2 = "A" + prop_name = "cell_weights" + prop_val = 42 + + es_from_df.set_cell_property(item1, item2, prop_name, prop_val) + + assert es_from_df.cell_properties.loc[(item1, item2), prop_name] == 42.0 + + # Check that the other cell_weights were not changed and retained the default value of 1 + for row in es_from_df.cell_properties.itertuples(): + if row.Index != (item1, item2): + assert row.cell_weights == 1 + + def test_set_cell_property_on_non_exisiting_cell_property(self, es_from_df): + item1 = "P" + item2 = "A" + prop_name = "non_existing_cell_property" + prop_val = {"foo": "bar"} + es_from_df.set_cell_property(item1, item2, prop_name, prop_val) + + assert es_from_df.cell_properties.loc[(item1, item2), "cell_properties"] == { + prop_name: prop_val + } + + # Check that the other rows received the default empty dictionary + for row in es_from_df.cell_properties.itertuples(): + if row.Index != (item1, item2): + assert row.cell_properties == {} + + item2 = "K" + es_from_df.set_cell_property(item1, item2, prop_name, prop_val) + + assert es_from_df.cell_properties.loc[(item1, item2), "cell_properties"] == { + prop_name: prop_val + } + + @pytest.mark.parametrize("ret_ec", [True, False]) + def test_collapse_identical_elements_on_duplicates(self, es_from_dupe_df, ret_ec): + # There are two edges that share the same set of 3 (three) nodes + new_es = es_from_dupe_df.collapse_identical_elements( + return_equivalence_classes=ret_ec + ) + + es_temp = new_es + if isinstance(new_es, tuple): + # reset variable for actual EntitySet + es_temp = new_es[0] + + # check equiv classes + collapsed_edge_key = "L: 2" + assert "M: 2" not in es_temp.elements + assert collapsed_edge_key in es_temp.elements + assert set(es_temp.elements.get(collapsed_edge_key)) == {"F", "C", "E"} + + equiv_classes = new_es[1] + assert equiv_classes == { + "I: 1": ["I"], + "L: 2": ["L", "M"], + "O: 1": ["O"], + "P: 1": ["P"], + "R: 1": ["R"], + "S: 1": ["S"], + } + + # check dataframe + assert len(es_temp.dataframe) != len(es_from_dupe_df.dataframe) + assert len(es_temp.dataframe) == len(es_from_dupe_df.dataframe) - 3 + + @pytest.mark.parametrize( + "col1, col2, expected_elements", + [ + ( + 
0, + 1, + { + "I": {"K", "T2"}, + "L": {"C", "E"}, + "O": {"T1", "T2"}, + "P": {"K", "A", "C"}, + "R": {"A", "E"}, + "S": {"K", "A", "V", "T2"}, + }, + ), + ( + 1, + 0, + { + "A": {"P", "R", "S"}, + "C": {"P", "L"}, + "E": {"R", "L"}, + "K": {"P", "S", "I"}, + "T1": {"O"}, + "T2": {"S", "O", "I"}, + "V": {"S"}, + }, + ), + ], + ) + def test_elements_by_column(self, es_from_df, col1, col2, expected_elements): + elements_temps = es_from_df.elements_by_column(col1, col2) + actual_elements = { + elements_temps[k]._key[1]: set(v) for k, v in elements_temps.items() + } + + assert actual_elements == expected_elements + + def test_elements_by_level(self, sbs): + ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) + assert ent_sbs.elements_by_level(0, 1) + + def test_encode(self, es_from_df): + df = pd.DataFrame({"Category": ["A", "B", "A", "C", "B"]}) + # Convert 'Category' column to categorical + df["Category"] = df["Category"].astype("category") + + expected_arr = np.array([[0], [1], [0], [2], [1]]) + actual_arr = es_from_df.encode(df) + + assert np.array_equal(actual_arr, expected_arr) + + def test_get_cell_properties(self, es_from_df): + props = es_from_df.get_cell_properties("P", "A") + + assert props == {"cell_weights": 1} + + def test_get_cell_properties_raises_keyerror(self, es_from_df): + assert es_from_df.get_cell_properties("P", "FOOBAR") is None + + def test_get_cell_property(self, es_from_df): + props = es_from_df.get_cell_property("P", "A", "cell_weights") + assert props == 1 + + @pytest.mark.parametrize( + "item1, item2, prop_name, err_msg", + [ + ("P", "FOO", "cell_weights", "Item not exists. cell_properties:"), + ], + ) + def test_get_cell_property_raises_keyerror( + self, es_from_df, item1, item2, prop_name, err_msg + ): + with pytest.raises(KeyError, match=err_msg): + es_from_df.get_cell_property(item1, item2, prop_name) + + def test_get_cell_property_returns_none_on_prop(self, es_from_df): + assert es_from_df.get_cell_property("P", "A", "Not a real property") is None + + @pytest.mark.parametrize("item, level", [("P", 0), ("P", None), ("A", 1)]) + def test_get_properties(self, es_from_df, item, level): + # to avoid duplicate test code, reuse 'level' to get the item_uid + # but if level is None, assume it to be 0 and that the item exists at level 0 + if level is None: + item_uid = es_from_df.properties.loc[(0, item), "uid"] + else: + item_uid = es_from_df.properties.loc[(level, item), "uid"] + + props = es_from_df.get_properties(item, level=level) + + assert props == {"uid": item_uid, "weight": 1, "properties": {}} + + @pytest.mark.parametrize( + "item, level, err_msg", + [ + ("Not a valid item", None, ""), + ("Not a valid item", 0, "no properties initialized for"), + ], + ) + def test_get_properties_raises_keyerror(self, es_from_df, item, level, err_msg): + with pytest.raises(KeyError, match=err_msg): + es_from_df.get_properties(item, level=level) + + @pytest.mark.parametrize( + "item, prop_name, level, expected_prop", + [ + ("P", "weight", 0, 1), + ("P", "properties", 0, {}), + ("P", "uid", 0, 3), + ("A", "weight", 1, 1), + ("A", "properties", 1, {}), + ("A", "uid", 1, 6), + ], + ) + def test_get_property(self, es_from_df, item, prop_name, level, expected_prop): + prop = es_from_df.get_property(item, prop_name, level) + + assert prop == expected_prop + + @pytest.mark.parametrize( + "item, prop_name, err_msg", + [ + ("XXX", "weight", "item does not exist:"), + ], + ) + def test_get_property_raises_keyerror(self, es_from_df, item, prop_name, err_msg): + with 
pytest.raises(KeyError, match=err_msg): + es_from_df.get_property(item, prop_name) + + def test_get_property_returns_none_on_no_property(self, es_from_df): + assert es_from_df.get_property("P", "non-existing property") is None + + @pytest.mark.parametrize( + "item, prop_name, prop_val, level", + [ + ("P", "weight", 42, 0), + ], + ) + def test_set_property(self, es_from_df, item, prop_name, prop_val, level): + orig_prop_val = es_from_df.get_property(item, prop_name, level) + + es_from_df.set_property(item, prop_name, prop_val, level) + + new_prop_val = es_from_df.get_property(item, prop_name, level) + + assert new_prop_val != orig_prop_val + assert new_prop_val == prop_val + + @pytest.mark.parametrize( + "item, prop_name, prop_val, level, misc_props_col", + [ + ("P", "new_prop", "foobar", 0, "properties"), + ("P", "new_prop", "foobar", 0, "some_new_miscellaneaus_col"), + ], + ) + def test_set_property_on_non_existing_property( + self, es_from_df, item, prop_name, prop_val, level, misc_props_col + ): + es_from_df.set_property(item, prop_name, prop_val, level) + + new_prop_val = es_from_df.get_property(item, prop_name, level) + + assert new_prop_val == prop_val + + def test_set_property_raises_keyerror(self, es_from_df): + with pytest.raises( + ValueError, match="cannot infer 'level' when initializing 'item' properties" + ): + es_from_df.set_property("XXXX", "weight", 42) + + def test_incidence_matrix(self, sbs): + ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) + assert ent_sbs.incidence_matrix(1, 0).todense().shape == (6, 7) + + def test_index(self, sbs): + ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) + assert ent_sbs.index("nodes") == 1 + assert ent_sbs.index("nodes", "K") == (1, 3) + + def test_indices(self, sbs): + ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) + assert ent_sbs.indices("nodes", "K") == [3] + assert ent_sbs.indices("nodes", ["K", "T1"]) == [3, 4] + + @pytest.mark.parametrize("level", [0, 1]) + def test_is_empty(self, es_from_df, level): + assert not es_from_df.is_empty(level) + + @pytest.mark.parametrize( + "item_level, item, min_level, max_level, expected_lidx", + [ + (0, "P", 0, None, (0, 3)), + (0, "P", 0, 0, (0, 3)), + (0, "P", 1, 1, None), + (1, "A", 0, None, (1, 0)), + (1, "A", 0, 0, None), + (1, "K", 0, None, (1, 3)), + ], + ) + def test_level( + self, es_from_df, item_level, item, min_level, max_level, expected_lidx + ): + actual_lidx = es_from_df.level(item, min_level=min_level, max_level=max_level) + + assert actual_lidx == expected_lidx + + if isinstance(actual_lidx, tuple): + index_item_in_labels = actual_lidx[1] + assert index_item_in_labels == es_from_df.labels[item_level].index(item) diff --git a/hypernetx/classes/tests/test_entityset_on_dict.py b/hypernetx/classes/tests/test_entityset_on_dict.py new file mode 100644 index 00000000..9b0e8982 --- /dev/null +++ b/hypernetx/classes/tests/test_entityset_on_dict.py @@ -0,0 +1,177 @@ +import numpy as np +import pytest + +from pytest_lazyfixture import lazy_fixture + +from hypernetx.classes import EntitySet + + +@pytest.mark.parametrize( + "entity, data, data_cols, labels", + [ + (lazy_fixture("sbs_dict"), None, (0, 1), None), + (lazy_fixture("sbs_dict"), None, (0, 1), lazy_fixture("sbs_labels")), + (lazy_fixture("sbs_dict"), None, ["edges", "nodes"], None), + (lazy_fixture("sbs_dict"), lazy_fixture("sbs_data"), (0, 1), None), + (None, lazy_fixture("sbs_data"), (0, 1), lazy_fixture("sbs_labels")), + ], +) +class TestEntitySBSDict: + """Tests on different 
use cases for combination of the following params: entity, data, data_cols, labels""" + + def test_size(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.size() == len(sbs.edgedict) + + # check all the EntitySet properties + def test_isstatic(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.isstatic + + def test_uid(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.uid is None + + def test_empty(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert not es.empty + + def test_uidset(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.uidset == {"I", "R", "S", "P", "O", "L"} + + def test_dimsize(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.dimsize == 2 + + def test_elements(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert len(es.elements) == 6 + expected_elements = { + "I": ["K", "T2"], + "L": ["E", "C"], + "O": ["T1", "T2"], + "P": ["C", "K", "A"], + "R": ["E", "A"], + "S": ["K", "V", "A", "T2"], + } + for expected_edge, expected_nodes in expected_elements.items(): + assert expected_edge in es.elements + assert es.elements[expected_edge].sort() == expected_nodes.sort() + + def test_incident_dict(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + expected_incident_dict = { + "I": ["K", "T2"], + "L": ["E", "C"], + "O": ["T1", "T2"], + "P": ["C", "K", "A"], + "R": ["E", "A"], + "S": ["K", "V", "A", "T2"], + } + for expected_edge, expected_nodes in expected_incident_dict.items(): + assert expected_edge in es.incidence_dict + assert es.incidence_dict[expected_edge].sort() == expected_nodes.sort() + assert isinstance(es.incidence_dict["I"], list) + assert "I" in es + assert "K" in es + + def test_children(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.children == {"C", "T1", "A", "K", "T2", "V", "E"} + + def test_memberships(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.memberships == { + "A": ["P", "R", "S"], + "C": ["P", "L"], + "E": ["R", "L"], + "K": ["P", "S", "I"], + "T1": ["O"], + "T2": ["S", "O", "I"], + "V": ["S"], + } + + def test_cell_properties(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.cell_properties.shape == ( + 15, + 1, + ) + + def test_cell_weights(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert es.cell_weights == { + ("P", "C"): 1, + ("P", "K"): 1, + ("P", "A"): 1, + ("R", "E"): 1, + ("R", "A"): 1, + ("S", "K"): 1, + ("S", "V"): 1, + ("S", "A"): 1, + ("S", "T2"): 1, + ("L", "E"): 1, + ("L", "C"): 1, + ("O", "T1"): 1, + ("O", "T2"): 1, + ("I", "K"): 1, + ("I", "T2"): 1, + } + + def test_labels(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, 
data=data, data_cols=data_cols, labels=labels) + # check labeling based on given attributes for EntitySet + if data_cols == [ + "edges", + "nodes", + ]: # labels should use the data_cols as keys for labels + assert es.labels == { + "edges": ["I", "L", "O", "P", "R", "S"], + "nodes": ["A", "C", "E", "K", "T1", "T2", "V"], + } + elif (labels is not None and not entity) or ( + labels is not None and data + ): # labels should match the labels explicitly given + assert es.labels == labels + else: # if data_cols or labels not given, labels should conform to default format + assert es.labels == { + 0: ["I", "L", "O", "P", "R", "S"], + 1: ["A", "C", "E", "K", "T1", "T2", "V"], + } + + def test_dataframe(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + # check dataframe + # size should be the number of rows times the number of columns, i.e 15 x 3 + assert es.dataframe.size == 45 + + actual_edge_row0 = es.dataframe.iloc[0, 0] + actual_node_row0 = es.dataframe.iloc[0, 1] + actual_cell_weight_row0 = es.dataframe.loc[0, "cell_weights"] + + assert actual_edge_row0 == "P" + assert actual_node_row0 in ["A", "C", "K"] + assert actual_cell_weight_row0 == 1 + + # TODO: validate state of 'data' + def test_data(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert len(es.data) == 15 + + def test_properties(self, entity, data, data_cols, labels, sbs): + es = EntitySet(entity=entity, data=data, data_cols=data_cols, labels=labels) + assert ( + es.properties.size == 39 + ) # Properties has three columns and 13 rows of data (i.e. edges + nodes) + assert list(es.properties.columns) == ["uid", "weight", "properties"] + + +@pytest.mark.xfail(reason="Deprecated; to be removed in next released") +def test_level(sbs): + # at some point we are casting out and back to categorical dtype without + # preserving categories ordering from `labels` provided to constructor + ent_sbs = EntitySet(data=np.asarray(sbs.data), labels=sbs.labels) + assert ent_sbs.level("I") == (0, 5) # fails + assert ent_sbs.level("K") == (1, 3) + assert ent_sbs.level("K", max_level=0) is None diff --git a/hypernetx/classes/tests/test_entityset_on_np_array.py b/hypernetx/classes/tests/test_entityset_on_np_array.py new file mode 100644 index 00000000..f4fd04de --- /dev/null +++ b/hypernetx/classes/tests/test_entityset_on_np_array.py @@ -0,0 +1,108 @@ +import pytest +import numpy as np + +from collections.abc import Iterable +from collections import UserList + +from hypernetx import EntitySet + + +class TestEntitySetOnSBSasNDArray: + def test_ndarray_fail_on_labels(self, sbs_data): + with pytest.raises(ValueError, match="Labels must be of type Dictionary."): + EntitySet(data=np.asarray(sbs_data), labels=[]) + + def test_ndarray_fail_on_length_labels(self, sbs_data): + with pytest.raises( + ValueError, + match="The length of labels must equal the length of columns in the dataframe.", + ): + EntitySet(data=np.asarray(sbs_data), labels=dict()) + + def test_dimensions_equal_dimsize(self, sbs_data, sbs_labels): + ent_sbs = EntitySet(data=np.asarray(sbs_data), labels=sbs_labels) + assert ent_sbs.dimsize == len(ent_sbs.dimensions) + + def test_translate(self, sbs_data, sbs_labels): + ent_sbs = EntitySet(data=np.asarray(sbs_data), labels=sbs_labels) + assert ent_sbs.translate(0, 0) == "P" + assert ent_sbs.translate(1, [3, 4]) == ["K", "T1"] + + def test_translate_arr(self, sbs_data, sbs_labels): + ent_sbs = 
EntitySet(data=np.asarray(sbs_data), labels=sbs_labels) + assert ent_sbs.translate_arr((0, 0)) == ["P", "A"] + + def test_uidset_by_level(self, sbs_data, sbs_labels): + ent_sbs = EntitySet(data=np.asarray(sbs_data), labels=sbs_labels) + + assert ent_sbs.uidset_by_level(0) == {"I", "L", "O", "P", "R", "S"} + assert ent_sbs.uidset_by_level(1) == {"A", "C", "E", "K", "T1", "T2", "V"} + + +class TestEntitySetOnHarryPotterDataSet: + def test_entityset_from_ndarray(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert len(ent_hp.uidset) == 7 + assert len(ent_hp.elements) == 7 + assert isinstance(ent_hp.elements["Hufflepuff"], UserList) + assert not ent_hp.is_empty() + assert len(ent_hp.incidence_dict["Gryffindor"]) == 6 + + def test_custom_attributes(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert ent_hp.__len__() == 7 + assert isinstance(ent_hp.__str__(), str) + assert isinstance(ent_hp.__repr__(), str) + assert isinstance(ent_hp.__contains__("Muggle"), bool) + assert ent_hp.__contains__("Muggle") is True + assert ent_hp.__getitem__("Slytherin") == [ + "Half-blood", + "Pure-blood", + "Pure-blood or half-blood", + ] + assert isinstance(ent_hp.__iter__(), Iterable) + assert isinstance(ent_hp.__call__(), Iterable) + assert ent_hp.__call__().__next__() == "Unknown House" + + def test_restrict_to_levels(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert len(ent_hp.restrict_to_levels([0]).uidset) == 7 + + def test_restrict_to_indices(self, harry_potter): + ent_hp = EntitySet( + data=np.asarray(harry_potter.data), labels=harry_potter.labels + ) + assert ent_hp.restrict_to_indices([1, 2]).uidset == { + "Gryffindor", + "Ravenclaw", + } + + +@pytest.mark.xfail( + reason="Entity does not remove row duplicates from self._data if constructed from np.ndarray, defaults to first two cols as data cols" +) +def test_attributes(harry_potter): + assert isinstance(harry_potter.data, np.ndarray) + ent_hp = EntitySet(data=np.asarray(harry_potter.data), labels=harry_potter.labels) + # TODO: Entity does not remove row duplicates from self._data if constructed from np.ndarray + assert ent_hp.data.shape == ent_hp.dataframe[ent_hp._data_cols].shape # fails + assert isinstance(ent_hp.labels, dict) + # TODO: Entity defaults to first two cols as data cols + assert ent_hp.dimensions == (7, 11, 10, 36, 26) # fails + assert ent_hp.dimsize == 5 # fails + df = ent_hp.dataframe[ent_hp._data_cols] + assert list(df.columns) == [ # fails + "House", + "Blood status", + "Species", + "Hair colour", + "Eye colour", + ] + assert ent_hp.dimensions == tuple(df.nunique()) + assert set(ent_hp.labels["House"]) == set(df["House"].unique()) diff --git a/hypernetx/classes/tests/test_hypergraph.py b/hypernetx/classes/tests/test_hypergraph.py index 60774faa..b183a01e 100644 --- a/hypernetx/classes/tests/test_hypergraph.py +++ b/hypernetx/classes/tests/test_hypergraph.py @@ -2,6 +2,8 @@ import numpy as np from hypernetx.classes.hypergraph import Hypergraph +from networkx.algorithms import bipartite + def test_hypergraph_from_iterable_of_sets(sbs): H = Hypergraph(sbs.edges) @@ -296,11 +298,7 @@ def test_edge_diameter(sbs): def test_bipartite(sbs_hypergraph): - from networkx.algorithms import bipartite - - h = sbs_hypergraph - b = h.bipartite() - assert bipartite.is_bipartite(b) + assert bipartite.is_bipartite(sbs_hypergraph.bipartite()) def 
test_dual(sbs_hypergraph): diff --git a/hypernetx/utils/toys/harrypotter.py b/hypernetx/utils/toys/harrypotter.py index 637b5299..6d575c7e 100644 --- a/hypernetx/utils/toys/harrypotter.py +++ b/hypernetx/utils/toys/harrypotter.py @@ -11,9 +11,7 @@ class HarryPotter(object): def __init__(self, cols=None): - # Read dataset in using pandas. Fix index column or use default pandas index. - try: fname = "https://raw.githubusercontent.com/pnnl/HyperNetX/master/hypernetx/utils/toys/HarryPotter_Characters.csv" harrydata = pd.read_csv(fname, encoding="unicode_escape") diff --git a/pytest.ini b/pytest.ini index 286a2cb1..937fc3a8 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,8 @@ [pytest] minversion = 6.0 -; addopts are a set of command line arguments given to pytest: -; '-r A' will show all extra test summary as indicated by 'a' -addopts = -r A +; addopts are a set of optional arguments given to pytest: +; '-rA' will show a short test summary with the results for every test' +addopts = -rA +testpaths = + hypernetx/classes/tests + hypernetx/classes/algorithms diff --git a/setup.cfg b/setup.cfg index 3c950a32..8204a7e5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -50,6 +50,7 @@ license_files = LICENSE.rst [options] +include_package_data=True packages = hypernetx hypernetx.algorithms @@ -66,28 +67,25 @@ install_requires = scikit-learn>=0.20.0 pandas>=1.5.3 decorator>=5.1.1 + typing-extensions>=4.8.0 [options.extras_require] releases = commitizen>=3.2.1 -linting = - pre-commit>=3.2.2 - pylint>=2.17.2 - pylint-exit>=1.2.0 - black>=23.3.0 testing = + pytest>=7.2.2 + pytest-cov>=4.1.0 + pytest-lazy-fixture>=0.6.3 + pytest-xdist>=3.2.1 + pytest-env tox>=4.4.11 - pre-commit>=3.2.2 + nbmake>=1.4.1 + pre-commit>=3.2.2 pylint>=2.17.2 pylint-exit>=1.2.0 black>=23.3.0 - pytest>=7.2.2 - coverage>=7.2.2 celluloid>=0.2.0 igraph>=0.10.4 - nbmake>=1.4.1 - pytest-lazy-fixture>=0.6.3 - pytest-xdist>=3.2.1 tutorials = jupyter>=1.0 igraph>=0.10.4 @@ -115,7 +113,7 @@ all = sphinx-autobuild>=2021.3.14 sphinx-copybutton>=0.5.1 pytest>=7.2.2 - coverage>=7.2.2 + pytest-cov>=4.1.0 jupyter>=1.0 igraph>=0.10.4 partition-igraph>=0.0.6 diff --git a/tox.ini b/tox.ini index a840d36b..29a92bcc 100644 --- a/tox.ini +++ b/tox.ini @@ -6,35 +6,26 @@ [tox] min_version = 4.4.11 -envlist = py{38,39,310,311} +envlist = clean, py{38,39,310,311} isolated_build = True skip_missing_interpreters = true [testenv] -deps = - pytest>=7.2.2 - coverage>=7.2.2 - celluloid>=0.2.0 - igraph>=0.10.4 - nbmake>=1.4.1 - pytest-lazy-fixture>=0.6.3 - pytest-xdist>=3.2.1 - partition-igraph>=0.0.6 +extras = testing allowlist_externals = env commands = env - python --version - coverage run --source=hypernetx -m pytest - coverage report -m + coverage run -m pytest -n auto --cov=hypernetx --cov-report term --cov-report html --junit-xml=pytest.xml --cov-fail-under=45 [testenv:py38-notebooks] description = run tests on jupyter notebooks -deps = - hnxwidget>=0.1.1b3 - jupyter-contrib-nbextensions>=0.7.0 - jupyter-nbextensions-configurator>=0.6.2 +extras = widget allowlist_externals = env commands = - env - python --version - pytest --nbmake "tutorials/" --junitxml=pytest.xml -n=auto --nbmake-timeout=20 --nbmake-find-import-errors + env + pytest --nbmake "tutorials/" -n=auto --nbmake-timeout=20 --nbmake-find-import-errors + +[testenv:clean] +deps = coverage +skip_install = true +commands = coverage erase
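
Two short sketches follow for patterns this patch leans on; they are illustrative notes, not part of the patch itself.

conftest.py now builds each fixture's dataframe via create_dataframe, imported from hypernetx.classes.helpers; the helper's body is not shown in this patch. A minimal sketch of what such a helper could look like, assuming a {edge: [nodes]} input like SevenBySix.edgedict and following the Series.explode pattern used by the removed test_entityset_from_dataframe (the function name and output columns below are assumptions, not the real helper):

```python
import pandas as pd


def create_dataframe_sketch(edgedict: dict) -> pd.DataFrame:
    # Illustrative stand-in only -- the real helper lives in
    # hypernetx.classes.helpers. Explode {edge: [node, ...]} into the
    # two-column (edge, node) shape that EntitySet(entity=...) accepts.
    pairs = pd.Series(edgedict).explode()
    return pd.DataFrame({0: pairs.index.to_list(), 1: pairs.values})
```

For example, create_dataframe_sketch({"P": ["A", "C", "K"], "R": ["A", "E"]}) yields a five-row frame with edge labels in column 0 and node labels in column 1.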
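The new parametrized tests (e.g. in test_entityset_on_dict.py) pass fixtures through @pytest.mark.parametrize using pytest-lazy-fixture, which this patch adds to the testing extras. A self-contained sketch of that pattern, with a made-up fixture name:

```python
import pytest
from pytest_lazyfixture import lazy_fixture


@pytest.fixture
def tiny_dict():
    # Hypothetical stand-in for fixtures like 'sbs_dict' used in this patch.
    return {"P": ["A", "C", "K"]}


# lazy_fixture defers fixture resolution to test run time, so fixture values
# and plain literals can be mixed in a single parametrize list.
@pytest.mark.parametrize("entity", [lazy_fixture("tiny_dict"), {"R": ["A", "E"]}])
def test_entity_is_a_dict(entity):
    assert isinstance(entity, dict)
```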