diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index 9aaad6f5..a36e4228 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -9,8 +9,8 @@ on:
     branches: [ master ]
 
 env:
-  latest_python: "3.11"
-  supported_pythons: '["3.7", "3.8", "3.9", "3.10", "3.11"]'
+  latest_python: "3.12"
+  supported_pythons: '["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]'
   miniforge_version: "22.9.0-2"
   miniforge_variant: "Mambaforge"
 
@@ -34,7 +34,7 @@ jobs:
     needs: conf
     runs-on: "ubuntu-latest"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: conda-incubator/setup-miniconda@v2
         with:
           auto-update-conda: true
@@ -56,7 +56,7 @@ jobs:
     needs: ["conf", "lint"]
     runs-on: "ubuntu-latest"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: conda-incubator/setup-miniconda@v2
         with:
           auto-update-conda: true
@@ -81,11 +81,11 @@ jobs:
     strategy:
       fail-fast: true
       matrix:
-        os: ["ubuntu-latest", "macos-latest"]
+        os: ["ubuntu-latest", "macos-latest", "windows-latest"]
         python_version: ${{ fromJSON(needs.conf.outputs.supported_pythons) }}
         use_conda: [true, false]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: conda-incubator/setup-miniconda@v2
         with:
           auto-update-conda: true
@@ -115,7 +115,7 @@ jobs:
     needs: ["conf", "lint", "doc", "test-all"]
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       # setup-buildx-action uses the git context directly
       # but checklist wants the .git directory
       - name: Set up QEMU
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index d41a9767..6c77edf4 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,7 +7,7 @@ jobs:
     name: Build sdist
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Build distribution
         run: |
@@ -15,7 +15,7 @@ jobs:
           pip install numpy cython
           pipx run build --sdist
 
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
           name: dist-artifacts
           path: dist/*.tar.gz
@@ -27,13 +27,13 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest]
-        pyver: ["37", "38", "39", "310", "311"]
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        pyver: ["37", "38", "39", "310", "311", "312"]
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v5
         with:
           python-version: 3.9
 
@@ -47,7 +47,7 @@
       - name: Build wheels (py ${{ matrix.pyver }}) Linux
         if: matrix.os == 'ubuntu-latest'
         env:
-          CIBW_ARCHS_LINUX: x86_64
+          CIBW_ARCHS_LINUX: "x86_64 aarch64"
           CIBW_SKIP: "*-musllinux*"
           CIBW_BUILD: "cp${{ matrix.pyver }}-*"
 
@@ -60,10 +60,16 @@
           CIBW_BUILD: "cp${{ matrix.pyver }}-*"
 
         uses: pypa/cibuildwheel@v2.12.3
-
-
+
+
+      - name: Build wheels (py ${{ matrix.pyver }}) Windows
+        if: matrix.os == 'windows-latest'
+        env:
+          CIBW_ARCHS_WINDOWS: "AMD64 x86 ARM64"
+          CIBW_BUILD: "cp${{ matrix.pyver }}-*"
+        uses: pypa/cibuildwheel@v2.12.3
+
       - name: Upload wheels
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: dist-artifacts
           path: ./wheelhouse/*.whl
diff --git a/ChangeLog.md b/ChangeLog.md
index 622634d5..b2f8428b 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -6,6 +6,7 @@ biom 2.1.15-dev
 
 Performance improvements:
 
+* Add Windows support. PR [#951](https://github.com/biocore/biom-format/pull/951) revises the codebase to be Windows compatible and adds this support to the CI testing matrix.
 * Add NumPy 2.0 support. PR [#950](https://github.com/biocore/biom-format/pull/950) ensures code compatibility with NumPy 2.0. This support is yet to be added to the CI testing matrix.
 * Revise `Table._fast_merge` to use COO directly. For very large tables, this reduces runtime by ~50x and memory by ~5x. See PR [#913](https://github.com/biocore/biom-format/pull/933).
 * Drastically reduce the memory needs of subsampling when sums are large. Also adds 64-bit support. See PR [#935](https://github.com/biocore/biom-format/pull/935).
diff --git a/biom/tests/test_cli/test_add_metadata.py b/biom/tests/test_cli/test_add_metadata.py
index 16e6d436..16010833 100644
--- a/biom/tests/test_cli/test_add_metadata.py
+++ b/biom/tests/test_cli/test_add_metadata.py
@@ -9,6 +9,7 @@
 # -----------------------------------------------------------------------------
 
 import tempfile
+import os
 from unittest import TestCase, main
 
 import biom
@@ -20,13 +21,17 @@ class TestAddMetadata(TestCase):
     def setUp(self):
         """Set up data for use in unit tests."""
         self.cmd = _add_metadata
-        with tempfile.NamedTemporaryFile('w') as fh:
+        with tempfile.NamedTemporaryFile('w', delete=False) as fh:
             fh.write(biom1)
             fh.flush()
             self.biom_table1 = biom.load_table(fh.name)
+        self.temporary_fh_name = fh.name
 
         self.sample_md_lines1 = sample_md1.split('\n')
         self.obs_md_lines1 = obs_md1.split('\n')
 
+    def tearDown(self):
+        os.unlink(self.temporary_fh_name)
+
     def test_add_sample_metadata_no_casting(self):
         """Correctly adds sample metadata without casting it."""
         # Add a subset of sample metadata to a table that doesn't have any
diff --git a/biom/tests/test_cli/test_subset_table.py b/biom/tests/test_cli/test_subset_table.py
index 19080c6f..0ecedd8a 100644
--- a/biom/tests/test_cli/test_subset_table.py
+++ b/biom/tests/test_cli/test_subset_table.py
@@ -55,9 +55,10 @@ def test_invalid_input(self):
     def test_subset_samples_hdf5(self):
         """Correctly subsets samples in a hdf5 table"""
         cwd = os.getcwd()
-        if '/' in __file__:
-            os.chdir(__file__.rsplit('/', 1)[0])
-        obs = _subset_table(hdf5_biom='test_data/test.biom', axis='sample',
+        if os.path.sep in __file__:
+            os.chdir(os.path.dirname(__file__))
+        obs = _subset_table(hdf5_biom=os.path.join('test_data', 'test.biom'),
+                            axis='sample',
                             ids=['Sample1', 'Sample2', 'Sample3'],
                             json_table_str=None)
         os.chdir(cwd)
@@ -71,9 +72,9 @@ def test_subset_samples_hdf5(self):
     def test_subset_observations_hdf5(self):
         """Correctly subsets samples in a hdf5 table"""
         cwd = os.getcwd()
-        if '/' in __file__:
-            os.chdir(__file__.rsplit('/', 1)[0])
-        obs = _subset_table(hdf5_biom='test_data/test.biom',
+        if os.path.sep in __file__:
+            os.chdir(os.path.dirname(__file__))
+        obs = _subset_table(hdf5_biom=os.path.join('test_data', 'test.biom'),
                             axis='observation',
                             ids=['GG_OTU_1', 'GG_OTU_3', 'GG_OTU_5'],
                             json_table_str=None)
diff --git a/biom/tests/test_cli/test_summarize_table.py b/biom/tests/test_cli/test_summarize_table.py
index a979848b..274a7d4d 100644
--- a/biom/tests/test_cli/test_summarize_table.py
+++ b/biom/tests/test_cli/test_summarize_table.py
@@ -12,16 +12,21 @@
 from biom.parse import load_table
 
 import tempfile
+import os
 from unittest import TestCase, main
 
 
 class TestSummarizeTable(TestCase):
 
     def setUp(self):
-        with tempfile.NamedTemporaryFile(mode='w') as fh:
+        with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
             fh.write(biom1)
             fh.flush()
             self.biom1 = 
load_table(fh.name) + self.temporary_fh_name = fh.name + + def tearDown(self): + os.unlink(self.temporary_fh_name) def test_default(self): """ TableSummarizer functions as expected diff --git a/biom/tests/test_cli/test_table_converter.py b/biom/tests/test_cli/test_table_converter.py index af200130..44ecc97e 100644 --- a/biom/tests/test_cli/test_table_converter.py +++ b/biom/tests/test_cli/test_table_converter.py @@ -8,6 +8,7 @@ # The full license is in the file COPYING.txt, distributed with this software. # ----------------------------------------------------------------------------- +import os from os.path import abspath, dirname, join import tempfile @@ -28,16 +29,18 @@ def setUp(self): self.cmd = _convert self.output_filepath = tempfile.NamedTemporaryFile().name - with tempfile.NamedTemporaryFile('w') as fh: + with tempfile.NamedTemporaryFile('w', delete=False) as fh: fh.write(biom1) fh.flush() self.biom_table1 = load_table(fh.name) + self.temporary_fh_table_name = fh.name self.biom_lines1 = biom1.split('\n') - with tempfile.NamedTemporaryFile('w') as fh: + with tempfile.NamedTemporaryFile('w', delete=False) as fh: fh.write(classic1) fh.flush() self.classic_biom1 = load_table(fh.name) + self.temporary_fh_classic_name = fh.name self.sample_md1 = MetadataMap.from_file(sample_md1.split('\n')) @@ -47,6 +50,10 @@ def setUp(self): self.json_collapsed_samples = join(test_data_dir, 'json_sample_collapsed.biom') + def tearDown(self): + os.unlink(self.temporary_fh_classic_name) + os.unlink(self.temporary_fh_table_name) + def test_classic_to_biom(self): """Correctly converts classic to biom.""" self.cmd(table=self.classic_biom1, diff --git a/biom/tests/test_cli/test_table_normalizer.py b/biom/tests/test_cli/test_table_normalizer.py index 4844b654..d85ebcf9 100755 --- a/biom/tests/test_cli/test_table_normalizer.py +++ b/biom/tests/test_cli/test_table_normalizer.py @@ -24,9 +24,9 @@ def setUp(self): self.cmd = _normalize_table cwd = os.getcwd() - if '/' in __file__: - os.chdir(__file__.rsplit('/', 1)[0]) - self.table = biom.load_table('test_data/test.json') + if os.path.sep in __file__: + os.chdir(os.path.dirname(__file__)) + self.table = biom.load_table(os.path.join('test_data', 'test.json')) os.chdir(cwd) def test_bad_inputs(self): diff --git a/biom/tests/test_cli/test_validate_table.py b/biom/tests/test_cli/test_validate_table.py index b892577d..2a12ccdb 100644 --- a/biom/tests/test_cli/test_validate_table.py +++ b/biom/tests/test_cli/test_validate_table.py @@ -39,7 +39,8 @@ def setUp(self): self.to_remove = [] cur_path = os.path.split(os.path.abspath(__file__))[0] - examples_path = os.path.join(cur_path.rsplit('/', 3)[0], 'examples') + examples_path = os.path.join(cur_path.rsplit(os.path.sep, 3)[0], + 'examples') self.hdf5_file_valid = os.path.join(examples_path, 'min_sparse_otu_table_hdf5.biom') self.hdf5_file_valid_md = os.path.join(examples_path, diff --git a/biom/tests/test_parse.py b/biom/tests/test_parse.py index 8fd5d0b9..1b310160 100644 --- a/biom/tests/test_parse.py +++ b/biom/tests/test_parse.py @@ -46,7 +46,7 @@ def setUp(self): self.legacy_otu_table1 = legacy_otu_table1 self.otu_table1 = otu_table1 self.otu_table1_floats = otu_table1_floats - self.files_to_remove = [] + self.to_remove = [] self.biom_minimal_sparse = biom_minimal_sparse self.classic_otu_table1_w_tax = classic_otu_table1_w_tax.split('\n') @@ -54,6 +54,11 @@ def setUp(self): self.classic_table_with_complex_metadata = \ classic_table_with_complex_metadata.split('\n') + def tearDown(self): + if self.to_remove: + for f in 
self.to_remove: + os.remove(f) + def test_from_tsv_bug_854(self): data = StringIO('#FeatureID\tSample1') exp = Table([], [], ['Sample1']) @@ -281,38 +286,40 @@ def test_parse_adjacency_table_no_header(self): def test_parse_biom_table_hdf5(self): """Make sure we can parse a HDF5 table through the same loader""" cwd = os.getcwd() - if '/' in __file__[1:]: - os.chdir(__file__.rsplit('/', 1)[0]) - Table.from_hdf5(h5py.File('test_data/test.biom', 'r')) + if os.path.sep in __file__[1:]: + os.chdir(os.path.dirname(__file__)) + Table.from_hdf5(h5py.File(os.path.join('test_data', 'test.biom'), + 'r')) os.chdir(cwd) def test_save_table_filepath(self): t = Table(np.array([[0, 1, 2], [3, 4, 5]]), ['a', 'b'], ['c', 'd', 'e']) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: save_table(t, tmpfile.name) obs = load_table(tmpfile.name) self.assertEqual(obs, t) + self.to_remove.append(tmpfile.name) def test_load_table_filepath(self): cwd = os.getcwd() - if '/' in __file__[1:]: - os.chdir(__file__.rsplit('/', 1)[0]) - load_table('test_data/test.biom') + if os.path.sep in __file__[1:]: + os.chdir(os.path.dirname(__file__)) + load_table(os.path.join('test_data', 'test.biom')) os.chdir(cwd) def test_load_table_inmemory(self): cwd = os.getcwd() - if '/' in __file__[1:]: - os.chdir(__file__.rsplit('/', 1)[0]) - load_table(h5py.File('test_data/test.biom', 'r')) + if os.path.sep in __file__[1:]: + os.chdir(os.path.dirname(__file__)) + load_table(h5py.File(os.path.join('test_data', 'test.biom'), 'r')) os.chdir(cwd) def test_load_table_inmemory_json(self): cwd = os.getcwd() - if '/' in __file__[1:]: - os.chdir(__file__.rsplit('/', 1)[0]) - load_table(open('test_data/test.json')) + if os.path.sep in __file__[1:]: + os.chdir(os.path.dirname(__file__)) + load_table(open(os.path.join('test_data', 'test.json'))) os.chdir(cwd) def test_load_table_inmemory_stringio(self): @@ -350,10 +357,11 @@ def test_parse_biom_table_with_hdf5(self): """tests for parse_biom_table when we have h5py""" # We will round-trip the HDF5 file to several different formats, and # make sure we can recover the same table using parse_biom_table - if '/' in __file__[1:]: - os.chdir(__file__.rsplit('/', 1)[0]) + if os.path.sep in __file__[1:]: + os.chdir(os.path.dirname(__file__)) - t = parse_biom_table(h5py.File('test_data/test.biom', 'r')) + t = parse_biom_table(h5py.File(os.path.join('test_data', 'test.biom'), + 'r')) # These things are not round-trippable using the general-purpose # parse_biom_table function diff --git a/biom/tests/test_table.py b/biom/tests/test_table.py index 75675847..372f6160 100644 --- a/biom/tests/test_table.py +++ b/biom/tests/test_table.py @@ -1016,13 +1016,15 @@ def test_to_from_hdf5_bug_861(self): ['c', 'd', 'e']) t.add_metadata({'a': {'a / problem': 10}, 'b': {'a / problem': 20}}, axis='observation') - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t.to_hdf5(h5, 'tests') h5.close() h5 = h5py.File(tmpfile.name, 'r') obs = Table.from_hdf5(h5) + h5.close() + self.to_remove.append(tmpfile.name) self.assertEqual(obs, t) @@ -1030,7 +1032,7 @@ def test_to_from_hdf5_creation_date(self): t = Table(np.array([[0, 1, 2], [3, 4, 5]]), ['a', 'b'], ['c', 'd', 'e']) current = datetime.now() - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t.to_hdf5(h5, 'tests', creation_date=current) h5.close() @@ -1038,6 +1040,8 @@ def 
test_to_from_hdf5_creation_date(self): h5 = h5py.File(tmpfile.name, 'r') obs = Table.from_hdf5(h5) self.assertEqual(obs.create_date, current) + h5.close() + self.to_remove.append(tmpfile.name) self.assertEqual(obs, t) @@ -1045,24 +1049,27 @@ def test_to_hdf5_empty_table(self): """Successfully writes an empty OTU table in HDF5 format""" # Create an empty OTU table t = Table([], [], []) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t.to_hdf5(h5, 'tests') h5.close() + self.to_remove.append(tmpfile.name) def test_to_hdf5_empty_table_bug_619(self): """Successfully writes an empty OTU table in HDF5 format""" t = example_table.filter({}, axis='observation', inplace=False) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t.to_hdf5(h5, 'tests') h5.close() + self.to_remove.append(tmpfile.name) t = example_table.filter({}, inplace=False) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t.to_hdf5(h5, 'tests') h5.close() + self.to_remove.append(tmpfile.name) def test_to_hdf5_missing_metadata_observation(self): # exercises a vlen_list @@ -1070,10 +1077,11 @@ def test_to_hdf5_missing_metadata_observation(self): [{'taxonomy': None}, {'taxonomy': ['foo', 'baz']}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with h5py.File(tmpfile.name, 'w') as h5: t.to_hdf5(h5, 'tests') obs = load_table(tmpfile.name) + self.to_remove.append(tmpfile.name) self.assertEqual(obs.metadata(axis='observation'), ({'taxonomy': None}, {'taxonomy': ['foo', 'baz']})) @@ -1084,10 +1092,11 @@ def test_to_hdf5_missing_metadata_sample(self): [{'dat': None}, {'dat': 'foo'}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with h5py.File(tmpfile.name, 'w') as h5: t.to_hdf5(h5, 'tests') obs = load_table(tmpfile.name) + self.to_remove.append(tmpfile.name) self.assertEqual(obs.metadata(axis='sample'), ({'dat': ''}, {'dat': 'foo'})) @@ -1097,11 +1106,12 @@ def test_to_hdf5_inconsistent_metadata_categories_observation(self): [{'taxonomy_A': 'foo; bar'}, {'taxonomy_B': 'foo; baz'}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with h5py.File(tmpfile.name, 'w') as h5: with self.assertRaisesRegex(ValueError, 'inconsistent metadata'): t.to_hdf5(h5, 'tests') + self.to_remove.append(tmpfile.name) def test_to_hdf5_inconsistent_metadata_categories_sample(self): t = Table(np.array([[0, 1], [2, 3]]), ['a', 'b'], ['c', 'd'], @@ -1109,21 +1119,23 @@ def test_to_hdf5_inconsistent_metadata_categories_sample(self): [{'dat_A': 'foo; bar'}, {'dat_B': 'foo; baz'}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with h5py.File(tmpfile.name, 'w') as h5: with self.assertRaisesRegex(ValueError, 'inconsistent metadata'): t.to_hdf5(h5, 'tests') + self.to_remove.append(tmpfile.name) def test_to_hdf5_malformed_taxonomy(self): t = Table(np.array([[0, 1], [2, 3]]), ['a', 'b'], ['c', 'd'], [{'taxonomy': 'foo; bar'}, {'taxonomy': 'foo; baz'}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with h5py.File(tmpfile.name, 'w') as h5: t.to_hdf5(h5, 'tests') obs = load_table(tmpfile.name) + self.to_remove.append(tmpfile.name) self.assertEqual(obs.metadata(axis='observation'), ({'taxonomy': ['foo', 'bar']}, {'taxonomy': ['foo', 
'baz']})) @@ -1134,9 +1146,11 @@ def test_to_hdf5_general_fallback_to_list(self): [{'foo': ['k__a', 'p__b']}, {'foo': ['k__a', 'p__c']}], [{'barcode': 'aatt'}, {'barcode': 'ttgg'}]) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') st_rich.to_hdf5(h5, 'tests') + h5.close() + self.to_remove.append(tmpfile.name) def test_to_hdf5_custom_formatters(self): self.st_rich = Table(self.vals, @@ -1151,7 +1165,7 @@ def bc_formatter(grp, category, md, compression): grp.create_dataset(name, shape=data.shape, dtype=H5PY_VLEN_STR, data=data, compression=compression) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') self.st_rich.to_hdf5(h5, 'tests', format_fs={'barcode': bc_formatter}) @@ -1172,10 +1186,11 @@ def bc_formatter(grp, category, md, compression): self.assertNotEqual(m1['barcode'], m2['barcode']) self.assertEqual(m1['barcode'].lower(), m2['barcode']) h5.close() + self.to_remove.append(tmpfile.name) def test_to_hdf5(self): """Write a file""" - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') self.st_rich.to_hdf5(h5, 'tests') h5.close() @@ -1193,9 +1208,10 @@ def test_to_hdf5(self): obs = Table.from_hdf5(h5) self.assertEqual(obs, self.st_rich) h5.close() + self.to_remove.append(tmpfile.name) # Test with a collapsed table - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') dt_rich = Table( np.array([[5, 6, 7], [8, 9, 10], [11, 12, 13]]), @@ -1238,9 +1254,10 @@ def bin_f(id_, x): [{'collapsed_ids': ['a', 'c']}, {'collapsed_ids': ['b']}]) self.assertEqual(obs, exp) + self.to_remove.append(tmpfile.name) # Test with table having a None on taxonomy - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: h5 = h5py.File(tmpfile.name, 'w') t = Table(self.vals, ['1', '2'], ['a', 'b'], [{'taxonomy': ['k__a', 'p__b']}, @@ -1262,6 +1279,7 @@ def bin_f(id_, x): obs = Table.from_hdf5(h5) h5.close() self.assertEqual(obs, t) + self.to_remove.append(tmpfile.name) def test_from_tsv(self): tab1_fh = StringIO(otu_table1) diff --git a/biom/tests/test_util.py b/biom/tests/test_util.py index b3d1806c..44bf2341 100644 --- a/biom/tests/test_util.py +++ b/biom/tests/test_util.py @@ -42,6 +42,12 @@ class UtilTests(TestCase): def setUp(self): self.biom_otu_table1_w_tax = parse_biom_table(biom_otu_table1_w_tax) + self.to_remove = [] + + def tearDown(self): + if self.to_remove: + for f in self.to_remove: + os.remove(f) def test_generate_subsamples(self): table = Table(np.array([[3, 1, 1], [0, 3, 3]]), ['O1', 'O2'], @@ -246,11 +252,14 @@ def test_safe_md5(self): tmp_f = NamedTemporaryFile( mode='w', prefix='test_safe_md5', - suffix='txt') + suffix='txt', + delete=False) tmp_f.write('foo\n') tmp_f.flush() obs = safe_md5(open(tmp_f.name)) + tmp_f.close() + self.to_remove.append(tmp_f.name) self.assertEqual(obs, exp) obs = safe_md5(['foo\n']) @@ -262,9 +271,10 @@ def test_safe_md5(self): def test_biom_open_hdf5_pathlib_write(self): t = Table(np.array([[0, 1, 2], [3, 4, 5]]), ['a', 'b'], ['c', 'd', 'e']) - with NamedTemporaryFile() as tmpfile: + with NamedTemporaryFile(delete=False) as tmpfile: with biom_open(pathlib.Path(tmpfile.name), 'w') as fp: t.to_hdf5(fp, 'tests') + self.to_remove.append(tmpfile.name) def test_biom_open_hdf5_pathlib_read(self): cwd = os.getcwd() @@ -309,11 +319,12 @@ def 
test_is_hdf5_file(self): def test_load_classic(self): tab = load_table(get_data_path('test.json')) - with NamedTemporaryFile(mode='w') as fp: + with NamedTemporaryFile(mode='w', delete=False) as fp: fp.write(str(tab)) fp.flush() obs = load_table(fp.name) + self.to_remove.append(fp.name) npt.assert_equal(obs.ids(), tab.ids()) npt.assert_equal(obs.ids(axis='observation'), diff --git a/setup.py b/setup.py index 31b7b2c4..cc9a55b3 100644 --- a/setup.py +++ b/setup.py @@ -86,15 +86,20 @@ def run_tests(self): Topic :: Software Development :: Libraries :: Application Frameworks Topic :: Software Development :: Libraries :: Python Modules Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 Programming Language :: Python :: Implementation :: CPython Operating System :: OS Independent Operating System :: POSIX :: Linux Operating System :: MacOS :: MacOS X + Operating System :: Microsoft :: Windows """ classifiers = [s.strip() for s in classes.split('\n') if s]
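
The recurring test change in this patch (switching to `NamedTemporaryFile(..., delete=False)` and unlinking in `tearDown`) follows from documented `tempfile` behavior: on POSIX a named temporary file can be reopened by name while still open, but on Windows it cannot. Below is a minimal, self-contained sketch of the pattern; the class name and data are illustrative only, not taken from this PR:

    import os
    import tempfile
    from unittest import TestCase, main


    class TempFilePatternExample(TestCase):
        def setUp(self):
            # delete=False keeps the file on disk after the handle closes,
            # so it can be reopened by name on Windows as well as on POSIX.
            with tempfile.NamedTemporaryFile('w', delete=False) as fh:
                fh.write('hello\n')
                fh.flush()
                self.temporary_fh_name = fh.name

        def tearDown(self):
            # The file is no longer removed automatically; clean it up here.
            os.unlink(self.temporary_fh_name)

        def test_reopen_by_name(self):
            with open(self.temporary_fh_name) as fh:
                self.assertEqual(fh.read(), 'hello\n')


    if __name__ == '__main__':
        main()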