From 0e2b76e89ae02abd6ec9160404057011fec24ab7 Mon Sep 17 00:00:00 2001 From: Aaron Ewall-Wice Date: Fri, 23 Apr 2021 10:16:09 -0700 Subject: [PATCH] Pytest, github-actions, and codecov. (#114) * Create python-package-conda.yml * delete travis. * replace raw_input with input to support python3. * add more python versions. * update yaml to use multiple python versions. * include pytest-cov. * install. * fix space. * sync pytest with codecov. * fix covfile name. * replace test_dspec with pytest statements. * migrate test_plot to pytest. * migrate test_utils. * migrate test_utils. * remove 3.7 --- .github/workflows/python-package-conda.yml | 50 ++++ .travis.yml | 37 --- scripts/plot_uv.py | 10 +- uvtools/tests/test_dspec.py | 278 ++++++++++----------- uvtools/tests/test_plot.py | 93 ++++--- uvtools/tests/test_utils.py | 39 +-- 6 files changed, 260 insertions(+), 247 deletions(-) create mode 100644 .github/workflows/python-package-conda.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml new file mode 100644 index 00000000..1312f54f --- /dev/null +++ b/.github/workflows/python-package-conda.yml @@ -0,0 +1,50 @@ +name: Python Package using Conda + +on: [push] + +jobs: + build-linux: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.8, 3.9] + steps: + - uses: actions/checkout@v2 + - name: Set up ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Add conda to system path + run: | + # $CONDA is an environment variable pointing to the root of the miniconda directory + echo $CONDA/bin >> $GITHUB_PATH + - name: Install dependencies + run: | + conda env update --file environment.yml --name base + - name: Lint with flake8 + run: | + conda install flake8 + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Install + run: | + pip install . + - name: Test with pytest + run: | + conda install pytest + conda install pytest-cov + pytest --cov=./ --cov-report=xml + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ./coverage.xml + directory: ./coverage/reports/ + flags: unittests + env_vars: OS,PYTHON + name: codecov-umbrella + fail_ci_if_error: true + path_to_write_report: ./coverage/codecov_report.txt + verbose: true diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5b15ca73..00000000 --- a/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -language: python -python: - # We don't actually use the Travis Python, but this keeps it organized. - - "3.6" -env: - global: - - COVERALLS_PARALLEL=true -install: - - sudo apt-get update - # We do this conditionally because it saves us some downloading if the - # version is the same. 
- - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; - - bash miniconda.sh -b -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" - - hash -r - - conda config --set always_yes yes --set changeps1 no - - conda config --add channels conda-forge - - conda update -q conda - # Useful for debugging any issues with conda - - conda info -a - - # create environment and install dependencies - - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION coverage coveralls - - source activate test-environment - - conda env update -n test-environment -f environment.yml - - pip install git+https://github.com/HERA-Team/hera_sim.git - - pip install . - -before_script: - - "export MPLBACKEND=agg" - -script: - - nosetests uvtools --with-coverage --cover-package=uvtools -after_success: - - coveralls -notifications: - webhooks: https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN diff --git a/scripts/plot_uv.py b/scripts/plot_uv.py index 908ab546..a288f3ce 100755 --- a/scripts/plot_uv.py +++ b/scripts/plot_uv.py @@ -27,7 +27,7 @@ help="Dynamic range in color of image, in units matching plotting mode. Default max(data)-min(data).") o.add_option('-m', '--mode', dest='mode', default='log', help='Plot mode can be log (logrithmic), abs (absolute), phs (phase), real, or imag.') -o.add_option('-t', '--time', dest='time', default='all', +o.add_option('-t', '--time', dest='time', default='all', help='Select which time samples to plot. Options are: "all" (default), "_" (a range of times to plot), or "," (a list of times to plot).') o.add_option('-u', '--unmask', dest='unmask', action='store_true', help='Plot masked data, too.') @@ -128,7 +128,7 @@ def parse_range(chanstr): plot_f['freq'] = meta['freqs'].flatten().take(chan) plot_f['chan'] = chan dat, flg, _ = uvf.read(bls, freq_chans=chan) - data.append(dat); flgs.append(flg) + data.append(dat); flgs.append(flg) # Concatenate the data from all the files if len(data) > 1: @@ -243,7 +243,7 @@ def sort_func(a, b): def click(event): print([event.key]) if event.key == 'm': - mode = raw_input('Enter new mode: ') + mode = input('Enter new mode: ') for k in plots: try: d = uvtools.plot.data_mode(plt_data[k], mode) @@ -252,10 +252,10 @@ def click(event): print('Unrecognized plot mode') plt.draw() elif event.key == 'd': - max = raw_input('Enter new max: ') + max = input('Enter new max: ') try: max = float(max) except(ValueError): max = None - drng = raw_input('Enter new drng: ') + drng = input('Enter new drng: ') try: drng = float(drng) except(ValueError): drng = None for k in plots: diff --git a/uvtools/tests/test_dspec.py b/uvtools/tests/test_dspec.py index 94a46cf0..6afc267f 100644 --- a/uvtools/tests/test_dspec.py +++ b/uvtools/tests/test_dspec.py @@ -1,7 +1,7 @@ import unittest import uvtools.dspec as dspec import numpy as np, random -import nose.tools as nt +import pytest from pyuvdata import UVData from uvtools.data import DATA_PATH import os @@ -63,7 +63,7 @@ def test_delay_filter_1D(self): #check that skip_wgt is properly passed to clean wgts[:72] = 0. dmdl, dres, info = dspec.delay_filter(data, wgts, 0., .1/NCHAN, tol=TOL, skip_wgt=0.5, mode='clean') - nt.assert_true(info['status']['axis_1'][0] == 'skipped') + assert info['status']['axis_1'][0] == 'skipped' def test_delay_filter_2D(self): NCHAN = 128 @@ -87,8 +87,8 @@ def test_delay_filter_2D(self): #check that skip_wgt is properly passed to clean wgts[0,:72] = 0. 
dmdl, dres, info = dspec.delay_filter(data, wgts, 0., .1/NCHAN, tol=TOL, skip_wgt=0.5, mode='clean') - nt.assert_true(info['status']['axis_1'][0] == 'skipped') - nt.assert_true(info['status']['axis_1'][1] == 'success') + assert info['status']['axis_1'][0] == 'skipped' + assert info['status']['axis_1'][1] == 'success' def test_fourier_model(self): NMAX = 7 @@ -101,8 +101,8 @@ def test_fourier_model(self): self.assertEqual((NFREQS,), model.shape) # Test errors - nt.assert_raises(ValueError, dspec.fourier_model, 3, NFREQS) - nt.assert_raises(ValueError, dspec.fourier_model, np.empty((3, 3)), NFREQS) + pytest.raises(ValueError, dspec.fourier_model, 3, NFREQS) + pytest.raises(ValueError, dspec.fourier_model, np.empty((3, 3)), NFREQS) def test_delay_filter_leastsq(self): NCHAN = 128 @@ -155,9 +155,9 @@ def test_delay_filter_leastsq(self): np.testing.assert_allclose(data[0], bf_model, atol=NCHAN * TOL * sigma) # Test errors - nt.assert_raises(ValueError, dspec.delay_filter_leastsq_1d, + pytest.raises(ValueError, dspec.delay_filter_leastsq_1d, data[0], flags[0], sigma, nmax=3, operator=np.empty((3, 3))) - nt.assert_raises(ValueError, dspec.delay_filter_leastsq_1d, + pytest.raises(ValueError, dspec.delay_filter_leastsq_1d, data[0], flags[0], sigma, nmax=3, cn_guess=np.array([3])) def test_skip_wgt(self): @@ -182,30 +182,30 @@ def test_calc_width(self): filter_size = 1e-2 u, l = dspec.calc_width(filter_size, dt, nchan) frs = np.fft.fftfreq(nchan, dt) # negative b/c of ifft convention - nt.assert_true(np.all(np.abs(frs[u:l]) > filter_size)) + assert np.all(np.abs(frs[u:l]) > filter_size) # test multiple entries in filter_size filter_size = (1e-2, 2e-2) u, l = dspec.calc_width(filter_size, dt, nchan) - nt.assert_true(np.all((frs[u:l] < -1e-2) | (frs[u:l] > 2e-2))) + assert np.all((frs[u:l] < -1e-2) | (frs[u:l] > 2e-2)) def test_gen_window(self): for w in ['none', 'blackmanharris', 'hann', 'tukey', 'barthann', 'blackmanharris-7term', 'cosinesum-9term', 'cosinesum-11term']: win = dspec.gen_window(w, 100) - nt.assert_true(len(win), 100) - nt.assert_true(isinstance(win, np.ndarray)) - nt.assert_true(win.min() >= 0.0) - nt.assert_true(win.max() <= 1.0) - nt.assert_raises(ValueError, dspec.gen_window, w, 100, normalization='foo') + assert len(win) == 100 + assert isinstance(win, np.ndarray) + assert win.min() >= 0.0 + assert win.max() <= 1.0 + pytest.raises(ValueError, dspec.gen_window, w, 100, normalization='foo') win2 = dspec.gen_window(w, 100,normalization='mean') - nt.assert_true(np.all(np.isclose(win, win2*np.mean(win),atol=1e-6))) + assert np.all(np.isclose(win, win2*np.mean(win),atol=1e-6)) win3 = dspec.gen_window(w, 100,normalization='rms') - nt.assert_true(np.all(np.isclose(win, win3*np.sqrt(np.mean(win**2.)),atol=1e-6))) + assert np.all(np.isclose(win, win3*np.sqrt(np.mean(win**2.)),atol=1e-6)) - nt.assert_raises(ValueError, dspec.gen_window, 'foo', 200) + pytest.raises(ValueError, dspec.gen_window, 'foo', 200) # check Ncut ValueError - nt.assert_raises(ValueError, dspec.gen_window, 'bh', 200, edgecut_hi=101, edgecut_low=100) + pytest.raises(ValueError, dspec.gen_window, 'bh', 200, edgecut_hi=101, edgecut_low=100) def test_dft_operator(): @@ -233,8 +233,8 @@ def test_dpss_operator(): DF = 100e3 freqs = np.arange(-NF/2, NF/2)*DF + 150e6 freqs_bad = freqs[[0, 12, 14, 18, 22]] - nt.assert_raises(ValueError, dspec.dpss_operator, x=freqs_bad, filter_centers=[0.], filter_half_widths=[1e-6], nterms=[5]) - nt.assert_raises(ValueError, dspec.dpss_operator, x = freqs , filter_centers=[0.], 
filter_half_widths=[1e-6], nterms=[5], avg_suppression=[1e-12]) + pytest.raises(ValueError, dspec.dpss_operator, x=freqs_bad, filter_centers=[0.], filter_half_widths=[1e-6], nterms=[5]) + pytest.raises(ValueError, dspec.dpss_operator, x = freqs , filter_centers=[0.], filter_half_widths=[1e-6], nterms=[5], avg_suppression=[1e-12]) #now calculate DPSS operator matrices using different cutoff criteria. The columns #should be the same up to the minimum number of columns of the three techniques. amat1, ncol1 = dspec.dpss_operator(freqs, [0.], [100e-9], eigenval_cutoff=[1e-9]) @@ -244,7 +244,7 @@ def test_dpss_operator(): ncolmin = np.min(ncols) ncolmax = np.max(ncols) amat4, ncol4 = dspec.dpss_operator(freqs, [0.], [100e-9], nterms=[ncolmax]) - nt.assert_true(ncol4[0]==ncolmax) + assert ncol4[0]==ncolmax #check that all columns of matrices obtained with different methods #of cutoff are identical. for m in range(ncolmin): @@ -279,16 +279,16 @@ def test_fit_solution_matrix(): #DFT interpolation is meh, so we keep our standards low. #DFT interpolation matrices are poorly conditioned so that's also #Downer. - nt.assert_true(np.all(np.isclose(interp_dft, data, atol=1e-2))) + assert np.all(np.isclose(interp_dft, data, atol=1e-2)) #DPSS interpolation is clutch. We can make our standards high. - nt.assert_true(np.all(np.isclose(interp_dpss, data, atol=1e-6))) + assert np.all(np.isclose(interp_dpss, data, atol=1e-6)) #Check Raising of ValueErrors. amat_dft_pc = dspec.dft_operator(fs, [0.], [4. / 50.], fundamental_period=200.) with warnings.catch_warnings(record=True) as w: dspec.fit_solution_matrix(wmat, amat_dft_pc) - nt.assert_true(len(w) > 0) - nt.assert_raises(ValueError, dspec.fit_solution_matrix, wmat[:50], amat_dft_pc) - nt.assert_raises(ValueError, dspec.fit_solution_matrix, wmat, amat_dft[:-1]) + assert len(w) > 0 + pytest.raises(ValueError, dspec.fit_solution_matrix, wmat[:50], amat_dft_pc) + pytest.raises(ValueError, dspec.fit_solution_matrix, wmat, amat_dft[:-1]) def test_dayenu_filter(): @@ -323,45 +323,45 @@ def test_dayenu_filter(): filter_centers2 = [0., -1400e-9] filter_factors2 = [1e-9, 1e-9] #check if throws error when number of filter_half_widths not equal to len filter_centers - nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d, [1], filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d, [1], filter_centers, filter_half_widths2, filter_factors) #check if throws error when number of filter_half_widths not equal to len filter_factors - nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d, 1, filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d, 1, filter_centers, filter_half_widths, filter_factors2) #check if error thrown when wghts have different length then data - nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d[:-1], 1, filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_1d[:-1], 1, filter_centers, filter_half_widths, filter_factors) #check if error thrown when dimension of data does not equal dimension of weights. - nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_2d, 1, filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, data_1d, wghts_2d, 1, filter_centers, filter_half_widths, filter_factors) #check if error thrown if dimension of data does not equal 2 or 1. 
- nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, np.zeros((10,10,10)), wghts_1d, 1, filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, np.zeros((10,10,10)), wghts_1d, 1, filter_centers, filter_half_widths, filter_factors) #check if error thrown if dimension of weights does not equal 2 or 1. - nt.assert_raises(ValueError, dspec.dayenu_filter, freqs, wghts_1d, np.zeros((10,10,10)), 1, filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, freqs, wghts_1d, np.zeros((10,10,10)), 1, filter_centers, filter_half_widths, filter_factors) # check error where x is not a numpy array - nt.assert_raises(ValueError, dspec.dayenu_filter, x='x', data=data_1d, wgts=wghts_1d, filter_dimensions=[1], filter_centers=filter_centers, + pytest.raises(ValueError, dspec.dayenu_filter, x='x', data=data_1d, wgts=wghts_1d, filter_dimensions=[1], filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors) # check error where filter-dimensions is not an integer or tuple/list - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions='[1]', + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions='[1]', filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors) # check lenght of filter_dims is > 2 - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0, 1, 2], + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0, 1, 2], filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors) # check that filter_dimensions are integers. - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0.0], + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0.0], filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors) # check filter dimensions are either 0 or 1. - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[2], + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[2], filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=filter_factors) # check error if negative filter_factor provided - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0], + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_1d, wgts=wghts_1d, filter_dimensions=[0], filter_centers=filter_centers, filter_half_widths=filter_half_widths, filter_factors=[-1e-9]) #now filter foregrounds and test that std of residuals are close to std of noise: @@ -383,7 +383,7 @@ def test_dayenu_filter(): wghts_1d[len(wghts_1d)//4 + 5] = 0. filtered_noise, _ = dspec.dayenu_filter(np.arange(-nf/2, nf/2)*df, data_1d, wghts_1d, [1], filter_centers, filter_half_widths, filter_factors) - nt.assert_true(np.all(filtered_noise[~(wghts_1d.astype(bool))] == 0.)) + assert np.all(filtered_noise[~(wghts_1d.astype(bool))] == 0.) 
@@ -401,15 +401,15 @@ def test_dayenu_filter(): data_2d = signal_2d + noise_2d # check that if we are performing 2d filtering, then x is a length 2 list. - nt.assert_raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], + pytest.raises(ValueError, dspec.dayenu_filter, x=np.arange(-nf/2, nf/2)*df, data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], filter_centers=[[0.],[0.]], filter_half_widths=[[1e-3], [100e-9]], filter_factors=[[1e-9], [1e-9]]) # check that if we are performing 2d filtering, then x is a length 2 list and each x is a numpy array, list, or tuple. - nt.assert_raises(ValueError, dspec.dayenu_filter, x=['time is a construct', np.arange(-nf/2, nf/2)*df], data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], + pytest.raises(ValueError, dspec.dayenu_filter, x=['time is a construct', np.arange(-nf/2, nf/2)*df], data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], filter_centers=[[0.],[0.]], filter_half_widths=[[1e-3], [100e-9]], filter_factors=[[1e-9], [1e-9]]) # check value error if length of one of the filter_centers is greater then 2 - nt.assert_raises(ValueError, dspec.dayenu_filter, x=[times, np.arange(-nf/2, nf/2)*df], data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], + pytest.raises(ValueError, dspec.dayenu_filter, x=[times, np.arange(-nf/2, nf/2)*df], data=data_2d, wgts=np.ones_like(data_2d), filter_dimensions=[1, 0], filter_centers=[[0.], [0.]], filter_half_widths=[[1e-3], [100e-9]], filter_factors=[[1e-9], [1e-9], [1e-9]]) @@ -456,13 +456,13 @@ def test_dayenu_filter(): filter_centers = [[0.002],[0.]], filter_half_widths = [[0.001],[100e-9]], filter_factors = [[1e-5],[1e-5]], filter_dimensions = [0,1],cache = TEST_CACHE) - nt.assert_true(np.all(filtered_data_df_fr[:, nf // 4 + 3] == 0)) - nt.assert_true(np.all(filtered_data_df_fr[nf // 2 + 5] == 0)) + assert np.all(filtered_data_df_fr[:, nf // 4 + 3] == 0) + assert np.all(filtered_data_df_fr[nf // 2 + 5] == 0) np.testing.assert_almost_equal(np.sqrt(np.mean(np.abs(filtered_data_df_fr.flatten())**2.)), 1., decimal = 1) #test error messages if we do not provide lists of lists. - nt.assert_raises(ValueError,dspec.dayenu_filter,[np.arange(-nf/2,nf/2)*dt, np.arange(-nf/2,nf/2)*df], + pytest.raises(ValueError,dspec.dayenu_filter,[np.arange(-nf/2,nf/2)*dt, np.arange(-nf/2,nf/2)*df], data_2d, np.ones_like(data_2d), filter_centers = [[0.002],0.], filter_half_widths = [[0.001],[100e-9]], @@ -472,11 +472,11 @@ def test_dayenu_filter(): # test skip_wgt: _, info = dspec.dayenu_filter(np.arange(-nf/2,nf/2)*df, data_1d, np.zeros_like(wghts_1d), [1], np.array(filter_centers), np.array(filter_half_widths), np.array(filter_factors)) - nt.assert_true(np.all([info['status']['axis_1'][i] == 'skipped' for i in info['status']['axis_1']])) + assert np.all([info['status']['axis_1'][i] == 'skipped' for i in info['status']['axis_1']]) _, info = dspec.dayenu_filter(np.arange(-nf/2,nf/2)*df, data_1d, np.ones_like(wghts_1d), [1], np.array(filter_centers), np.array(filter_half_widths), np.array(filter_factors)) - nt.assert_true(np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1']])) + assert np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1']]) def test_dayenu_mat_inv(): @@ -550,26 +550,26 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mdl, res, info = dspec.delay_filter(d, w, sdf, bl_len, tol=1e-4, window='none', skip_wgt=1. - 1. 
/ f.shape[1], gain=0.1) # with this skip_wgt, all times should be skipped. - nt.assert_true(np.all([info['status']['axis_1'][i] == 'skipped' for i in info['status']['axis_1'] ])) + assert np.all([info['status']['axis_1'][i] == 'skipped' for i in info['status']['axis_1'] ]) # delay filter basic execution mdl, res, info = dspec.delay_filter(d, w, sdf, bl_len, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) #check skips . - nt.assert_true(info['status']['axis_1'][20] == 'skipped') - nt.assert_true(np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1'] if i != 20])) + assert info['status']['axis_1'][20] == 'skipped' + assert np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1'] if i != 20]) cln = mdl + res # assert recovered snr of input modes snrs = get_snr(cln, fftax=1, avgax=0) - nt.assert_true(np.isclose(snrs[0], freq_snr1, atol=3)) - nt.assert_true(np.isclose(snrs[1], freq_snr2, atol=3)) + assert np.isclose(snrs[0], freq_snr1, atol=3) + assert np.isclose(snrs[1], freq_snr2, atol=3) # test vis filter is the same mdl2, res2, info2 = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, standoff=0, horizon=1.0, min_dly=0.0, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) - nt.assert_true(np.isclose(mdl - mdl2, 0.0).all()) + assert np.isclose(mdl - mdl2, 0.0).all() # fringe filter basic execution mdl, res, info = dspec.fringe_filter(d, w, frs[15], dt, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) @@ -577,32 +577,32 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): # assert recovered snr of input modes snrs = get_snr(cln, fftax=0, avgax=1) - nt.assert_true(np.isclose(snrs[0], time_snr1, atol=3)) - nt.assert_true(np.isclose(snrs[1], time_snr2, atol=3)) + assert np.isclose(snrs[0], time_snr1, atol=3) + assert np.isclose(snrs[1], time_snr2, atol=3) # test vis filter is the same mdl2, res2, info2 = dspec.vis_filter(d, w, max_frate=frs[15], dt=dt, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) cln2 = mdl2 + res2 - nt.assert_true(np.isclose(mdl - mdl2, 0.0).all()) + assert np.isclose(mdl - mdl2, 0.0).all() # try non-symmetric filter mdl, res, info = dspec.fringe_filter(d, w, (frs[-20], frs[10]), dt, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) cln = mdl + res - nt.assert_true(np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1'] if i != 20])) + assert np.all([info['status']['axis_1'][i] == 'success' for i in info['status']['axis_1'] if i != 20]) # assert recovered snr of input modes snrs = get_snr(cln, fftax=0, avgax=1) - nt.assert_true(np.isclose(snrs[0], time_snr1, atol=3)) - nt.assert_true(np.isclose(snrs[1], time_snr2, atol=3)) + assert np.isclose(snrs[0], time_snr1, atol=3) + assert np.isclose(snrs[1], time_snr2, atol=3) # 2d clean mdl, res, info = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, max_frate=frs[15], dt=dt, tol=1e-4, window='none', maxiter=100, gain=1e-1) cln = mdl + res # assert recovered snr of input modes snrs = get_snr(cln, fftax=1, avgax=0) - nt.assert_true(np.isclose(snrs[0], freq_snr1, atol=3)) - nt.assert_true(np.isclose(snrs[1], freq_snr2, atol=3)) + assert np.isclose(snrs[0], freq_snr1, atol=3) + assert np.isclose(snrs[1], freq_snr2, atol=3) # non-symmetric 2D clean mdl, res, info = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, max_frate=(frs[-20], frs[10]), dt=dt, tol=1e-4, window='none', maxiter=100, gain=1e-1) @@ -610,8 +610,8 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): # assert recovered snr of input modes snrs = get_snr(cln, fftax=1, avgax=0) - 
nt.assert_true(np.isclose(snrs[0], freq_snr1, atol=3)) - nt.assert_true(np.isclose(snrs[1], freq_snr2, atol=3)) + assert np.isclose(snrs[0], freq_snr1, atol=3) + assert np.isclose(snrs[1], freq_snr2, atol=3) # try plus filtmode on 2d clean mdl, res, info = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, max_frate=(frs[10], frs[10]), dt=dt, tol=1e-4, window=('none', 'none'), edgecut_low=(0, 5), edgecut_hi=(2, 5), maxiter=100, gain=1e-1, filt2d_mode='plus') @@ -621,17 +621,17 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): # assert clean components fall only in plus area clean_comp = np.where(~np.isclose(np.abs(mfft), 0.0)) for cc in zip(*clean_comp): - nt.assert_true(0 in cc) + assert 0 in cc # exceptions - nt.assert_raises(ValueError, dspec.vis_filter, d, w, bl_len=bl_len, sdf=sdf, max_frate=(frs[-20], frs[10]), dt=dt, filt2d_mode='foo') + pytest.raises(ValueError, dspec.vis_filter, d, w, bl_len=bl_len, sdf=sdf, max_frate=(frs[-20], frs[10]), dt=dt, filt2d_mode='foo') # test add_clean_residual: test res of filtered modes are lower when add_residual is True mdl, res, info = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, max_frate=frs[15], dt=dt, tol=1e-6, window='none', maxiter=100, gain=1e-1, add_clean_residual=False) mdl2, res2, info = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, max_frate=frs[15], dt=dt, tol=1e-6, window='none', maxiter=100, gain=1e-1, add_clean_residual=True) rfft = np.fft.ifft2(res) rfft2 = np.fft.ifft2(res2) - nt.assert_true(np.median(np.abs(rfft2[:15, :23] / rfft[:15, :23])) < 1) + assert np.median(np.abs(rfft2[:15, :23] / rfft[:15, :23])) < 1 def test_delay_interpolation_matrix(): """ @@ -657,15 +657,15 @@ def test_delay_interpolation_matrix(): #interpolate data and see if it matches true data. data_interp = np.dot(dspec.delay_interpolation_matrix(nchan=20, ndelay=5, wgts=wgts, fundamental_period=20, cache=MYCACHE), dw) #check that interpolated data agrees with original data. - nt.assert_true( np.all(np.isclose(data_interp, data, atol=1e-6))) + assert np.all(np.isclose(data_interp, data, atol=1e-6)) #test error raising. - nt.assert_raises(ValueError, dspec.delay_interpolation_matrix, 10, 2, np.ones(5)) - nt.assert_raises(ValueError, dspec.delay_interpolation_matrix, 5, 2, np.asarray([0., 0., 0., 0., 0.])) + pytest.raises(ValueError, dspec.delay_interpolation_matrix, 10, 2, np.ones(5)) + pytest.raises(ValueError, dspec.delay_interpolation_matrix, 5, 2, np.asarray([0., 0., 0., 0., 0.])) #test diagnostic mode. data_interp1 = dspec.delay_interpolation_matrix(nchan=20, ndelay=5, wgts=wgts, fundamental_period=20, cache={}) data_interp1 = np.dot(data_interp1, dw) - nt.assert_true(np.all(np.isclose(data_interp, data_interp1, atol=1e-6))) + assert np.all(np.isclose(data_interp, data_interp1, atol=1e-6)) #test warning with warnings.catch_warnings(record=True) as w: wgtpc = np.ones(100) @@ -673,7 +673,7 @@ def test_delay_interpolation_matrix(): wgtpc[randflags]=0. 
amat_pc = dspec.delay_interpolation_matrix(nchan=100, ndelay=25, wgts=wgtpc, fundamental_period=200) print(len(w)) - nt.assert_true(len(w) > 0) + assert len(w) > 0 def test_fourier_filter(): # load file @@ -725,11 +725,11 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mode='dpss_leastsq', **dpss_options1) # check filter dims error is raised - nt.assert_raises(ValueError, dspec.fourier_filter,x=freqs, data=d, wgts=w, filter_centers=[0.], filter_dims=2, + pytest.raises(ValueError, dspec.fourier_filter,x=freqs, data=d, wgts=w, filter_centers=[0.], filter_dims=2, filter_half_widths=[bl_len], suppression_factors=[0.], mode='dpss_leastsq', **dpss_options1) #check that length >2 filter dims will fail. - nt.assert_raises(ValueError, dspec.fourier_filter,x=freqs, data=d, wgts=w, filter_centers=[0.], filter_dims=[0, 1, 1], + pytest.raises(ValueError, dspec.fourier_filter,x=freqs, data=d, wgts=w, filter_centers=[0.], filter_dims=[0, 1, 1], filter_half_widths=[bl_len], suppression_factors=[0.], mode='dpss_leastsq', **dpss_options1) @@ -747,14 +747,14 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): 'edgecut_low':0, 'edgecut_hi':0, 'add_clean_residual':False, 'window':'none', 'gain':0.1, 'alphae':0.5} #check that a ValueError is returned if we include a bad parameter name. - nt.assert_raises(ValueError, dspec.fourier_filter, freqs, d, w, [0.], [bl_len], + pytest.raises(ValueError, dspec.fourier_filter, freqs, d, w, [0.], [bl_len], mode='clean', **clean_options_typo) - nt.assert_true(np.all(np.isclose(mdl3, mdl4, atol=1e-6))) - nt.assert_true(np.all(np.isclose(res3, res4, atol=1e-6))) + assert np.all(np.isclose(mdl3, mdl4, atol=1e-6)) + assert np.all(np.isclose(res3, res4, atol=1e-6)) - nt.assert_true(np.all(np.isclose(mdl1, mdl2, atol=1e-6))) - nt.assert_true(np.all(np.isclose(res1, res2))) + assert np.all(np.isclose(mdl1, mdl2, atol=1e-6)) + assert np.all(np.isclose(res1, res2)) #check that dayenu can be run without fitting options. mdl3, res3, info3 = dspec.fourier_filter(freqs, d, w, [0.], [bl_len], suppression_factors=[1e-9], @@ -763,20 +763,20 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mdl4, res4, info4 = dspec.fourier_filter(freqs, d, w, [0.], [bl_len], suppression_factors=[1e-9], mode='dayenu') - nt.assert_true(np.all(np.isclose(mdl3, mdl4, atol=1e-6))) - nt.assert_true(np.all(np.isclose(res3, res4, atol=1e-6))) + assert np.all(np.isclose(mdl3, mdl4, atol=1e-6)) + assert np.all(np.isclose(res3, res4, atol=1e-6)) #check that clean skips if all data is equal to zero, avoids infinite loop case. mdl3, res3, info3 = dspec.fourier_filter(freqs, np.zeros_like(d), w, [0.], [bl_len], mode='clean', filter_dims=1) - nt.assert_true(np.all([info3['status']['axis_1'][i] == 'skipped' for i in info3['status']['axis_1']])) + assert np.all([info3['status']['axis_1'][i] == 'skipped' for i in info3['status']['axis_1']]) #check error when unsupported mode provided - nt.assert_raises(ValueError, dspec.fourier_filter, x=freqs, data=d, wgts=w, filter_centers=[0.], + pytest.raises(ValueError, dspec.fourier_filter, x=freqs, data=d, wgts=w, filter_centers=[0.], filter_half_widths=[bl_len], suppression_factors=[0.], mode='foo', **dpss_options1) #check error when wgt dim does not equal data dim. 
- nt.assert_raises(ValueError, dspec.fourier_filter, x=freqs, data=d, wgts=w[0].squeeze(), filter_centers=[0.], + pytest.raises(ValueError, dspec.fourier_filter, x=freqs, data=d, wgts=w[0].squeeze(), filter_centers=[0.], filter_half_widths=[bl_len], suppression_factors=[0.], mode='dpss_leastsq', **dpss_options1) @@ -788,11 +788,11 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): #test that the info is properly switched. fourier_filter processes all data in frequency_mode and takes transposes for time #filtering mode. for k in info11d: - nt.assert_true(len(info11d[k]['axis_0']) == 0) + assert len(info11d[k]['axis_0']) == 0 if k == 'status': - nt.assert_true(len(info11d[k]['axis_1']) == 1) + assert len(info11d[k]['axis_1']) == 1 - nt.assert_true(np.all(np.isclose(mdl1[0], mdl11d, atol=1e-6))) + assert np.all(np.isclose(mdl1[0], mdl11d, atol=1e-6)) #perform a fringe-rate filter mdl5, res5, info5 = dspec.fourier_filter(x=times, data=d, wgts=w, filter_centers=[0.], filter_half_widths=[fr_len], suppression_factors=[0.], filter_dims=0, @@ -801,12 +801,12 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): #test that the info is properly switched. fourier_filter processes all data in frequency_mode and takes transposes for time #filtering mode. for k in info5: - nt.assert_true(len(info5[k]['axis_1']) == 0) + assert len(info5[k]['axis_1']) == 0 if k == 'status': - nt.assert_true(len(info5[k]['axis_0']) == d.shape[1]) + assert len(info5[k]['axis_0']) == d.shape[1] #check that fringe rate filter model gives similar results to delay filter. - nt.assert_true(np.all(np.isclose(mdl1[~f],mdl5[~f], rtol=1e-2))) + assert np.all(np.isclose(mdl1[~f],mdl5[~f], rtol=1e-2)) #perform some sanity checks on handling of nans in dft_leastsq. If nans are present in fundamental period #then the default behavior should be to set fundamental period to 2 * bandwidth. @@ -814,16 +814,16 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): filter_half_widths=[bl_len], suppression_factors=[1e-9], mode='dft_leastsq') # check that the filter_period is indeed equal to 1 / (2 * bandwidth) - nt.assert_true(np.isclose(info['filter_params']['axis_1']['basis_options']['fundamental_period'], - 2 * (freqs.max() - freqs.min()))) + assert np.isclose(info['filter_params']['axis_1']['basis_options']['fundamental_period'], + 2 * (freqs.max() - freqs.min())) #check that user provided fundamental period agrees with whats in info. mdl, res, info = dspec.fourier_filter(x=freqs, data=d[0], wgts=w[0], filter_centers=[0.], filter_half_widths=[bl_len], suppression_factors=[1e-9], mode='dft_leastsq', fundamental_period=4. * (freqs.max() - freqs.min())) # check that the filter_period is indeed equal to 1 / (2 * bandwidth) - nt.assert_true(np.isclose(info['filter_params']['axis_1']['basis_options']['fundamental_period'], - 4. * (freqs.max() - freqs.min()))) + assert np.isclose(info['filter_params']['axis_1']['basis_options']['fundamental_period'], + 4. * (freqs.max() - freqs.min())) #check fringe rate filter with dft mode mdl6, res6, info6 = dspec.fourier_filter(x=times, data=d, wgts=w, filter_centers=[0.], @@ -834,8 +834,8 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): filter_half_widths=[fr_len], suppression_factors=[0.], filter_dims=0, mode='dft_leastsq', **dft_options1) #check that dft and dpss fringe-rate inpainting give the same results. 
- nt.assert_true(np.all(np.isclose(mdl5, mdl6, rtol=1e-2))) - nt.assert_true(np.all(np.isclose(mdl62, mdl6, rtol=1e-2))) + assert np.all(np.isclose(mdl5, mdl6, rtol=1e-2)) + assert np.all(np.isclose(mdl62, mdl6, rtol=1e-2)) #Check Dayenu filter. mdl7, res7, info7 = dspec.fourier_filter(x=times, data=d, wgts=w, filter_centers=[0.], @@ -845,18 +845,18 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mdl8, res8, info8 = dspec.fourier_filter(x=times, data=d, wgts=w, filter_centers=[0.], filter_half_widths=[fr_len], suppression_factors=[1e-8], filter_dims=0, mode='dayenu_dpss_leastsq', **dpss_options1) - nt.assert_true(np.all(np.isclose(mdl7, mdl8, rtol=1e-2))) - nt.assert_true(np.all(np.isclose(mdl5, mdl8, rtol=1e-2))) + assert np.all(np.isclose(mdl7, mdl8, rtol=1e-2)) + assert np.all(np.isclose(mdl5, mdl8, rtol=1e-2)) for k in info8: if not k == 'info_deconv': - nt.assert_true(len(info8[k]['axis_1']) == 0) + assert len(info8[k]['axis_1']) == 0 if k == 'status': - nt.assert_true(len(info8[k]['axis_0']) == d.shape[1]) + assert len(info8[k]['axis_0']) == d.shape[1] for k in info8['info_deconv']: - nt.assert_true(len(info8['info_deconv'][k]['axis_1']) == 0) + assert len(info8['info_deconv'][k]['axis_1']) == 0 if k == 'status': - nt.assert_true(len(info8['info_deconv'][k]['axis_0']) == d.shape[1]) + assert len(info8['info_deconv'][k]['axis_0']) == d.shape[1] #perform 2d dayenu filter with dpss and dft deconvolution. dpss_options1_2d = {'eigenval_cutoff': [[1e-12], [1e-12]]} @@ -867,11 +867,11 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[1e-8],[1e-8]], mode='dayenu_dpss_leastsq', filter_dims=[1, 0], **dpss_options1_2d) - nt.assert_raises(ValueError, dspec.fourier_filter, x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], + pytest.raises(ValueError, dspec.fourier_filter, x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[1e-8],[1e-8]], mode='dayenu_dpss_leastsq', filter_dims=[1, 0], **dpss_options1) - nt.assert_raises(ValueError, dspec.fourier_filter, x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], + pytest.raises(ValueError, dspec.fourier_filter, x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[1e-8],[1e-8]], mode='dayenu_dpss_leastsq', filter_dims=[1, 0], **dft_options1) @@ -879,7 +879,7 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[1e-8],[1e-8]], mode='dayenu_dft_leastsq', filter_dims=[1, 0], **dft_options1_2d) #check 2d filter dft fundamental period error. 
- nt.assert_raises(ValueError, dspec.fourier_filter,x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], + pytest.raises(ValueError, dspec.fourier_filter,x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[0.],[0.]], mode='dft_leastsq', filter_dims=[1, 0], **dpss_options1) mdl_dft, res_dft, info_dft = dspec.fourier_filter(x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], @@ -889,7 +889,7 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mdl_dft1, res_dft1, info_dft1 = dspec.fourier_filter(x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[0.],[0.]], mode='dft_leastsq', filter_dims=[1, 0]) - nt.assert_true(np.all(np.isclose(mdl_dft1, mdl_dft, rtol=1e-2))) + assert np.all(np.isclose(mdl_dft1, mdl_dft, rtol=1e-2)) #try 2d iterative clean. mdl11, res11, info11 = dspec.fourier_filter(x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], @@ -904,19 +904,19 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): mdl13, res13, info13 = dspec.fourier_filter(x=[times, freqs], data=d, wgts=np.zeros_like(w), filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], mode='clean', filter_dims=[1, 0], **{'filt2d_mode':'plus','tol':1e-5}) - nt.assert_true(info13['clean_status']['axis_0']['skipped']) - nt.assert_true(info13['clean_status']['axis_1']['skipped']) + assert info13['clean_status']['axis_0']['skipped'] + assert info13['clean_status']['axis_1']['skipped'] #test error when cleaning with invalid filt2d mode. - nt.assert_raises(ValueError, dspec.fourier_filter,x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], + pytest.raises(ValueError, dspec.fourier_filter,x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], mode='clean', filter_dims=[1, 0], **{'filt2d_mode':'bargh','tol':1e-5}) #check equally spaced data value error for clean tlog = np.logspace(np.log10(times.min()), np.log10(times.max()), len(times)) flog = np.logspace(np.log10(freqs.min()), np.log10(freqs.max()), len(freqs)) - nt.assert_raises(ValueError, dspec.fourier_filter,x=[tlog, flog], data=d, wgts=w, filter_centers=[[0.],[0.]], + pytest.raises(ValueError, dspec.fourier_filter,x=[tlog, flog], data=d, wgts=w, filter_centers=[[0.],[0.]], filter_half_widths=[[fr_len],[bl_len]], mode='clean', filter_dims=[1, 0], **{'filt2d_mode':'plus','tol':1e-5}) - nt.assert_raises(ValueError, dspec.fourier_filter,x=flog, data=d, wgts=w, filter_centers=[0.], + pytest.raises(ValueError, dspec.fourier_filter,x=flog, data=d, wgts=w, filter_centers=[0.], filter_half_widths=[bl_len], mode='clean', filter_dims=[1], **{'tol':1e-5}) @@ -925,10 +925,10 @@ def get_snr(clean, fftax=1, avgax=0, modes=[2, 20]): filter_half_widths=[[fr_len],[bl_len]], suppression_factors=[[0.],[0.]], mode='dft_leastsq', filter_dims=[1, 0], **dft_options2_2d) - nt.assert_true(np.isclose(info_dft['filter_params']['axis_1']['basis_options']['fundamental_period'], - dft_options2_2d['fundamental_period'][1])) - nt.assert_true(np.isclose(info_dft['filter_params']['axis_0']['basis_options']['fundamental_period'], - dft_options2_2d['fundamental_period'][0])) + assert np.isclose(info_dft['filter_params']['axis_1']['basis_options']['fundamental_period'], + dft_options2_2d['fundamental_period'][1]) + assert np.isclose(info_dft['filter_params']['axis_0']['basis_options']['fundamental_period'], + dft_options2_2d['fundamental_period'][0]) def 
test_vis_clean(): # validate that fourier_filter in various clean modes gives close values to vis_clean with equivalent parameters! @@ -965,8 +965,8 @@ def test_vis_clean(): mdl2, res2, info2 = dspec.wedge_filter(d, w, bl_len, sdf, standoff=0, horizon=1.0, min_dly=0.0, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) # validate models and residuals are close. - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) mdl2, res2, info2 = dspec.vis_filter(d, w, bl_len=bl_len, sdf=sdf, standoff=0., horizon=1.0, min_dly=0.0, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) @@ -974,10 +974,10 @@ def test_vis_clean(): # cover tuple arguments. mdl3, res3, info3 = dspec.vis_clean(d, w, filter_size=bl_len, real_delta=sdf, tol=1e-4, window='none', skip_wgt=0.1, gain=0.1) # validate models and residuals are close. - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) - nt.assert_true(np.all(np.isclose(res1, res3))) - nt.assert_true(np.all(np.isclose(mdl1, mdl3))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) + assert np.all(np.isclose(res1, res3)) + assert np.all(np.isclose(mdl1, mdl3)) # Do the same comparison with more complicated windowing and edge cuts. @@ -987,8 +987,8 @@ def test_vis_clean(): mdl2, res2, info2 = dspec.delay_filter(d, w, bl_len, sdf, edgecut_hi=4, edgecut_low=4, tol=1e-4, skip_wgt=0.1, gain=0.1, window='tukey') - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) #Do a comparison for time domain clean. @@ -1003,13 +1003,13 @@ def test_vis_clean(): mdl3, res3, info3 = dspec.vis_clean(d.T, w.T, (frs[15], frs[15]), dt, edgecut_hi=4, edgecut_low=3, tol=1e-4, window='tukey', skip_wgt=0.1, gain=0.1) - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) - nt.assert_true(np.all(np.isclose(res1, res3.T))) - nt.assert_true(np.all(np.isclose(mdl1, mdl3.T))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) + assert np.all(np.isclose(res1, res3.T)) + assert np.all(np.isclose(mdl1, mdl3.T)) #cover value error if 2-tuple filter sizes and not 2dclean. - nt.assert_raises(ValueError, dspec.fringe_filter,d , w, frs[15], dt, edgecut_hi=4, edgecut_low=3, + pytest.raises(ValueError, dspec.fringe_filter,d , w, frs[15], dt, edgecut_hi=4, edgecut_low=3, tol=1e-4, window='tukey', skip_wgt=0.1, gain=0.1, clean2d=True) #try 2d iterative clean. @@ -1023,8 +1023,8 @@ def test_vis_clean(): window='tukey', tol=1e-5, clean2d=True, add_clean_residual=False) - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) #check plus mode. mdl1, res1, info1 = dspec.fourier_filter(x=[times, freqs], data=d, wgts=w, filter_centers=[[0.],[0.]], @@ -1043,15 +1043,15 @@ def test_vis_clean(): window='tukey', tol=1e-5, clean2d=True, filt2d_mode='plus', add_clean_residual=False) #cover value error for calling 2d visclean with 1d real_delta. 
- nt.assert_raises(ValueError, dspec.high_pass_fourier_filter, data=d, wgts=w, filter_size=[[frs[15] , frs[15]], bl_len], + pytest.raises(ValueError, dspec.high_pass_fourier_filter, data=d, wgts=w, filter_size=[[frs[15] , frs[15]], bl_len], real_delta=np.mean(np.diff(times)), window='tukey', tol=1e-5, clean2d=True, filt2d_mode='plus', add_clean_residual=False) - nt.assert_true(np.all(np.isclose(mdl1, mdl2))) - nt.assert_true(np.all(np.isclose(res1, res2))) - nt.assert_true(np.all(np.isclose(res3, res1))) - nt.assert_true(np.all(np.isclose(mdl3, mdl1))) + assert np.all(np.isclose(mdl1, mdl2)) + assert np.all(np.isclose(res1, res2)) + assert np.all(np.isclose(res3, res1)) + assert np.all(np.isclose(mdl3, mdl1)) def test__fit_basis_1d(): @@ -1071,7 +1071,7 @@ def test__fit_basis_1d(): method='leastsq', basis='dpss') mod2, resid2, info2 = dspec._fit_basis_1d(fs, dw, wgts, [0.], [5./50.], basis_options=dpss_opts, method='matrix', basis='dpss') - nt.assert_true(np.all(np.isclose(mod1, mod2, atol=1e-6))) + assert np.all(np.isclose(mod1, mod2, atol=1e-6)) #perform dft interpolation, leastsq and matrix and compare results dft_opts={'fundamental_period':200.} @@ -1088,10 +1088,10 @@ def test__fit_basis_1d(): #Matrices with 2B harmonics are poorly conditioned which is #unfortunate since 2B is generally where DFT performance #approaches DPSS performance. - nt.assert_true(np.all(np.isclose(mod4, mod2, atol=1e-5))) - nt.assert_true(np.all(np.isclose(mod3, mod4, atol=1e-2))) + assert np.all(np.isclose(mod4, mod2, atol=1e-5)) + assert np.all(np.isclose(mod3, mod4, atol=1e-2)) - nt.assert_true(np.all(np.isclose((mod2+resid2)*wgts, dw, atol=1e-6))) + assert np.all(np.isclose((mod2+resid2)*wgts, dw, atol=1e-6)) if __name__ == '__main__': diff --git a/uvtools/tests/test_plot.py b/uvtools/tests/test_plot.py index 15cfccc6..b3e308d6 100644 --- a/uvtools/tests/test_plot.py +++ b/uvtools/tests/test_plot.py @@ -1,4 +1,4 @@ -import nose.tools as nt +import pytest import matplotlib import matplotlib.pyplot as plt import unittest @@ -11,7 +11,7 @@ def axes_contains(ax, obj_list): """Check that a matplotlib.Axes instance contains certain elements. - This function was taken directly from the ``test_plot`` module of + This function was taken directly from the ``test_plot`` module of ``hera_pspec``. Parameters @@ -40,7 +40,7 @@ def axes_contains(ax, obj_list): return True class TestMethods(unittest.TestCase): - + def test_data_mode(self): data = np.ones(100) - 1j*np.ones(100) d = uvt.plot.data_mode(data, mode='abs') @@ -54,7 +54,7 @@ def test_data_mode(self): d = uvt.plot.data_mode(data, mode='imag') self.assertTrue(np.all(d == -1)) self.assertRaises(ValueError, uvt.plot.data_mode, data, mode='') - + def test_waterfall(self): import matplotlib data = np.ones((10,10)) - 1j*np.ones((10,10)) @@ -62,13 +62,13 @@ def test_waterfall(self): uvt.plot.waterfall(data, mode=mode) #matplotlib.pyplot.show() matplotlib.pyplot.clf() - + def test_plot_antpos(self): antpos = {i: [i,i,0] for i in range(10)} import matplotlib uvt.plot.plot_antpos(antpos) #matplotlib.pyplot.show() - + class TestFancyPlotters(unittest.TestCase): def setUp(self): import hera_sim @@ -91,7 +91,7 @@ def test_labeled_waterfall(self): # so this will test some features not exposed in # plot.fourier_transform_waterfalls uvd = self.uvd - + # Dynamic range setting. 
fig, ax = uvt.plot.labeled_waterfall( uvd, @@ -192,7 +192,7 @@ def test_fourier_transform_waterfalls(self): axes = fig.get_axes() ylabels = list(ax.get_ylabel() for ax in axes) assert sum("mK sr" in ylabel for ylabel in ylabels) == 4 - + # Check custom plot units. plot_units = { "time": "hour", @@ -232,37 +232,37 @@ def test_fourier_transform_waterfalls(self): # Already checked everything but time units, so only check that. ylabels = list(ax.get_ylabel() for ax in axes) assert sum(f"[{plot_units['time']}]" in ylabel for ylabel in ylabels) == 2 - + ylimits = list(ax.get_ylim() for ax in axes) assert sum(np.allclose(ylims, (tmax, tmin)) for ylims in ylimits) == 2 # Do some exception raising checking. - with nt.assert_raises(ValueError): + with pytest.raises(ValueError): uvt.plot.fourier_transform_waterfalls( data=uvd, antpairpol=(0,1,'xx'), time_or_lst="nan" ) - with nt.assert_raises(TypeError): + with pytest.raises(TypeError): uvt.plot.fourier_transform_waterfalls(data={}) - with nt.assert_raises(TypeError): + with pytest.raises(TypeError): uvt.plot.fourier_transform_waterfalls( data=data, freqs=freqs, lsts=lsts, plot_units="bad_type" ) - with nt.assert_raises(ValueError): + with pytest.raises(ValueError): uvt.plot.fourier_transform_waterfalls(data=np.ones((3,5,2), dtype=np.complex)) - with nt.assert_raises(ValueError): + with pytest.raises(ValueError): uvt.plot.fourier_transform_waterfalls(data=data, freqs=freqs) - with nt.assert_raises(ValueError): + with pytest.raises(ValueError): uvt.plot.fourier_transform_waterfalls(data=data, times=times) - with nt.assert_raises(ValueError): + with pytest.raises(ValueError): uvt.plot.fourier_transform_waterfalls(data=uvd) - with nt.assert_raises(TypeError): + with pytest.raises(TypeError): uvt.plot.fourier_transform_waterfalls(data=np.ones((15,20), dtype=np.float)) @@ -286,9 +286,9 @@ def setUp(self): df1 = 1e8 / 1024 df2 = 2e8 / 1024 # actually mock up the data - sim = hera_sim.Simulator(n_freq=10, n_times=10, + sim = hera_sim.Simulator(n_freq=10, n_times=10, antennas=antennas, - integration_time=dt1, + integration_time=dt1, channel_width=df1) self.uvd1 = copy.deepcopy(sim.data) sim.add_eor("noiselike_eor") @@ -299,15 +299,15 @@ def setUp(self): sim.data.vis_units = 'mK' self.uvd_bad_vis_units = copy.deepcopy(sim.data) # mismatched baselines - sim = hera_sim.Simulator(n_freq=10, n_times=10, + sim = hera_sim.Simulator(n_freq=10, n_times=10, antennas=offset_ants, integration_time=dt1, channel_width=df1) self.uvd_bad_bls = copy.deepcopy(sim.data) # wrong number of antennas - sim = hera_sim.Simulator(n_freq=10, n_times=10, + sim = hera_sim.Simulator(n_freq=10, n_times=10, antennas=bad_ants, - integration_time=dt1, + integration_time=dt1, channel_width=df1) self.uvd_bad_ants = copy.deepcopy(sim.data) # bad Nfreq @@ -317,21 +317,21 @@ def setUp(self): channel_width=df1) self.uvd_bad_Nfreq = copy.deepcopy(sim.data) # bad Ntimes - sim = hera_sim.Simulator(n_freq=10, n_times=50, + sim = hera_sim.Simulator(n_freq=10, n_times=50, antennas=antennas, - integration_time=dt1, + integration_time=dt1, channel_width=df1) self.uvd_bad_Ntimes = copy.deepcopy(sim.data) # bad integration time - sim = hera_sim.Simulator(n_freq=10, n_times=10, + sim = hera_sim.Simulator(n_freq=10, n_times=10, antennas=antennas, - integration_time=dt2, + integration_time=dt2, channel_width=df1) self.uvd_bad_int_time = copy.deepcopy(sim.data) # bad channel width - sim = hera_sim.Simulator(n_freq=10, n_times=10, + sim = hera_sim.Simulator(n_freq=10, n_times=10, antennas=antennas, - 
integration_time=dt1, + integration_time=dt1, channel_width=df2) self.uvd_bad_chan_width = copy.deepcopy(sim.data) @@ -384,7 +384,7 @@ def test_plot_diff_1d(self): elements = [(plt.Subplot, Nplots),] for dimension in dimensions: fig = uvt.plot.plot_diff_1d( - self.uvd1, self.uvd2, self.antpairpol, + self.uvd1, self.uvd2, self.antpairpol, plot_type=plot_type, dimension=dimension ) @@ -402,7 +402,7 @@ def test_plot_diff_1d(self): dim = duals[dimension] else: dim = dimension if i // 3 == 0 else duals[dimension] - + # account for the fact that it plots against lst if # plotting along the time axis dim = "lst" if dim == "time" else dim @@ -426,7 +426,7 @@ def test_plot_diff_1d(self): # check that it works when an axis has length 1 fig = uvt.plot.plot_diff_1d( - self.uvd_1d_freqs, self.uvd_1d_freqs, self.antpairpol, + self.uvd_1d_freqs, self.uvd_1d_freqs, self.antpairpol, plot_type="normal" ) @@ -437,7 +437,7 @@ def test_plot_diff_uv(self): # one per colorbar elements = [(plt.Subplot, 6),] self.assertTrue(axes_contains(fig, elements)) - + # now check that we get three images and three colorbars Nimages = 0 Ncbars = 0 @@ -449,7 +449,7 @@ def test_plot_diff_uv(self): self.assertTrue(contains_image or contains_cbar) Nimages += int(contains_image) Ncbars += int(contains_cbar) - + self.assertTrue(Nimages == 3) self.assertTrue(Ncbars == 3) @@ -458,17 +458,17 @@ def test_plot_diff_uv(self): def test_plot_diff_waterfall(self): - plot_types = ("time_vs_freq", "time_vs_dly", + plot_types = ("time_vs_freq", "time_vs_dly", "fr_vs_freq", "fr_vs_dly") # get all combinations - plot_types = [list(combinations(plot_types, r)) + plot_types = [list(combinations(plot_types, r)) for r in range(1, len(plot_types) + 1)] # unpack the nested list plot_types = [item for items in plot_types for item in items] - # loop over all combinations of plot types, check that the - # right number of subplots are made, noting how many different + # loop over all combinations of plot types, check that the + # right number of subplots are made, noting how many different # differences are taken and how many plot types there are # also account for colorbars techincally being subplots for plot_type in plot_types: @@ -479,19 +479,19 @@ def test_plot_diff_waterfall(self): Nsubplots = 2 * Nplots # make the list of objects to search for elements = [(plt.Subplot, Nsubplots),] - + # actually make the plot - fig = uvt.plot.plot_diff_waterfall(self.uvd1, self.uvd2, + fig = uvt.plot.plot_diff_waterfall(self.uvd1, self.uvd2, self.antpairpol, plot_type=plot_type) - + # check that the correct number of subplots are made self.assertTrue(axes_contains(fig, elements)) Nimages = 0 Ncbars = 0 for ax in fig.axes: - # check that each Axes object contains either an + # check that each Axes object contains either an # AxesImage (from imshow) or a QuadMesh (from colorbar) image = [(matplotlib.image.AxesImage, 1),] cbar = [(matplotlib.collections.QuadMesh, 1),] @@ -500,7 +500,7 @@ def test_plot_diff_waterfall(self): Nimages += int(contains_image) Ncbars += int(contains_cbar) self.assertTrue(contains_image or contains_cbar) - + # check that the amount of colorbars and images is correct self.assertTrue(Nimages == Nplots) self.assertTrue(Ncbars == Nplots) @@ -525,12 +525,11 @@ def test_check_metadata(self): continue if not attr.startswith("uvd_bad"): continue - nt.assert_raises(uvt.utils.MetadataError, - uvt.plot.plot_diff_uv, - self.uvd1, value, - check_metadata=True) + pytest.raises(uvt.utils.MetadataError, + uvt.plot.plot_diff_uv, + self.uvd1, value, + 
check_metadata=True) if __name__ == '__main__': unittest.main() - diff --git a/uvtools/tests/test_utils.py b/uvtools/tests/test_utils.py index ff7393cf..6069eb60 100644 --- a/uvtools/tests/test_utils.py +++ b/uvtools/tests/test_utils.py @@ -1,4 +1,4 @@ -import nose.tools as nt +import pytest import uvtools as uvt import numpy as np import glob @@ -23,35 +23,36 @@ def test_search_data(): # search data dfs, dps = uvt.utils.search_data(templates, pols) - nt.assert_equal(len(dfs), 2) - nt.assert_equal(len(dfs[0]), len(dfs[1]), 2) - nt.assert_equal(len(dps), 2) - nt.assert_equal(len(dps[0]), len(dps[1]), 2) - nt.assert_equal(dps[0], ['xx', 'xx']) - nt.assert_true(np.all(['.xx.' in df for df in dfs[0]])) + assert len(dfs) == 2 + assert len(dfs[0]) == len(dfs[1]) + assert len(dfs[1]) == 2 + assert len(dps) == 2 + assert len(dps[0]) == len(dps[1]) + assert len(dps[0]) == 2 + assert dps[0], ['xx' == 'xx'] + assert np.all(['.xx.' in df for df in dfs[0]]) # matched pols dfs, dps = uvt.utils.search_data(templates, pols, matched_pols=True) - nt.assert_equal(len(dfs), 2) - nt.assert_equal(len(dfs[0]), len(dfs[1]), 2) - nt.assert_true(np.all(['.xx.' in df for df in dfs[0]])) + assert len(dfs) == 2 + assert len(dfs[0]) == len(dfs[1]) + assert len(dfs[0]) == 2 + assert np.all(['.xx.' in df for df in dfs[0]]) dfs, dps = uvt.utils.search_data(files, pols + ['pI'], matched_pols=True) - nt.assert_equal(len(dfs), 0) + assert len(dfs) == 0 # reverse nesting dfs, dps = uvt.utils.search_data(templates, pols, reverse_nesting=True) - nt.assert_equal(len(dfs), 2) - nt.assert_equal(len(dfs[0]), len(dfs[1]), 2) - nt.assert_true(np.all(['.bar.' in df for df in dfs[0]])) + assert len(dfs) == 2 + assert len(dfs[0]) == len(dfs[1]) + assert len(dfs[1]) == 2 + assert np.all(['.bar.' in df for df in dfs[0]]) # flatten dfs, dps = uvt.utils.search_data(templates, pols, flatten=True) - nt.assert_equal(len(dfs), 4) - nt.assert_true(isinstance(dfs[0], (str, np.str))) + assert len(dfs) == 4 + assert isinstance(dfs[0], (str, np.str)) for f in allfiles: if os.path.exists(f): os.remove(f) - - -