diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26fe340df..2c879e6fd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -65,7 +65,6 @@ jobs: uses: codecov/codecov-action@v3.1.4 with: file: ./coverage.xml - flags: unittests env_vars: OS,PYTHON name: codecov-umbrella fail_ci_if_error: false diff --git a/docs/release-notes.rst b/docs/release-notes.rst index bf75e8447..823e3982a 100644 --- a/docs/release-notes.rst +++ b/docs/release-notes.rst @@ -8,6 +8,10 @@ Release Notes v2023.10.1 (Unreleased) ----------------------- +Maintenance +^^^^^^^^^^^ +* Convert Unittest to Pytest by `Cora Schneck`_ in (:pr:`462`) + Documentation ^^^^^^^^^^^^^ * Updated office hours link by `Anissa Zacharias`_ in (:pr:`495`) diff --git a/test/test_climatologies.py b/test/test_climatologies.py index d749899ab..4adbc5887 100644 --- a/test/test_climatologies.py +++ b/test/test_climatologies.py @@ -1,10 +1,9 @@ import sys -import unittest import cftime import numpy as np import pandas as pd +import pytest import xarray.testing -from parameterized import parameterized import xarray as xr from geocat.comp import climate_anomaly, month_to_season, calendar_average, climatology_average @@ -75,10 +74,10 @@ def _get_dummy_data(start_date, ##### End Helper Functions ##### -class test_climate_anomaly(unittest.TestCase): +class Test_Climate_Anomaly: daily = _get_dummy_data('2020-01-01', '2021-12-31', 'D', 1, 1) - def test_daily_anomaly(self): + def test_daily_anomaly(self) -> None: expected_anom = np.concatenate([ np.full(59, -183), [0], np.full(306, -182.5), @@ -102,7 +101,7 @@ def test_daily_anomaly(self): anom = climate_anomaly(self.daily, 'day') xarray.testing.assert_allclose(anom, expected_anom) - def test_monthly_anomaly(self): + def test_monthly_anomaly(self) -> None: expected_anom = np.concatenate([ np.arange(-198, -167), np.arange(-193.54386, -165), @@ -146,7 +145,7 @@ def test_monthly_anomaly(self): anom = climate_anomaly(self.daily, 'month') xarray.testing.assert_allclose(anom, expected_anom) - def test_seasonal_anomaly(self): + def test_seasonal_anomaly(self) -> None: expected_anom = np.concatenate([ np.arange(-320.9392265, -261), np.arange(-228, -136), @@ -180,7 +179,7 @@ def test_seasonal_anomaly(self): anom = climate_anomaly(self.daily, 'season') xarray.testing.assert_allclose(anom, expected_anom) - def test_yearly_anomaly(self): + def test_yearly_anomaly(self) -> None: expected_anom = np.concatenate( [np.arange(-182.5, 183), np.arange(-182, 183)]) @@ -201,23 +200,25 @@ def test_yearly_anomaly(self): anom = climate_anomaly(self.daily, 'year') xarray.testing.assert_allclose(anom, expected_anom) - @parameterized.expand([('daily, "month", None', daily, 'month', None), - ('daily, "month", True', daily, 'month', True), - ('daily, "month", False', daily, 'month', False), - ('daily, "season", None', daily, 'season', None), - ('daily, "season", True', daily, 'season', True), - ('daily, "season", False', daily, 'season', False), - ('daily, "year", None', daily, 'year', None), - ('daily, "year", True', daily, 'year', True), - ('daily, "year", False', daily, 'year', False)]) - def test_keep_attrs(self, name, dset, freq, keep_attrs): + @pytest.mark.parametrize( + "name, dset, freq, keep_attrs", + [('daily, "month", None', daily, 'month', None), + ('daily, "month", True', daily, 'month', True), + ('daily, "month", False', daily, 'month', False), + ('daily, "season", None', daily, 'season', None), + ('daily, "season", True', daily, 'season', True), + ('daily, "season", False', daily, 
'season', False), + ('daily, "year", None', daily, 'year', None), + ('daily, "year", True', daily, 'year', True), + ('daily, "year", False', daily, 'year', False)]) + def test_keep_attrs(self, name, dset, freq, keep_attrs) -> None: result = climate_anomaly(dset, freq, keep_attrs=keep_attrs) if keep_attrs or keep_attrs == None: assert result.attrs == dset.attrs elif not keep_attrs: assert result.attrs == {} - def test_custom_time_dim(self): + def test_custom_time_dim(self) -> None: time_dim = 'my_time' expected_anom = np.concatenate([ np.arange(-198, -167), @@ -266,7 +267,7 @@ def test_custom_time_dim(self): xr.testing.assert_allclose(anom, expected_anom) -class test_month_to_season(unittest.TestCase): +class Test_Month_to_Season: ds1 = get_fake_dataset(start_month="2000-01", nmonths=12, nlats=1, nlons=1) # Create another dataset for the year 2001. @@ -298,49 +299,58 @@ class test_month_to_season(unittest.TestCase): nlats=10, nlons=10) - @parameterized.expand([('None', None), ('True', True), ('False', False)]) - def test_month_to_season_keep_attrs(self, name, keep_attrs): + @pytest.mark.parametrize("name, keep_attrs", [('None', None), + ('True', True), + ('False', False)]) + def test_month_to_season_keep_attrs(self, name, keep_attrs) -> None: season_ds = month_to_season(self.ds1, 'JFM', keep_attrs=keep_attrs) if keep_attrs or keep_attrs == None: assert season_ds.attrs == self.ds1.attrs elif not keep_attrs: assert season_ds.attrs == {} - @parameterized.expand([('ds1, JFM', ds1, 'JFM', 2.0), - ('ds2, JAA', ds1, 'JJA', 7.0)]) + @pytest.mark.parametrize("name, dset, season, expected", + [('ds1, JFM', ds1, 'JFM', 2.0), + ('ds2, JAA', ds1, 'JJA', 7.0)]) def test_month_to_season_returns_middle_month_value(self, name, dset, - season, expected): + season, + expected) -> None: season_ds = month_to_season(dset, season) np.testing.assert_equal(season_ds["my_var"].data, expected) - def test_month_to_season_bad_season_exception(self): - with self.assertRaises(KeyError): + def test_month_to_season_bad_season_exception(self) -> None: + with pytest.raises(KeyError): month_to_season(self.ds1, "TEST") - def test_month_to_season_partial_years_exception(self): - with self.assertRaises(ValueError): + def test_month_to_season_partial_years_exception(self) -> None: + with pytest.raises(ValueError): month_to_season(self.partial_year_dataset, "JFM") - def test_month_to_season_final_season_returns_2month_average(self): + def test_month_to_season_final_season_returns_2month_average(self) -> None: season_ds = month_to_season(self.ds1, 'NDJ') np.testing.assert_equal(season_ds["my_var"].data, 11.5) - @parameterized.expand([('DJF', 'DJF'), ('JFM', 'JFM'), ('FMA', 'FMA'), - ('MAM', 'MAM'), ('AMJ', 'AMJ'), ('MJJ', 'MJJ'), - ('JJA', 'JJA'), ('JAS', 'JAS'), ('ASO', 'ASO'), - ('SON', 'SON'), ('OND', 'OND'), ('NDJ', 'NDJ')]) - def test_month_to_season_returns_one_point_per_year(self, name, season): + @pytest.mark.parametrize("name, season", [('DJF', 'DJF'), ('JFM', 'JFM'), + ('FMA', 'FMA'), ('MAM', 'MAM'), + ('AMJ', 'AMJ'), ('MJJ', 'MJJ'), + ('JJA', 'JJA'), ('JAS', 'JAS'), + ('ASO', 'ASO'), ('SON', 'SON'), + ('OND', 'OND'), ('NDJ', 'NDJ')]) + def test_month_to_season_returns_one_point_per_year(self, name, + season) -> None: nyears_of_data = self.ds3.sizes["time"] / 12 season_ds = month_to_season(self.ds3, season) assert season_ds["my_var"].size == nyears_of_data - @parameterized.expand([ - ('custom_time_dataset', custom_time_dataset, "my_time", "my_var", 2.0), - ('ds4', ds4.isel(x=110, y=200), None, "Tair", [-10.56, -8.129, 
-7.125]), - ]) + @pytest.mark.parametrize( + "name, dataset, time_coordinate, var_name, expected", + [('custom_time_dataset', custom_time_dataset, "my_time", "my_var", 2.0), + ('ds4', ds4.isel(x=110, y=200), None, "Tair", [-10.56, -8.129, -7.125]) + ]) def test_month_to_season_custom_time_coordinate(self, name, dataset, time_coordinate, var_name, - expected): + expected) -> None: + season_ds = month_to_season(dataset, "JFM", time_coord_name=time_coordinate) @@ -349,7 +359,7 @@ def test_month_to_season_custom_time_coordinate(self, name, dataset, decimal=1) -class test_calendar_average(unittest.TestCase): +class Test_Calendar_Average(): minute = _get_dummy_data('2020-01-01', '2021-12-31 23:30:00', '30min', 1, 1) hourly = _get_dummy_data('2020-01-01', '2021-12-31 23:00:00', 'H', 1, 1) daily = _get_dummy_data('2020-01-01', '2021-12-31', 'D', 1, 1) @@ -526,24 +536,26 @@ class test_calendar_average(unittest.TestCase): 'lon': [-180.0] }) - @parameterized.expand([('daily, "month", None', daily, 'month', None), - ('daily, "month", True', daily, 'month', True), - ('daily, "month", False', daily, 'month', False), - ('monthly, "season", None', monthly, 'season', None), - ('monthly, "season", True', monthly, 'season', True), - ('monthly, "season", False', monthly, 'season', - False), - ('monthly, "year", None', monthly, 'year', None), - ('monthly, "year", True', monthly, 'year', True), - ('monthly, "year", False', monthly, 'year', False)]) - def test_calendar_average_keep_attrs(self, name, dset, freq, keep_attrs): + @pytest.mark.parametrize( + "name, dset, freq, keep_attrs", + [('daily, "month", None', daily, 'month', None), + ('daily, "month", True', daily, 'month', True), + ('daily, "month", False', daily, 'month', False), + ('monthly, "season", None', monthly, 'season', None), + ('monthly, "season", True', monthly, 'season', True), + ('monthly, "season", False', monthly, 'season', False), + ('monthly, "year", None', monthly, 'year', None), + ('monthly, "year", True', monthly, 'year', True), + ('monthly, "year", False', monthly, 'year', False)]) + def test_calendar_average_keep_attrs(self, name, dset, freq, + keep_attrs) -> None: result = calendar_average(dset, freq, keep_attrs=keep_attrs) if keep_attrs or keep_attrs == None: assert result.attrs == dset.attrs elif not keep_attrs: assert result.attrs == {} - def test_30min_to_hourly_calendar_average(self): + def test_30min_to_hourly_calendar_average(self) -> None: hour_avg = np.arange(0.5, 35088.5, 2).reshape((365 + 366) * 24, 1, 1) hour_avg_time = xr.cftime_range('2020-01-01 00:30:00', '2021-12-31 23:30:00', @@ -559,7 +571,7 @@ def test_30min_to_hourly_calendar_average(self): result = calendar_average(self.minute, freq='hour') xr.testing.assert_equal(result, min_2_hour_avg) - def test_hourly_to_daily_calendar_average(self): + def test_hourly_to_daily_calendar_average(self) -> None: day_avg = np.arange(11.5, 17555.5, 24).reshape(366 + 365, 1, 1) day_avg_time = xr.cftime_range('2020-01-01 12:00:00', '2021-12-31 12:00:00', @@ -574,7 +586,7 @@ def test_hourly_to_daily_calendar_average(self): result = calendar_average(self.hourly, freq='day') xr.testing.assert_equal(result, hour_2_day_avg) - def test_daily_to_monthly_calendar_average(self): + def test_daily_to_monthly_calendar_average(self) -> None: month_avg = np.array([ 15, 45, 75, 105.5, 136, 166.5, 197, 228, 258.5, 289, 319.5, 350, 381, 410.5, 440, 470.5, 501, 531.5, 562, 593, 623.5, 654, 684.5, 715 @@ -595,64 +607,67 @@ def test_daily_to_monthly_calendar_average(self): result = 
calendar_average(self.daily, freq='month') xr.testing.assert_equal(result, day_2_month_avg) - @parameterized.expand([('daily to seasonal', daily, day_2_season_avg), - ('monthly to seasonal', monthly, month_2_season_avg)] - ) + @pytest.mark.parametrize( + "name, dset, expected", + [('daily to seasonal', daily, day_2_season_avg), + ('monthly to seasonal', monthly, month_2_season_avg)]) def test_daily_monthly_to_seasonal_calendar_average(self, name, dset, - expected): + expected) -> None: result = calendar_average(dset, freq='season') xr.testing.assert_allclose(result, expected) - @parameterized.expand([('daily to yearly', daily, day_2_year_avg), - ('monthly to yearly', monthly, month_2_year_avg)]) + @pytest.mark.parametrize("name, dset, expected", + [('daily to yearly', daily, day_2_year_avg), + ('monthly to yearly', monthly, month_2_year_avg)]) def test_daily_monthly_to_yearly_calendar_average(self, name, dset, - expected): + expected) -> None: result = calendar_average(dset, freq='year') xr.testing.assert_allclose(result, expected) - @parameterized.expand([('freq=TEST', 'TEST'), ('freq=None', None)]) - def test_invalid_freq_calendar_average(self, name, freq): - with self.assertRaises(ValueError): + @pytest.mark.parametrize("name, freq", [('freq=TEST', 'TEST'), + ('freq=None', None)]) + def test_invalid_freq_calendar_average(self, name, freq) -> None: + with pytest.raises(ValueError): calendar_average(self.monthly, freq=freq) - def test_custom_time_coord_calendar_average(self): + def test_custom_time_coord_calendar_average(self) -> None: result = calendar_average(self.custom_time, freq='month', time_dim=self.time_dim) xr.testing.assert_allclose(result, self.custom_time_expected) - def test_xr_DataArray_support_calendar_average(self): + def test_xr_DataArray_support_calendar_average(self) -> None: array = self.daily['data'] array_expected = self.day_2_month_avg['data'] result = calendar_average(array, freq='month') xr.testing.assert_equal(result, array_expected) - def test_non_datetime_like_objects_calendar_average(self): + def test_non_datetime_like_objects_calendar_average(self) -> None: dset_encoded = xr.tutorial.open_dataset("air_temperature", decode_cf=False) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): calendar_average(dset_encoded, 'month') - def test_non_uniformly_spaced_data_calendar_average(self): + def test_non_uniformly_spaced_data_calendar_average(self) -> None: time = pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-04']) non_uniform = xr.Dataset(data_vars={'data': (('time'), np.arange(3))}, coords={'time': time}) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): calendar_average(non_uniform, freq='day') - @parameterized.expand([ - ('julian_calendar', julian_daily, julian_day_2_month_avg), - ('no_leap_calendar', noleap_daily, noleap_day_2_month_avg), - ('all_leap_calendar', all_leap_daily, all_leap_day_2_month_avg), - ('day_360_calendar', day_360_daily, day_360_leap_day_2_month_avg) - ]) + @pytest.mark.parametrize( + "name, dset, expected", + [('julian_calendar', julian_daily, julian_day_2_month_avg), + ('no_leap_calendar', noleap_daily, noleap_day_2_month_avg), + ('all_leap_calendar', all_leap_daily, all_leap_day_2_month_avg), + ('day_360_calendar', day_360_daily, day_360_leap_day_2_month_avg)]) def test_non_standard_calendars_calendar_average(self, name, dset, - expected): + expected) -> None: result = calendar_average(dset, freq='month') xr.testing.assert_equal(result, expected) -class 
test_climatology_average(unittest.TestCase): +class Test_Climatology_Average(): minute = _get_dummy_data('2020-01-01', '2021-12-31 23:30:00', '30min', 1, 1) hourly = _get_dummy_data('2020-01-01', '2021-12-31 23:00:00', 'H', 1, 1) @@ -828,22 +843,22 @@ class test_climatology_average(unittest.TestCase): 'lon': [-180.0] }) - @parameterized.expand([ - ('daily, "month", None', daily, 'month', [], None), - ('daily, "month", True', daily, 'month', [], True), - ('daily, "month", False', daily, 'month', [], False), - ('monthly, "season", None', monthly, 'season', [], None), - ('monthly, "season", True', monthly, 'season', [], True), - ('monthly, "season", False', monthly, 'season', [], False), - ('monthly, "season", None', monthly, 'season', - ['DJF', 'MAM', 'JJA', 'SON'], None), - ('monthly, "season", True', monthly, 'season', - ['DJF', 'MAM', 'JJA', 'SON'], True), - ('monthly, "season", False', monthly, 'season', - ['DJF', 'MAM', 'JJA', 'SON'], False) - ]) + @pytest.mark.parametrize( + "name, dset, freq, custom_seasons, keep_attrs", + [('daily, "month", None', daily, 'month', [], None), + ('daily, "month", True', daily, 'month', [], True), + ('daily, "month", False', daily, 'month', [], False), + ('monthly, "season", None', monthly, 'season', [], None), + ('monthly, "season", True', monthly, 'season', [], True), + ('monthly, "season", False', monthly, 'season', [], False), + ('monthly, "season", None', monthly, 'season', + ['DJF', 'MAM', 'JJA', 'SON'], None), + ('monthly, "season", True', monthly, 'season', + ['DJF', 'MAM', 'JJA', 'SON'], True), + ('monthly, "season", False', monthly, 'season', + ['DJF', 'MAM', 'JJA', 'SON'], False)]) def test_climatology_average_keep_attrs(self, name, dset, freq, - custom_seasons, keep_attrs): + custom_seasons, keep_attrs) -> None: result = climatology_average(dset, freq=freq, custom_seasons=custom_seasons, @@ -853,19 +868,19 @@ def test_climatology_average_keep_attrs(self, name, dset, freq, elif not keep_attrs: assert result.attrs == {} - def test_30min_to_hourly_climatology_average(self): + def test_30min_to_hourly_climatology_average(self) -> None: result = climatology_average(self.minute, freq='hour') xr.testing.assert_allclose(result, self.min_2_hourly_clim) - def test_hourly_to_daily_climatology_average(self): + def test_hourly_to_daily_climatology_average(self) -> None: result = climatology_average(self.hourly, freq='day') xr.testing.assert_equal(result, self.hour_2_day_clim) - def test_daily_to_monthly_climatology_average(self): + def test_daily_to_monthly_climatology_average(self) -> None: result = climatology_average(self.daily, freq='month') xr.testing.assert_allclose(result, self.day_2_month_clim) - def test_custom_season_climatology_average(self): + def test_custom_season_climatology_average(self) -> None: result = climatology_average( self.monthly, freq='season', @@ -873,20 +888,22 @@ def test_custom_season_climatology_average(self): expected = climatology_average(self.monthly, freq='season') xr.testing.assert_equal(result, expected) - @parameterized.expand([('daily to seasonal', daily, day_2_season_clim), - ('monthly to seasonal', monthly, month_2_season_clim) - ]) + @pytest.mark.parametrize( + "name, dset, expected", + [('daily to seasonal', daily, day_2_season_clim), + ('monthly to seasonal', monthly, month_2_season_clim)]) def test_daily_monthly_to_seasonal_climatology_average( - self, name, dset, expected): + self, name, dset, expected) -> None: result = climatology_average(dset, freq='season') xr.testing.assert_allclose(result, expected) - 
@parameterized.expand([('freq=TEST', 'TEST'), ('freq=None', None)]) - def test_invalid_freq_climatology_average(self, name, freq): - with self.assertRaises(ValueError): + @pytest.mark.parametrize("name, freq", [('freq=TEST', 'TEST'), + ('freq=None', None)]) + def test_invalid_freq_climatology_average(self, name, freq) -> None: + with pytest.raises(ValueError): climatology_average(self.monthly, freq=freq) - def test_custom_time_coord_climatology_average(self): + def test_custom_time_coord_climatology_average(self) -> None: time_dim = 'my_time' custom_time = self.daily.rename({'time': time_dim}) @@ -897,33 +914,33 @@ def test_custom_time_coord_climatology_average(self): time_dim=time_dim) xr.testing.assert_allclose(result, custom_time_expected) - def test_xr_DataArray_support_climatology_average(self): + def test_xr_DataArray_support_climatology_average(self) -> None: array = self.daily['data'] array_expected = self.day_2_month_clim['data'] result = climatology_average(array, freq='month') xr.testing.assert_allclose(result, array_expected) - def test_non_datetime_like_objects_climatology_average(self): + def test_non_datetime_like_objects_climatology_average(self) -> None: dset_encoded = xr.tutorial.open_dataset("air_temperature", decode_cf=False) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): climatology_average(dset_encoded, freq='month') - def test_non_uniformly_spaced_data_climatology_average(self): + def test_non_uniformly_spaced_data_climatology_average(self) -> None: time = pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-04']) non_uniform = xr.Dataset(data_vars={'data': (('time'), np.arange(3))}, coords={'time': time}) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): climatology_average(non_uniform, freq='day') - @parameterized.expand([ - ('julian_calendar', julian_daily, julian_day_2_month_clim), - ('no_leap_calendar', noleap_daily, noleap_day_2_month_clim), - ('all_leap_calendar', all_leap_daily, all_leap_day_2_month_clim), - ('day_360_calendar', day_360_daily, day_360_leap_day_2_month_clim) - ]) + @pytest.mark.parametrize( + "name, dset, expected", + [('julian_calendar', julian_daily, julian_day_2_month_clim), + ('no_leap_calendar', noleap_daily, noleap_day_2_month_clim), + ('all_leap_calendar', all_leap_daily, all_leap_day_2_month_clim), + ('day_360_calendar', day_360_daily, day_360_leap_day_2_month_clim)]) def test_non_standard_calendars_climatology_average(self, name, dset, - expected): + expected) -> None: result = climatology_average(dset, freq='month') xr.testing.assert_allclose(result, expected) diff --git a/test/test_fourier_filters.py b/test/test_fourier_filters.py index e30b29041..4e1d1c3c6 100644 --- a/test/test_fourier_filters.py +++ b/test/test_fourier_filters.py @@ -1,178 +1,191 @@ import math as m import sys - import numpy as np import xarray as xr from geocat.comp import (fourier_band_block, fourier_band_pass, fourier_high_pass, fourier_low_pass) -freq = 1000 -t = np.arange(1000) / freq -t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - - -def test_one_low_pass(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau)) - t_result = fourier_low_pass(t_data, freq, 15) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_one_high_pass(): - 
t_expected_result = (np.sin(20 * t * m.tau) / 2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_high_pass(t_data, freq, 15) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_one_band_pass(): - t_expected_result = (np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2) - t_result = fourier_band_pass(t_data, freq, 3, 30) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_one_band_block(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_band_block(t_data, freq, 3, 30) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -freq = 1000 -t = np.arange(1000) / freq -t = t[:, None] + t -t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - -def test_two_low_pass(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau)) - t_result = fourier_low_pass(t_data, freq, 15, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_two_high_pass(): - t_expected_result = (np.sin(20 * t * m.tau) / 2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_high_pass(t_data, freq, 15, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_two_band_pass(): - t_expected_result = (np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2) - t_result = fourier_band_pass(t_data, freq, 3, 30, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_two_band_block(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_band_block(t_data, freq, 3, 30, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -freq = 200 -t = np.arange(200) / freq -t = t[:, None] + t -t = t[:, :, None] + t -t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - - -def test_three_low_pass(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau)) - t_result = fourier_low_pass(t_data, freq, 15, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_high_pass(): - t_expected_result = (np.sin(20 * t * m.tau) / 2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_high_pass(t_data, freq, 15, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_band_pass(): - t_expected_result = (np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + - np.sin(20 * t * m.tau) / 2) - t_result = fourier_band_pass(t_data, freq, 3, 30, time_axis=0) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_band_block(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_result = fourier_band_block(t_data, freq, 3, 30, time_axis=0) - 
np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_band_block_t1(): - t_data_ = np.swapaxes(t_data, 1, 0) - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_expected_result = np.swapaxes(t_expected_result, 1, 0) - t_result = fourier_band_block(t_data_, freq, 3, 30, time_axis=1) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_band_block_t2(): - t_data_ = np.swapaxes(t_data, 2, 0) - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_expected_result = np.swapaxes(t_expected_result, 2, 0) - t_result = fourier_band_block(t_data_, freq, 3, 30, time_axis=2) - np.testing.assert_almost_equal(t_result, t_expected_result) - - -def test_three_band_block_xr(): - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_data_ = xr.DataArray(t_data) - t_expected_result = xr.DataArray(t_expected_result) - t_result = fourier_band_block(t_data_, freq, 3, 30, time_axis=0) - np.testing.assert_almost_equal(t_result.data, t_expected_result) - - -def test_three_band_block_t1_xr(): - t_data_ = np.swapaxes(t_data, 1, 0) - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_expected_result = np.swapaxes(t_expected_result, 1, 0) - t_data_ = xr.DataArray(t_data_) - t_expected_result = xr.DataArray(t_expected_result) - t_result = fourier_band_block(t_data_, freq, 3, 30, time_axis=1) - np.testing.assert_almost_equal(t_result.data, t_expected_result) - - -def test_three_band_block_t2_xr(): - t_data_ = np.swapaxes(t_data, 2, 0) - t_expected_result = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + - np.sin(50 * t * m.tau) / 5 + - np.sin(100 * t * m.tau) / 10) - t_expected_result = np.swapaxes(t_expected_result, 2, 0) - t_data_ = xr.DataArray(t_data_) - t_expected_result = xr.DataArray(t_expected_result) - t_result = fourier_band_block(t_data_, freq, 3, 30, time_axis=2) - np.testing.assert_almost_equal(t_result.data, t_expected_result) +class Test_Fourier_One_Bands_Pass: + + freq = 1000 + t = np.arange(1000) / freq + t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + + np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + + np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + + np.sin(100 * t * m.tau) / 10) + + def test_one_low_pass(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau)) + t_result = fourier_low_pass(self.t_data, self.freq, 15) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_one_high_pass(self) -> None: + t_expected_result = (np.sin(20 * self.t * m.tau) / 2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_high_pass(self.t_data, self.freq, 15) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_one_band_pass(self) -> None: + t_expected_result = (np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau) + + np.sin(20 * self.t * m.tau) / 2) + t_result = fourier_band_pass(self.t_data, self.freq, 3, 30) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_one_band_block(self) -> None: + t_expected_result = (np.sin(self.t 
* m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_band_block(self.t_data, self.freq, 3, 30) + np.testing.assert_almost_equal(t_result, t_expected_result) + + +class Test_Fourier_Two_Bands_Pass: + freq = 1000 + t = np.arange(1000) / freq + t = t[:, None] + t + t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + + np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + + np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + + np.sin(100 * t * m.tau) / 10) + + def test_two_low_pass(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau)) + t_result = fourier_low_pass(self.t_data, self.freq, 15, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_two_high_pass(self) -> None: + t_expected_result = (np.sin(20 * self.t * m.tau) / 2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_high_pass(self.t_data, self.freq, 15, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_two_band_pass(self) -> None: + t_expected_result = (np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau) + + np.sin(20 * self.t * m.tau) / 2) + t_result = fourier_band_pass(self.t_data, self.freq, 3, 30, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_two_band_block(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_band_block(self.t_data, + self.freq, + 3, + 30, + time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + +class Test_Fourier_Three_Bands_Pass: + + freq = 200 + t = np.arange(200) / freq + t = t[:, None] + t + t = t[:, :, None] + t + t_data = (np.sin(t * m.tau) / 0.1 + np.sin(2 * t * m.tau) / 0.2 + + np.sin(5 * t * m.tau) / 0.5 + np.sin(10 * t * m.tau) + + np.sin(20 * t * m.tau) / 2 + np.sin(50 * t * m.tau) / 5 + + np.sin(100 * t * m.tau) / 10) + + def test_three_low_pass(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau)) + t_result = fourier_low_pass(self.t_data, self.freq, 15, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_three_high_pass(self) -> None: + t_expected_result = (np.sin(20 * self.t * m.tau) / 2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_high_pass(self.t_data, self.freq, 15, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_three_band_pass(self) -> None: + t_expected_result = (np.sin(5 * self.t * m.tau) / 0.5 + + np.sin(10 * self.t * m.tau) + + np.sin(20 * self.t * m.tau) / 2) + t_result = fourier_band_pass(self.t_data, self.freq, 3, 30, time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_three_band_block(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_result = fourier_band_block(self.t_data, + self.freq, + 3, + 30, + time_axis=0) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def 
test_three_band_block_t1(self) -> None: + t_data_ = np.swapaxes(self.t_data, 1, 0) + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_expected_result = np.swapaxes(t_expected_result, 1, 0) + t_result = fourier_band_block(t_data_, self.freq, 3, 30, time_axis=1) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_three_band_block_t2(self) -> None: + t_data_ = np.swapaxes(self.t_data, 2, 0) + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_expected_result = np.swapaxes(t_expected_result, 2, 0) + t_result = fourier_band_block(t_data_, self.freq, 3, 30, time_axis=2) + np.testing.assert_almost_equal(t_result, t_expected_result) + + def test_three_band_block_xr(self) -> None: + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_data_ = xr.DataArray(self.t_data) + t_expected_result = xr.DataArray(t_expected_result) + t_result = fourier_band_block(t_data_, self.freq, 3, 30, time_axis=0) + np.testing.assert_almost_equal(t_result.data, t_expected_result) + + def test_three_band_block_t1_xr(self) -> None: + t_data_ = np.swapaxes(self.t_data, 1, 0) + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_expected_result = np.swapaxes(t_expected_result, 1, 0) + t_data_ = xr.DataArray(t_data_) + t_expected_result = xr.DataArray(t_expected_result) + t_result = fourier_band_block(t_data_, self.freq, 3, 30, time_axis=1) + np.testing.assert_almost_equal(t_result.data, t_expected_result) + + def test_three_band_block_t2_xr(self) -> None: + t_data_ = np.swapaxes(self.t_data, 2, 0) + t_expected_result = (np.sin(self.t * m.tau) / 0.1 + + np.sin(2 * self.t * m.tau) / 0.2 + + np.sin(50 * self.t * m.tau) / 5 + + np.sin(100 * self.t * m.tau) / 10) + t_expected_result = np.swapaxes(t_expected_result, 2, 0) + t_data_ = xr.DataArray(t_data_) + t_expected_result = xr.DataArray(t_expected_result) + t_result = fourier_band_block(t_data_, self.freq, 3, 30, time_axis=2) + np.testing.assert_almost_equal(t_result.data, t_expected_result) diff --git a/test/test_gradient.py b/test/test_gradient.py index 701c5ac5b..3e8cc63a1 100644 --- a/test/test_gradient.py +++ b/test/test_gradient.py @@ -1,5 +1,5 @@ import sys -import unittest +import pytest import numpy as np import xarray as xr @@ -7,185 +7,91 @@ from geocat.comp import gradient -class Test_Gradient(unittest.TestCase): - test_data_xr = None - test_data_np = None - test_data_dask = None - test_results_lon = None - test_results_lat = None - test_coords_1d_lon = None - test_coords_1d_lat = None - test_coords_2d_lon_np = None - test_coords_2d_lat_np = None - test_coords_1d_lat_np = None - test_coords_1d_lon_np = None +class Test_Gradient: - results = None - results_lon = None - results_lat = None - - @classmethod - def setUpClass(cls): - cls.test_data_xr = xr.load_dataset( + @pytest.fixture(scope="class") + def test_data_xr(self): + return xr.load_dataset( 'test/gradient_test_data.nc').to_array().squeeze() - cls.test_data_xr_nocoords = xr.DataArray(cls.test_data_xr, coords={}) - cls.test_data_np = cls.test_data_xr.values - cls.test_data_dask = cls.test_data_xr.chunk(10) - cls.test_results_lon = 
xr.load_dataset( - 'test/gradient_test_results_longitude.nc').to_array().squeeze() - cls.test_results_lat = xr.load_dataset( - 'test/gradient_test_results_latitude.nc').to_array().squeeze() - cls.test_coords_1d_lon = cls.test_data_xr.coords['lon'] - cls.test_coords_1d_lat = cls.test_data_xr.coords['lat'] - cls.test_coords_2d_lon_np, cls.test_coords_2d_lat_np = np.meshgrid( - cls.test_coords_1d_lon, cls.test_coords_1d_lat) - cls.test_data_xr_2d_coords = xr.DataArray( - cls.test_data_xr, - dims=['x', 'y'], - coords=dict( - lon=(['x', 'y'], cls.test_coords_2d_lon_np), - lat=(['x', 'y'], cls.test_coords_2d_lat_np), - ), - ) - cls.test_coords_1d_lon_np = cls.test_coords_1d_lon.values - cls.test_coords_1d_lat_np = cls.test_coords_1d_lat.values - - def test_gradient_axis0_xr(self): - self.results = gradient(self.test_data_xr) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0.values, - self.test_results_lon.values, - decimal=3, - ) - - def test_gradient_axis1_xr(self): - self.results = gradient(self.test_data_xr) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1.values, - self.test_results_lat.values, - decimal=3, - ) - - def test_gradient_axis0_dask(self): - self.results = gradient(self.test_data_dask) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0.values, - self.test_results_lon.values, - decimal=3, - ) - - def test_gradient_axis1_dask(self): - self.results = gradient(self.test_data_dask) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1.values, - self.test_results_lat.values, - decimal=3, - ) - - def test_gradient_axis0_xr_1d_nocoords(self): - self.results = gradient(self.test_data_xr_nocoords, - lon=self.test_coords_1d_lon, - lat=self.test_coords_1d_lat) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0.values, - self.test_results_lon.values, - decimal=3, - ) - - def test_gradient_axis1_xr_1d_nocoords(self): - self.results = gradient(self.test_data_xr_nocoords, - lon=self.test_coords_1d_lon, - lat=self.test_coords_1d_lat) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1.values, - self.test_results_lat.values, - decimal=3, - ) - - def test_gradient_axis0_xr_2d_nocoords(self): - self.results = gradient(self.test_data_xr_nocoords, - self.test_coords_2d_lon_np, - self.test_coords_2d_lat_np) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0.values, - self.test_results_lon.values, - decimal=3, - ) - - def test_gradient_axis1_xr_2d_nocoords(self): - self.results = gradient(self.test_data_xr_nocoords, - self.test_coords_2d_lon_np, - self.test_coords_2d_lat_np) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1.values, - self.test_results_lat.values, - decimal=3, - ) - - def test_gradient_axis0_xr_2d_coords(self): - self.results = gradient(self.test_data_xr_2d_coords) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0.values, - self.test_results_lon.values, - decimal=3, - ) - def test_gradient_axis1_xr_2d_coords(self): - self.results = gradient(self.test_data_xr_2d_coords) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1.values, - self.test_results_lat.values, - decimal=3, + @pytest.fixture(scope="class") + def expected_results(self): + return [ + xr.load_dataset( + 
'test/gradient_test_results_longitude.nc').to_array().squeeze(), + xr.load_dataset( + 'test/gradient_test_results_latitude.nc').to_array().squeeze() + ] + + @pytest.fixture(scope="class") + def lat_lon_meshgrid(self, test_data_xr): + return np.meshgrid(test_data_xr.coords["lon"], + test_data_xr.coords["lat"]) + + def test_gradient_xr(self, test_data_xr, expected_results) -> None: + actual_result = gradient(test_data_xr) + np.testing.assert_almost_equal(np.array(actual_result), + np.array(expected_results), + decimal=3) + + def test_gradient_dask(self, test_data_xr, expected_results) -> None: + actual_result = gradient(test_data_xr.chunk(10)) + np.testing.assert_almost_equal(np.array(actual_result), + np.array(expected_results), + decimal=3) + + def test_gradient_xr_1d_nocoords(self, test_data_xr, + expected_results) -> None: + actual_result = gradient(xr.DataArray(test_data_xr, coords={}), + lon=test_data_xr.coords["lon"], + lat=test_data_xr.coords["lat"]) + np.testing.assert_almost_equal(np.array(actual_result), + np.array(expected_results), + decimal=3) + + def test_gradient_xr_2d_nocoords(self, test_data_xr, expected_results, + lat_lon_meshgrid) -> None: + (lon_2d, lat_2d) = lat_lon_meshgrid + actual_result = gradient( + xr.DataArray(test_data_xr, coords={}), + lon=lon_2d, + lat=lat_2d, ) - - def test_gradient_axis0_np_1d_nocoords(self): - self.results = gradient(self.test_data_np, - lon=self.test_coords_1d_lon_np, - lat=self.test_coords_1d_lat_np) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0, - self.test_results_lon.values, - decimal=3, - ) - - def test_gradient_axis1_np_1d_nocoords(self): - self.results = gradient(self.test_data_np, - lon=self.test_coords_1d_lon_np, - lat=self.test_coords_1d_lat_np) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1, - self.test_results_lat.values, - decimal=3, - ) - - def test_gradient_axis0_np_2d_nocoords(self): - self.results = gradient(self.test_data_np, self.test_coords_2d_lon_np, - self.test_coords_2d_lat_np) - self.results_axis0 = self.results[0] - np.testing.assert_almost_equal( - self.results_axis0, - self.test_results_lon.values, - decimal=3, + np.testing.assert_almost_equal(np.array(actual_result), + np.array(expected_results), + decimal=3) + + def test_gradient_xr_2d_coords(self, test_data_xr, expected_results, + lat_lon_meshgrid) -> None: + test_data_xr_2d_coords = xr.DataArray( + test_data_xr, + dims=["x", "y"], + coords=dict( + lon=(["x", "y"], lat_lon_meshgrid[0]), + lat=(["x", "y"], lat_lon_meshgrid[1]), + ), ) - - def test_gradient_axis1_np_2d_nocoords(self): - self.results = gradient(self.test_data_np, self.test_coords_2d_lon_np, - self.test_coords_2d_lat_np) - self.results_axis1 = self.results[1] - np.testing.assert_almost_equal( - self.results_axis1, - self.test_results_lat.values, - decimal=3, + actual_result = gradient(test_data_xr_2d_coords) + np.testing.assert_almost_equal(np.array(actual_result), + np.array(expected_results), + decimal=3) + + def test_gradient_np_1d_nocoords(self, test_data_xr, + expected_results) -> None: + actual_result = gradient( + test_data_xr.values, + lon=test_data_xr.coords["lon"].values, + lat=test_data_xr.coords["lat"].values, ) + np.testing.assert_almost_equal(actual_result, + np.array(expected_results), + decimal=3) + + def test_gradient_np_2d_nocoords(self, test_data_xr, expected_results, + lat_lon_meshgrid) -> None: + (lon_2d, lat_2d) = lat_lon_meshgrid + actual_result = gradient(test_data_xr.values, 
lon_2d, lat_2d) + + np.testing.assert_almost_equal(actual_result, + np.array(expected_results), + decimal=3) diff --git a/test/test_interpolation.py b/test/test_interpolation.py index 21ecd07cc..279ddb66c 100644 --- a/test/test_interpolation.py +++ b/test/test_interpolation.py @@ -1,11 +1,10 @@ import sys -import unittest -from unittest import TestCase import geocat.datafiles as gdf import numpy as np import numpy.testing as nt import xarray as xr +import pytest from geocat.comp import interp_multidim, interp_hybrid_to_pressure, interp_sigma_to_hybrid @@ -23,27 +22,26 @@ _p0 = 1000. * 100 # Pa -class Test_interp_hybrid_to_pressure(TestCase): +class Test_interp_hybrid_to_pressure: + + # Expected output from above sample input + @pytest.fixture(scope="class") + def ds_out(self): + try: + return xr.open_dataset( + "vinth2p_output.nc" + ) # Generated by running ncl_tests/vinth2p_test_conwomap_5.ncl on + # atmos.nc + except: + return xr.open_dataset("test/vinth2p_output.nc") + # Sample input data data = ds_atmos.U[0, :, :, :] ps = ds_atmos.PS pres3d = np.asarray([1000, 950, 800, 700, 600, 500, 400, 300, 200]) # mb pres3d = pres3d * 100 # mb to Pa - # Expected output from above sample input - - try: - ds_out = xr.open_dataset( - "vinth2p_output.nc" - ) # Generated by running ncl_tests/vinth2p_test_conwomap_5.ncl on - # atmos.nc - except: - ds_out = xr.open_dataset("test/vinth2p_output.nc") - - uzon_expected = ds_out.uzon # Expected output - u_int_expected = ds_out.u_int # Expected output - - def test_interp_hybrid_to_pressure_atmos(self): + def test_interp_hybrid_to_pressure_atmos(self, ds_out) -> None: u_int = interp_hybrid_to_pressure(self.data, self.ps[0, :, :], _hyam, @@ -54,9 +52,9 @@ def test_interp_hybrid_to_pressure_atmos(self): uzon = u_int.mean(dim='lon') - nt.assert_array_almost_equal(self.uzon_expected, uzon, 5) + nt.assert_array_almost_equal(ds_out.uzon, uzon, 5) - def test_interp_hybrid_to_pressure_atmos_4d(self): + def test_interp_hybrid_to_pressure_atmos_4d(self, ds_out) -> None: data_t = self.data.expand_dims("time") u_int = interp_hybrid_to_pressure(data_t, @@ -69,11 +67,11 @@ def test_interp_hybrid_to_pressure_atmos_4d(self): uzon = u_int.mean(dim='lon') - uzon_expected_t = self.uzon_expected.expand_dims("time") + uzon_expected_t = ds_out.uzon.expand_dims("time") nt.assert_array_almost_equal(uzon_expected_t, uzon, 5) - def test_interp_hybrid_to_pressure_atmos_wrong_method(self): - with nt.assert_raises(ValueError): + def test_interp_hybrid_to_pressure_atmos_wrong_method(self) -> None: + with pytest.raises(ValueError): u_int = interp_hybrid_to_pressure(self.data, self.ps[0, :, :], _hyam, @@ -82,7 +80,7 @@ def test_interp_hybrid_to_pressure_atmos_wrong_method(self): new_levels=self.pres3d, method="wrong_method") - def test_interp_hybrid_to_pressure_atmos_dask(self): + def test_interp_hybrid_to_pressure_atmos_dask(self, ds_out) -> None: ps_dask = self.ps.chunk() data_dask = self.data.chunk() @@ -97,392 +95,420 @@ def test_interp_hybrid_to_pressure_atmos_dask(self): uzon = u_int.mean(dim='lon') - nt.assert_array_almost_equal(self.uzon_expected, uzon, 5) - - -class Test_interp_hybrid_to_pressure_extrapolate(TestCase): - # Open the netCDF data file with the input data - try: - ds_ccsm = xr.open_dataset( - gdf.get("netcdf_files/ccsm35.h0.0021-01.demo.nc"), - decode_times=False) - except: - ds_ccsm = xr.open_dataset("test/ccsm35.h0.0021-01.demo.nc", - decode_times=False) - - # Open the netCDF file with the output data from running vinth2p_ecmwf.ncl - try: - ds_out = 
xr.open_dataset("test/vinth2p_ecmwf_output.nc", - decode_times=False) - except: - ds_out = xr.open_dataset("vinth2p_ecmwf_output.nc", decode_times=False) - - # Pull out inputs - _hyam = ds_ccsm.hyam - _hybm = ds_ccsm.hybm - temp_in = ds_ccsm.T[:, :, :3, :2] - t_bot = ds_ccsm.TS[:, :3, :2] - geopotential_in = ds_ccsm.Z3[:, :, :3, :2] - humidity_in = ds_ccsm.Q[:, :, :3, :2] * 1000 # g/kg - press_in = ds_ccsm.PS[:, :3, :2] - phis = ds_ccsm.PHIS[:, :3, :2] - - temp_interp_expected = ds_out.Tp.rename(lev_p='plev') - temp_extrap_expected = ds_out.Tpx.rename(lev_p='plev') - geopotential_extrap_expected = ds_out.Zpx.rename(lev_p='plev') - humidity_extrap_expected = ds_out.Qpx.rename(lev_p='plev') + nt.assert_array_almost_equal(ds_out.uzon, uzon, 5) + + +class Test_interp_hybrid_to_pressure_extrapolate: + + @pytest.fixture(scope="class") + def ds_ccsm(self): + # Open the netCDF data file with the input data + try: + return xr.open_dataset( + gdf.get("netcdf_files/ccsm35.h0.0021-01.demo.nc"), + decode_times=False) + except: + return xr.open_dataset("test/ccsm35.h0.0021-01.demo.nc", + decode_times=False) + + @pytest.fixture(scope="class") + def ds_out(self): + # Open the netCDF file with the output data from running vinth2p_ecmwf.ncl + try: + return xr.open_dataset("test/vinth2p_ecmwf_output.nc", + decode_times=False) + except: + return xr.open_dataset("vinth2p_ecmwf_output.nc", + decode_times=False) + + @pytest.fixture(scope="class") + def _hyam(self, ds_ccsm): + return ds_ccsm.hyam + + @pytest.fixture(scope="class") + def _hybm(self, ds_ccsm): + return ds_ccsm.hybm + + @pytest.fixture(scope="class") + def temp_in(self, ds_ccsm): + return ds_ccsm.T[:, :, :3, :2] + + @pytest.fixture(scope="class") + def t_bot(self, ds_ccsm): + return ds_ccsm.TS[:, :3, :2] + + @pytest.fixture(scope="class") + def geopotential_in(self, ds_ccsm): + return ds_ccsm.Z3[:, :, :3, :2] + + @pytest.fixture(scope="class") + def humidity_in(self, ds_ccsm): + return ds_ccsm.Q[:, :, :3, :2] * 1000 # g/kg + + @pytest.fixture(scope="class") + def press_in(self, ds_ccsm): + return ds_ccsm.PS[:, :3, :2] + + @pytest.fixture(scope="class") + def phis(self, ds_ccsm): + return ds_ccsm.PHIS[:, :3, :2] new_levels = np.asarray([500, 925, 950, 1000]) new_levels *= 100 # new levels in Pa _p0 = 1000 * 100 # reference pressure in Pa - def test_interp_hybrid_to_pressure_interp_temp(self): - result = interp_hybrid_to_pressure(self.temp_in, - self.press_in, - self._hyam, - self._hybm, + def test_interp_hybrid_to_pressure_interp_temp(self, temp_in, press_in, + _hyam, _hybm, + ds_out) -> None: + result = interp_hybrid_to_pressure(temp_in, + press_in, + _hyam, + _hybm, p0=self._p0, new_levels=self.new_levels, method="linear") result = result.transpose('time', 'plev', 'lat', 'lon') result = result.assign_coords(dict(plev=self.new_levels / 100)) - xr.testing.assert_allclose(self.temp_interp_expected, result) - - def test_interp_hybrid_to_pressure_extrap_temp(self): - result = interp_hybrid_to_pressure(self.temp_in, - self.press_in, - self._hyam, - self._hybm, + temp_interp_expected = ds_out.Tp.rename(lev_p='plev') + xr.testing.assert_allclose(temp_interp_expected, result) + + def test_interp_hybrid_to_pressure_extrap_temp(self, temp_in, press_in, + _hyam, _hybm, t_bot, phis, + ds_out) -> None: + result = interp_hybrid_to_pressure(temp_in, + press_in, + _hyam, + _hybm, p0=self._p0, new_levels=self.new_levels, method="linear", extrapolate=True, variable='temperature', - t_bot=self.t_bot, - phi_sfc=self.phis) + t_bot=t_bot, + phi_sfc=phis) result = 
result.transpose('time', 'plev', 'lat', 'lon') result = result.assign_coords(dict(plev=self.new_levels / 100)) - xr.testing.assert_allclose(self.temp_extrap_expected, result) - - def test_interp_hybrid_to_pressure_extrap_geopotential(self): - result = interp_hybrid_to_pressure(self.geopotential_in, - self.press_in, - self._hyam, - self._hybm, + temp_extrap_expected = ds_out.Tpx.rename(lev_p='plev') + xr.testing.assert_allclose(temp_extrap_expected, result) + + def test_interp_hybrid_to_pressure_extrap_geopotential( + self, geopotential_in, press_in, _hyam, _hybm, t_bot, phis, + ds_out) -> None: + result = interp_hybrid_to_pressure(geopotential_in, + press_in, + _hyam, + _hybm, p0=self._p0, new_levels=self.new_levels, method="linear", extrapolate=True, variable='geopotential', - t_bot=self.t_bot, - phi_sfc=self.phis) + t_bot=t_bot, + phi_sfc=phis) result = result.transpose('time', 'plev', 'lat', 'lon') result = result.assign_coords(dict(plev=self.new_levels / 100)) - xr.testing.assert_allclose(self.geopotential_extrap_expected, result) - - def test_interp_hybrid_to_pressure_extrap_other(self): - result = interp_hybrid_to_pressure(self.humidity_in, - self.press_in, - self._hyam, - self._hybm, + geopotential_extrap_expected = ds_out.Zpx.rename(lev_p='plev') + xr.testing.assert_allclose(geopotential_extrap_expected, result) + + def test_interp_hybrid_to_pressure_extrap_other(self, humidity_in, press_in, + _hyam, _hybm, t_bot, phis, + ds_out) -> None: + result = interp_hybrid_to_pressure(humidity_in, + press_in, + _hyam, + _hybm, p0=self._p0, new_levels=self.new_levels, method="linear", extrapolate=True, variable='other', - t_bot=self.t_bot, - phi_sfc=self.phis) + t_bot=t_bot, + phi_sfc=phis) result = result.transpose('time', 'plev', 'lat', 'lon') result = result.assign_coords(dict(plev=self.new_levels / 100)) - xr.testing.assert_allclose(self.humidity_extrap_expected, result) - - def test_interp_hybrid_to_pressure_extrap_kwargs(self): - self.assertRaises(ValueError, - interp_hybrid_to_pressure, - self.humidity_in, - self.press_in, - self._hyam, - self._hybm, - p0=self._p0, - new_levels=self.new_levels, - method="linear", - extrapolate=True) - - def test_interp_hybrid_to_pressure_extrap_invalid_var(self): - self.assertRaises(ValueError, - interp_hybrid_to_pressure, - self.humidity_in, - self.press_in, - self._hyam, - self._hybm, - p0=self._p0, - new_levels=self.new_levels, - method="linear", - extrapolate=True, - variable=' ', - t_bot=self.t_bot, - phi_sfc=self.phis) - - -class Test_interp_sigma_to_hybrid(TestCase): + humidity_extrap_expected = ds_out.Qpx.rename(lev_p='plev') + xr.testing.assert_allclose(humidity_extrap_expected, result) + + def test_interp_hybrid_to_pressure_extrap_kwargs(self, humidity_in, + press_in, _hyam, + _hybm) -> None: + with pytest.raises(ValueError): + interp_hybrid_to_pressure(humidity_in, + press_in, + _hyam, + _hybm, + p0=self._p0, + new_levels=self.new_levels, + method="linear", + extrapolate=True) + + def test_interp_hybrid_to_pressure_extrap_invalid_var( + self, humidity_in, press_in, _hyam, _hybm, t_bot, phis) -> None: + with pytest.raises(ValueError): + interp_hybrid_to_pressure(humidity_in, + press_in, + _hyam, + _hybm, + p0=self._p0, + new_levels=self.new_levels, + method="linear", + extrapolate=True, + variable=' ', + t_bot=t_bot, + phi_sfc=phis) + + +class Test_interp_sigma_to_hybrid: + + @pytest.fixture(scope="class") + def ds_u(self): + # Open the netCDF data file "u.89335.1.nc" and read in input data + try: + return xr.open_dataset( + 
gdf.get("netcdf_files/u.89335.1_subset_time361.nc"), + decode_times=False) + except: + return xr.open_dataset("test/u.89335.1_subset_time361.nc", + decode_times=False) + + @pytest.fixture(scope="class") + def ds_ps(self): + # Open the netCDF data file "ps.89335.1.nc" and read in additional input + # data + try: + return xr.open_dataset(gdf.get("netcdf_files/ps.89335.1.nc"), + decode_times=False) + except: + return xr.open_dataset("test/ps.89335.1.nc", decode_times=False) + + @pytest.fixture(scope="class") + def ds_out(self): + # Expected output from above sample input + try: + return xr.open_dataset( + "sigma2hybrid_output.nc" + ) # Generated by running ncl_tests/test_sigma2hybrid.ncl + except: + return xr.open_dataset("test/sigma2hybrid_output.nc") + hyam = xr.DataArray([0.0108093, 0.0130731, 0.03255911, 0.0639471]) hybm = xr.DataArray([0.0108093, 0.0173664, 0.06069280, 0.1158237]) - # Open the netCDF data file "u.89335.1.nc" and read in input data - try: - ds_u = xr.open_dataset( - gdf.get("netcdf_files/u.89335.1_subset_time361.nc"), - decode_times=False) - except: - ds_u = xr.open_dataset("test/u.89335.1_subset_time361.nc", - decode_times=False) + @pytest.fixture(scope="class") + def u(self, ds_u): + return ds_u.u[:, 0:3, 0:2] - u = ds_u.u[:, 0:3, 0:2] + @pytest.fixture(scope="class") + def ps(self, ds_ps): + return ds_ps.ps[361, 0:3, 0:2] * 100 # Pa - # Open the netCDF data file "ps.89335.1.nc" and read in additional input - # data - try: - ds_ps = xr.open_dataset(gdf.get("netcdf_files/ps.89335.1.nc"), - decode_times=False) - except: - ds_ps = xr.open_dataset("test/ps.89335.1.nc", decode_times=False) + @pytest.fixture(scope="class") + def sigma(self, ds_ps): + return ds_ps.sigma - ps = ds_ps.ps[361, 0:3, 0:2] * 100 # Pa - sigma = ds_ps.sigma + @pytest.fixture(scope="class") + def xh_expected(self, ds_out): + return ds_out.xh.transpose("ncl3", "ncl1", "ncl2") # Expected output - # Expected output from above sample input - try: - ds_out = xr.open_dataset( - "sigma2hybrid_output.nc" - ) # Generated by running ncl_tests/test_sigma2hybrid.ncl - except: - ds_out = xr.open_dataset("test/sigma2hybrid_output.nc") - - xh_expected = ds_out.xh.transpose("ncl3", "ncl1", "ncl2") # Expected output - - def test_interp_sigma_to_hybrid_1d(self): - xh = interp_sigma_to_hybrid(self.u[:, 0, 0], - self.sigma, - self.ps[0, 0], + def test_interp_sigma_to_hybrid_1d(self, u, sigma, ps, xh_expected) -> None: + xh = interp_sigma_to_hybrid(u[:, 0, 0], + sigma, + ps[0, 0], self.hyam, self.hybm, p0=_p0, method="linear") - nt.assert_array_almost_equal(self.xh_expected[:, 0, 0], xh, 5) + nt.assert_array_almost_equal(xh_expected[:, 0, 0], xh, 5) - def test_interp_sigma_to_hybrid_3d(self): - xh = interp_sigma_to_hybrid(self.u, - self.sigma, - self.ps, + def test_interp_sigma_to_hybrid_3d(self, u, sigma, ps, xh_expected) -> None: + xh = interp_sigma_to_hybrid(u, + sigma, + ps, self.hyam, self.hybm, p0=_p0, method="linear") - nt.assert_array_almost_equal(self.xh_expected, xh, 5) + nt.assert_array_almost_equal(xh_expected, xh, 5) - def test_interp_sigma_to_hybrid_3d_transposed(self): - xh = interp_sigma_to_hybrid(self.u.transpose('ycoord', 'sigma', - 'xcoord'), - self.sigma, - self.ps.transpose('ycoord', 'xcoord'), + def test_interp_sigma_to_hybrid_3d_transposed(self, u, sigma, ps, + xh_expected) -> None: + xh = interp_sigma_to_hybrid(u.transpose('ycoord', 'sigma', 'xcoord'), + sigma, + ps.transpose('ycoord', 'xcoord'), self.hyam, self.hybm, p0=_p0, method="linear") nt.assert_array_almost_equal( - 
self.xh_expected.transpose('ncl2', 'ncl3', 'ncl1'), xh, 5) + xh_expected.transpose('ncl2', 'ncl3', 'ncl1'), xh, 5) - def test_interp_sigma_to_hybrid_3d_dask(self): + def test_interp_sigma_to_hybrid_3d_dask(self, ps, u, sigma, + xh_expected) -> None: - ps_dask = self.ps.chunk() - u_dask = self.u.chunk() + ps_dask = ps.chunk() + u_dask = u.chunk() xh = interp_sigma_to_hybrid(u_dask, - self.sigma, + sigma, ps_dask, self.hyam, self.hybm, p0=_p0, method="linear") - nt.assert_array_almost_equal(self.xh_expected, xh, 5) + nt.assert_array_almost_equal(xh_expected, xh, 5) - def test_interp_sigma_to_hybrid_wrong_method(self): - with nt.assert_raises(ValueError): - xh = interp_sigma_to_hybrid(self.u, - self.sigma, - self.ps, + def test_interp_sigma_to_hybrid_wrong_method(self, u, sigma, ps) -> None: + with pytest.raises(ValueError): + xh = interp_sigma_to_hybrid(u, + sigma, + ps, self.hyam, self.hybm, p0=_p0, method="wrong_method") -class Test_interp_manually_calc(unittest.TestCase): +class Test_interp_manually_calc: - @classmethod - def setUpClass(cls): - cls.test_input = xr.load_dataset( + @pytest.fixture(scope="class") + def test_input(self): + return xr.load_dataset( gdf.get("netcdf_files/interpolation_test_input_data.nc")) - cls.test_output = xr.load_dataset( + @pytest.fixture(scope="class") + def test_output(self): + return xr.load_dataset( gdf.get("netcdf_files/interpolation_test_output_data.nc")) - cls.data_in = cls.test_input['normal'] - cls.data_out = cls.test_output['normal'] - - cls.lat_in = cls.data_in['lat'].values - cls.lat_out = cls.data_out['lat'].values - cls.lon_in = cls.data_in['lon'].values - cls.lon_out = cls.data_out['lon'].values - - cls.data_in_nan = cls.test_input['nan'] - cls.data_out_nan = cls.test_output['nan'] - - cls.data_in_nan_2 = cls.test_input['nan_2'] - cls.data_out_nan_2 = cls.test_output['nan_2'] - - cls.data_in_missing = cls.test_input['missing'] - cls.data_out_missing = cls.test_output['missing'] - - cls.data_in_mask = cls.test_input['mask'] - cls.data_out_mask = cls.test_output['mask'] - - def test_float32(self): + def test_float32(self, test_input, test_output) -> None: np.testing.assert_almost_equal( - self.data_out.values.astype(np.float32), - interp_multidim(xr.DataArray(self.data_in.values.astype(np.float32), - dims=['lat', 'lon'], - coords={ - 'lat': self.lat_in, - 'lon': self.lon_in, - }), - self.lat_out, - self.lon_out, + test_output['normal'].values.astype(np.float32), + interp_multidim(xr.DataArray( + test_input['normal'].values.astype(np.float32), + dims=['lat', 'lon'], + coords={ + 'lat': test_input['normal']['lat'].values, + 'lon': test_input['normal']['lon'].values, + }), + test_output['normal']['lat'].values, + test_output['normal']['lon'].values, cyclic=True).values, decimal=7) - def test_float64(self): + def test_float64(self, test_input, test_output) -> None: np.testing.assert_almost_equal( - self.data_out.values.astype(np.float64), + test_output['normal'].values.astype(np.float64), interp_multidim( - xr.DataArray(self.data_in.values.astype(np.float64), + xr.DataArray(test_input['normal'].values.astype(np.float64), dims=['lat', 'lon'], coords={ - 'lat': self.lat_in, - 'lon': self.lon_in, + 'lat': test_input['normal']['lat'].values, + 'lon': test_input['normal']['lon'].values, }), - self.lat_out, - self.lon_out, + test_output['normal']['lat'].values, + test_output['normal']['lon'].values, cyclic=True, ).values, decimal=8, ) - def test_missing(self): + def test_missing(self, test_input, test_output) -> None: np.testing.assert_almost_equal( - 
-            self.data_out_missing,
+            test_output['missing'],
             interp_multidim(
-                self.data_in_missing,
-                self.lat_out,
-                self.lon_out,
+                test_input['missing'],
+                test_output['normal']['lat'].values,
+                test_output['normal']['lon'].values,
                 cyclic=True,
             ).values,
             decimal=8,
         )

-    def test_nan(self):
+    def test_nan(self, test_input, test_output) -> None:
         np.testing.assert_almost_equal(
-            self.data_out_nan,
+            test_output['nan'],
             interp_multidim(
-                self.data_in_nan,
-                self.lat_out,
-                self.lon_out,
+                test_input['nan'],
+                test_output['normal']['lat'].values,
+                test_output['normal']['lon'].values,
                 cyclic=True,
             ).values,
             decimal=8,
         )

-    def test_mask(self):
+    def test_mask(self, test_input, test_output) -> None:
         np.testing.assert_almost_equal(
-            self.data_out_mask,
+            test_output['mask'],
             interp_multidim(
-                self.data_in_mask,
-                self.lat_out,
-                self.lon_out,
+                test_input['mask'],
+                test_output['normal']['lat'].values,
+                test_output['normal']['lon'].values,
                 cyclic=True,
             ).values,
             decimal=8,
         )

-    def test_2_nans(self):
+    def test_2_nans(self, test_input, test_output) -> None:
         np.testing.assert_almost_equal(
-            self.data_out_nan_2,
+            test_output['nan_2'],
             interp_multidim(
-                self.data_in_nan_2,
-                self.lat_out,
-                self.lon_out,
+                test_input['nan_2'],
+                test_output['normal']['lat'].values,
+                test_output['normal']['lon'].values,
                 cyclic=True,
             ).values,
             decimal=8,
         )

-    def test_numpy(self):
-        np.testing.assert_almost_equal(self.data_out.values,
-                                       interp_multidim(
-                                           self.data_in.values,
-                                           self.lat_out,
-                                           self.lon_out,
-                                           lat_in=self.lat_in,
-                                           lon_in=self.lon_in,
-                                           cyclic=True,
-                                       ),
-                                       decimal=8)
+    def test_numpy(self, test_input, test_output) -> None:
+        np.testing.assert_almost_equal(
+            test_output['normal'].values,
+            interp_multidim(
+                test_input['normal'].values,
+                test_output['normal']['lat'].values,
+                test_output['normal']['lon'].values,
+                lat_in=test_input['normal']['lat'].values,
+                lon_in=test_input['normal']['lon'].values,
+                cyclic=True,
+            ),
+            decimal=8)

-    def test_extrapolate(self):
-        np.testing.assert_almost_equal(self.data_out.values,
+    def test_extrapolate(self, test_input, test_output) -> None:
+        np.testing.assert_almost_equal(test_output['normal'].values,
                                        interp_multidim(
-                                           self.data_in,
-                                           self.lat_out,
-                                           self.lon_out,
+                                           test_input['normal'],
+                                           test_output['normal']['lat'].values,
+                                           test_output['normal']['lon'].values,
                                            cyclic=True,
                                            fill_value='extrapolate',
                                        ),
                                        decimal=8)


-class Test_interp_larger_dataset(unittest.TestCase):
-    test_input = None
-    test_output = None
-    test_lat_output = None
-    test_lon_output = None
-    test_data_chunked = None
+class Test_interp_larger_dataset:

-    @classmethod
-    def setUpClass(cls):
-        cls.test_input = xr.load_dataset(
+    @pytest.fixture(scope="class")
+    def test_input(self):
+        return xr.load_dataset(
             gdf.get("netcdf_files/spherical_noise_input.nc"))['spherical_noise']

-        cls.test_output = xr.load_dataset(
-            gdf.get(
-                "netcdf_files/spherical_noise_output.nc"))['spherical_noise']
+    @pytest.fixture(scope="class")
+    def test_output(self):
+        return xr.load_dataset(gdf.get(
+            "netcdf_files/spherical_noise_output.nc"))['spherical_noise']

-        cls.test_data_chunked = cls.test_input.chunk(2)
-
-    def test_10x(self):
-        data_xr = interp_multidim(self.test_input,
-                                  self.test_output.coords['lat'],
-                                  self.test_output.coords['lon'])
+    def test_10x(self, test_input, test_output) -> None:
+        data_xr = interp_multidim(test_input, test_output.coords['lat'],
+                                  test_output.coords['lon'])

         np.testing.assert_almost_equal(
-            self.test_output,
+            test_output,
             data_xr.values,
             decimal=8,
         )

-    def test_chunked(self):
-        data_xr
= interp_multidim(self.test_data_chunked, - self.test_output.coords['lat'], - self.test_output.coords['lon']) + def test_chunked(self, test_input, test_output) -> None: + data_xr = interp_multidim(test_input.chunk(2), + test_output.coords['lat'], + test_output.coords['lon']) - np.testing.assert_almost_equal(self.test_output, - data_xr.values, - decimal=8) + np.testing.assert_almost_equal(test_output, data_xr.values, decimal=8) diff --git a/test/test_meteorology.py b/test/test_meteorology.py index 1c91c66d3..e57e60461 100644 --- a/test/test_meteorology.py +++ b/test/test_meteorology.py @@ -1,5 +1,5 @@ import sys -import unittest +import pytest import dask.array import dask.distributed as dd @@ -16,246 +16,243 @@ saturation_vapor_pressure, saturation_vapor_pressure_slope, delta_pressure) -class Test_dewtemp(unittest.TestCase): +@pytest.fixture(scope="module") +def client() -> None: + # dask client reference for all subsequent tests + client = dd.Client() + yield client + client.close() - @classmethod - def setUpClass(cls): - # set up ground truths - cls.t_def = [ - 29.3, 28.1, 23.5, 20.9, 18.4, 15.9, 13.1, 10.1, 6.7, 3.1, -0.5, - -4.5, -9.0, -14.8, -21.5, -29.7, -40.0, -52.4 - ] - cls.rh_def = [ - 75.0, 60.0, 61.1, 76.7, 90.5, 89.8, 78.3, 76.5, 46.0, 55.0, 63.8, - 53.2, 42.9, 41.7, 51.0, 70.6, 50.0, 50.0 - ] +class Test_dewtemp: - cls.dt_1 = 6.3 + # ground truths + t_def = [ + 29.3, 28.1, 23.5, 20.9, 18.4, 15.9, 13.1, 10.1, 6.7, 3.1, -0.5, -4.5, + -9.0, -14.8, -21.5, -29.7, -40.0, -52.4 + ] - cls.dt_2 = [ - 24.38342, 19.55563, 15.53281, 16.64218, 16.81433, 14.22482, - 9.401337, 6.149719, -4.1604, -5.096619, -6.528168, -12.61957, - -19.38332, -25.00714, -28.9841, -33.34853, -46.51273, -58.18289 - ] + rh_def = [ + 75.0, 60.0, 61.1, 76.7, 90.5, 89.8, 78.3, 76.5, 46.0, 55.0, 63.8, 53.2, + 42.9, 41.7, 51.0, 70.6, 50.0, 50.0 + ] - # make dask client to reference in subsequent tests - cls.client = dd.Client() + dt_1 = 6.3 - def test_float_input(self): - tk = 18. 
+ 273.15 + dt_2 = [ + 24.38342, 19.55563, 15.53281, 16.64218, 16.81433, 14.22482, 9.401337, + 6.149719, -4.1604, -5.096619, -6.528168, -12.61957, -19.38332, + -25.00714, -28.9841, -33.34853, -46.51273, -58.18289 + ] + + def test_float_input(self) -> None: + tk = 18.0 + 273.15 rh = 46.5 assert np.allclose(dewtemp(tk, rh) - 273.15, self.dt_1, 0.1) - def test_list_input(self): + def test_list_input(self) -> None: tk = (np.asarray(self.t_def) + 273.15).tolist() assert np.allclose(dewtemp(tk, self.rh_def) - 273.15, self.dt_2, 0.1) - def test_numpy_input(self): + def test_numpy_input(self) -> None: tk = np.asarray(self.t_def) + 273.15 rh = np.asarray(self.rh_def) assert np.allclose(dewtemp(tk, rh) - 273.15, self.dt_2, 0.1) - def test_xarray_input(self): + def test_xarray_input(self) -> None: tk = xr.DataArray(np.asarray(self.t_def) + 273.15) rh = xr.DataArray(self.rh_def) assert np.allclose(dewtemp(tk, rh) - 273.15, self.dt_2, 0.1) - def test_dims_error(self): - self.assertRaises(ValueError, dewtemp, self.t_def[:10], self.rh_def[:8]) + def test_dims_error(self) -> None: + with pytest.raises(ValueError): + dewtemp(self.t_def[:10], self.rh_def[:8]) - def test_xarray_type_error(self): - self.assertRaises(TypeError, dewtemp, self.t_def, - xr.DataArray(self.rh_def)) + def test_xarray_type_error(self) -> None: + with pytest.raises(TypeError): + dewtemp(self.t_def, xr.DataArray(self.rh_def)) - def test_dask_compute(self): + def test_dask_compute(self) -> None: tk = xr.DataArray(np.asarray(self.t_def) + 273.15).chunk(6) rh = xr.DataArray(self.rh_def).chunk(6) assert np.allclose(dewtemp(tk, rh) - 273.15, self.dt_2, atol=0.1) - def test_dask_lazy(self): + def test_dask_lazy(self) -> None: tk = xr.DataArray(np.asarray(self.t_def) + 273.15).chunk(6) rh = xr.DataArray(self.rh_def).chunk(6) assert isinstance((dewtemp(tk, rh) - 273.15).data, dask.array.Array) -class Test_heat_index(unittest.TestCase): - - @classmethod - def setUpClass(cls): - # set up ground truths - cls.ncl_gt_1 = [ - 137.36142, 135.86795, 104.684456, 131.25621, 105.39449, 79.78999, - 83.57511, 59.965, 30. - ] - cls.ncl_gt_2 = [ - 68.585, 76.13114, 75.12854, 99.43573, 104.93261, 93.73293, - 104.328705, 123.23398, 150.34001, 106.87023 - ] +class Test_heat_index: - cls.t1 = np.array([104, 100, 92, 92, 86, 80, 80, 60, 30]) - cls.rh1 = np.array([55, 65, 60, 90, 90, 40, 75, 90, 50]) + # set up ground truths + ncl_gt_1 = [ + 137.36142, 135.86795, 104.684456, 131.25621, 105.39449, 79.78999, + 83.57511, 59.965, 30. 
+ ] + ncl_gt_2 = [ + 68.585, 76.13114, 75.12854, 99.43573, 104.93261, 93.73293, 104.328705, + 123.23398, 150.34001, 106.87023 + ] - cls.t2 = np.array([70, 75, 80, 85, 90, 95, 100, 105, 110, 115]) - cls.rh2 = np.array([10, 75, 15, 80, 65, 25, 30, 40, 50, 5]) + t1 = np.array([104, 100, 92, 92, 86, 80, 80, 60, 30]) + rh1 = np.array([55, 65, 60, 90, 90, 40, 75, 90, 50]) - # make client to reference in subsequent tests - cls.client = dd.Client() + t2 = np.array([70, 75, 80, 85, 90, 95, 100, 105, 110, 115]) + rh2 = np.array([10, 75, 15, 80, 65, 25, 30, 40, 50, 5]) - def test_numpy_input(self): + def test_numpy_input(self) -> None: assert np.allclose(heat_index(self.t1, self.rh1, False), self.ncl_gt_1, atol=0.005) - def test_multi_dimensional_input(self): + def test_multi_dimensional_input(self) -> None: assert np.allclose(heat_index(self.t2.reshape(2, 5), self.rh2.reshape(2, 5), True), np.asarray(self.ncl_gt_2).reshape(2, 5), atol=0.005) - def test_alt_coef(self): + def test_alt_coef(self) -> None: assert np.allclose(heat_index(self.t2, self.rh2, True), self.ncl_gt_2, atol=0.005) - def test_xarray_alt_coef(self): + def test_xarray_alt_coef(self) -> None: assert np.allclose(heat_index(xr.DataArray(self.t2), xr.DataArray(self.rh2), True), self.ncl_gt_2, atol=0.005) - def test_float_input(self): + def test_float_input(self) -> None: assert np.allclose(heat_index(80, 75), 83.5751, atol=0.005) - def test_list_input(self): + def test_list_input(self) -> None: assert np.allclose(heat_index(self.t1.tolist(), self.rh1.tolist()), self.ncl_gt_1, atol=0.005) - def test_xarray_input(self): + def test_xarray_input(self) -> None: t = xr.DataArray(self.t1) rh = xr.DataArray(self.rh1) assert np.allclose(heat_index(t, rh), self.ncl_gt_1, atol=0.005) - def test_alternate_xarray_tag(self): + def test_alternate_xarray_tag(self) -> None: t = xr.DataArray([15, 20]) rh = xr.DataArray([15, 20]) out = heat_index(t, rh) assert out.tag == "NCL: heat_index_nws; (Steadman+t)*0.5" - def test_rh_warning(self): - self.assertWarns(UserWarning, heat_index, [50, 80, 90], [0.1, 0.2, 0.5]) + def test_rh_warning(self) -> None: + with pytest.warns(UserWarning): + heat_index([50, 80, 90], [0.1, 0.2, 0.5]) - def test_rh_valid(self): - self.assertRaises(ValueError, heat_index, [50, 80, 90], [-1, 101, 50]) + def test_rh_valid(self) -> None: + with pytest.raises(ValueError): + heat_index([50, 80, 90], [-1, 101, 50]) - def test_xarray_rh_warning(self): - self.assertWarns(UserWarning, heat_index, [50, 80, 90], [0.1, 0.2, 0.5]) + def test_xarray_rh_warning(self) -> None: + with pytest.warns(UserWarning): + heat_index([50, 80, 90], [0.1, 0.2, 0.5]) - def test_xarray_rh_valid(self): - self.assertRaises(ValueError, heat_index, xr.DataArray([50, 80, 90]), - xr.DataArray([-1, 101, 50])) + def test_xarray_rh_valid(self) -> None: + with pytest.raises(ValueError): + heat_index(xr.DataArray([50, 80, 90]), xr.DataArray([-1, 101, 50])) - def test_xarray_type_error(self): - self.assertRaises(TypeError, heat_index, self.t1, - xr.DataArray(self.rh1)) + def test_xarray_type_error(self) -> None: + with pytest.raises(TypeError): + heat_index(self.t1, xr.DataArray(self.rh1)) - def test_dims_error(self): - self.assertRaises(ValueError, heat_index, self.t1[:10], self.rh1[:8]) + def test_dims_error(self) -> None: + with pytest.raises(ValueError): + heat_index(self.t1[:10], self.rh1[:8]) - def test_dask_compute(self): + def test_dask_compute(self) -> None: t = xr.DataArray(self.t1).chunk(3) rh = xr.DataArray(self.rh1).chunk(3) assert np.allclose(heat_index(t, rh), 
self.ncl_gt_1, atol=0.005) - def test_dask_lazy(self): + def test_dask_lazy(self) -> None: t = xr.DataArray(self.t1).chunk(3) rh = xr.DataArray(self.rh1).chunk(3) assert isinstance((heat_index(t, rh)).data, dask.array.Array) -class Test_relhum(unittest.TestCase): - - @classmethod - def setUpClass(cls): - # set up ground truths - cls.p_def = [ - 100800, 100000, 95000, 90000, 85000, 80000, 75000, 70000, 65000, - 60000, 55000, 50000, 45000, 40000, 35000, 30000, 25000, 20000, - 17500, 15000, 12500, 10000, 8000, 7000, 6000, 5000, 4000, 3000, - 2500, 2000 - ] +class Test_relhum: - cls.t_def = [ - 302.45, 301.25, 296.65, 294.05, 291.55, 289.05, 286.25, 283.25, - 279.85, 276.25, 272.65, 268.65, 264.15, 258.35, 251.65, 243.45, - 233.15, 220.75, 213.95, 206.65, 199.05, 194.65, 197.15, 201.55, - 206.45, 211.85, 216.85, 221.45, 222.45, 225.65 - ] + # set up ground truths + p_def = [ + 100800, 100000, 95000, 90000, 85000, 80000, 75000, 70000, 65000, 60000, + 55000, 50000, 45000, 40000, 35000, 30000, 25000, 20000, 17500, 15000, + 12500, 10000, 8000, 7000, 6000, 5000, 4000, 3000, 2500, 2000 + ] - cls.q_def = [ - 0.02038, 0.01903, 0.01614, 0.01371, 0.01156, 0.0098, 0.00833, - 0.00675, 0.00606, 0.00507, 0.00388, 0.00329, 0.00239, 0.0017, 0.001, - 0.0006, 0.0002, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] + t_def = [ + 302.45, 301.25, 296.65, 294.05, 291.55, 289.05, 286.25, 283.25, 279.85, + 276.25, 272.65, 268.65, 264.15, 258.35, 251.65, 243.45, 233.15, 220.75, + 213.95, 206.65, 199.05, 194.65, 197.15, 201.55, 206.45, 211.85, 216.85, + 221.45, 222.45, 225.65 + ] - cls.rh_gt_1 = 46.4 + q_def = [ + 0.02038, 0.01903, 0.01614, 0.01371, 0.01156, 0.0098, 0.00833, 0.00675, + 0.00606, 0.00507, 0.00388, 0.00329, 0.00239, 0.0017, 0.001, 0.0006, + 0.0002, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] - cls.rh_gt_2 = [ - 79.8228, 79.3578, 84.1962, 79.4898, 73.989, 69.2401, 66.1896, - 61.1084, 64.21, 63.8305, 58.0412, 60.8194, 57.927, 62.3734, 62.9706, - 73.8184, 62.71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] + rh_gt_1 = 46.4 - # make dask client to reference in subsequent tests - cls.client = dd.Client() + rh_gt_2 = [ + 79.8228, 79.3578, 84.1962, 79.4898, 73.989, 69.2401, 66.1896, 61.1084, + 64.21, 63.8305, 58.0412, 60.8194, 57.927, 62.3734, 62.9706, 73.8184, + 62.71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] - def test_float_input(self): - p = 1000. * 100 - t = 18. + 273.15 - q = 6. / 1000. 
+ def test_float_input(self) -> None: + p = 1000.0 * 100 + t = 18.0 + 273.15 + q = 6.0 / 1000.0 assert np.allclose(relhum(t, q, p), self.rh_gt_1, atol=0.1) - def test_list_input(self): + def test_list_input(self) -> None: assert np.allclose(relhum(self.t_def, self.q_def, self.p_def), self.rh_gt_2, atol=0.1) - def test_numpy_input(self): + def test_numpy_input(self) -> None: p = np.asarray(self.p_def) t = np.asarray(self.t_def) q = np.asarray(self.q_def) assert np.allclose(relhum(t, q, p), self.rh_gt_2, atol=0.1) - def test_dims_error(self): - self.assertRaises(ValueError, relhum, self.t_def[:10], self.q_def[:10], - self.p_def[:9]) + def test_dims_error(self) -> None: + with pytest.raises(ValueError): + relhum(self.t_def[:10], self.q_def[:10], self.p_def[:9]) - def test_xarray_type_error(self): - self.assertRaises(TypeError, relhum, self.t_def, - xr.DataArray(self.q_def), self.p_def) + def test_xarray_type_error(self) -> None: + with pytest.raises(TypeError): + relhum(self.t_def, xr.DataArray(self.q_def), self.p_def) - def test_dask_compute(self): + def test_dask_compute(self, client) -> None: p = xr.DataArray(self.p_def).chunk(10) t = xr.DataArray(self.t_def).chunk(10) q = xr.DataArray(self.q_def).chunk(10) assert np.allclose(relhum(t, q, p), self.rh_gt_2, atol=0.1) - def test_dask_lazy(self): + def test_dask_lazy(self, client) -> None: p = xr.DataArray(self.p_def).chunk(10) t = xr.DataArray(self.t_def).chunk(10) q = xr.DataArray(self.q_def).chunk(10) @@ -263,95 +260,89 @@ def test_dask_lazy(self): assert isinstance(relhum(t, q, p).data, dask.array.Array) -class Test_relhum_water(unittest.TestCase): +class Test_relhum_water: rh_gt_1 = 46.3574 - def test_float_input(self): - p = 1000. * 100 - t = 18. + 273.15 - q = 6. / 1000. + def test_float_input(self) -> None: + p = 1000.0 * 100 + t = 18.0 + 273.15 + q = 6.0 / 1000.0 assert np.allclose(relhum_water(t, q, p), self.rh_gt_1, atol=0.1) -class Test_relhum_ice(unittest.TestCase): +class Test_relhum_ice: rh_gt_1 = 147.8802 - def test_float_input(self): - tc = -5. + def test_float_input(self) -> None: + tc = -5.0 tk = tc + 273.15 - w = 3.7 / 1000. - p = 1000. * 100. 
+ w = 3.7 / 1000.0 + p = 1000.0 * 100.0 assert np.allclose(relhum_ice(tk, w, p), self.rh_gt_1, atol=0.1) -class Test_actual_saturation_vapor_pressure(unittest.TestCase): +class Test_actual_saturation_vapor_pressure: - @classmethod - def setUpClass(cls): + # set up ground truths + temp_gt = np.arange(1, 101, 1) + @pytest.fixture(scope="class") + def ncl_gt(self): # get ground truth from ncl run netcdf file try: - ncl_xr_gt = xr.open_dataarray( + return xr.open_dataarray( "satvpr_tdew_fao56_output.nc" - ) # Generated by running ncl_tests/test_satvpr_tdew_fao56.ncl + ).values # Generated by running ncl_tests/test_satvpr_tdew_fao56.ncl except: - ncl_xr_gt = xr.open_dataarray("test/satvpr_tdew_fao56_output.nc") - - # set up ground truths - cls.ncl_gt = np.asarray(ncl_xr_gt) - - cls.temp_gt = np.arange(1, 101, 1) + return xr.open_dataarray("test/satvpr_tdew_fao56_output.nc").values - # make client to reference in subsequent tests - cls.client = dd.Client() - - def test_numpy_input(self): + def test_numpy_input(self, ncl_gt) -> None: assert np.allclose(actual_saturation_vapor_pressure( self.temp_gt, tfill=1.0000000e+20), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_float_input(self): + def test_float_input(self) -> None: degf = 59 expected = 1.70535 assert np.allclose(actual_saturation_vapor_pressure(degf), expected, atol=0.005) - def test_list_input(self): + def test_list_input(self, ncl_gt) -> None: assert np.allclose(actual_saturation_vapor_pressure( self.temp_gt.tolist(), tfill=1.0000000e+20), - self.ncl_gt.tolist(), + ncl_gt.tolist(), atol=0.005) - def test_multi_dimensional_input(self): + def test_multi_dimensional_input(self, ncl_gt) -> None: assert np.allclose(actual_saturation_vapor_pressure( self.temp_gt.reshape(2, 50), tfill=1.0000000e+20), - self.ncl_gt.reshape(2, 50), + ncl_gt.reshape(2, 50), atol=0.005) - def test_xarray_input(self): + def test_xarray_input(self, ncl_gt) -> None: tempf = xr.DataArray(self.temp_gt) - expected = xr.DataArray(self.ncl_gt) + expected = xr.DataArray(ncl_gt) assert np.allclose(actual_saturation_vapor_pressure( tempf, tfill=1.0000000e+20), expected, atol=0.005) - def test_dask_compute(self): + def test_dask_compute(self, ncl_gt, client) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert np.allclose(actual_saturation_vapor_pressure( tempf, tfill=1.0000000e+20), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_dask_lazy(self): + def test_dask_lazy(self, client) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert isinstance( @@ -359,207 +350,188 @@ def test_dask_lazy(self): dask.array.Array) -class Test_max_daylight(unittest.TestCase): +class Test_max_daylight: - @classmethod - def setUpClass(cls): + # set up ground truths + jday_gt = np.linspace(1, 365, num=365) + lat_gt = np.linspace(-66, 66, num=133) + @pytest.fixture(scope="class") + def ncl_gt(self): # get ground truth from ncl run netcdf file try: - ncl_xr_gt = xr.open_dataarray( + return xr.open_dataarray( "max_daylight_test.nc" - ) # Generated by running ncl_tests/test_max_daylight.ncl + ).values # Generated by running ncl_tests/test_max_daylight.ncl except: - ncl_xr_gt = xr.open_dataarray("test/max_daylight_test.nc") - - # set up ground truths - cls.ncl_gt = np.asarray(ncl_xr_gt) - - cls.jday_gt = np.linspace(1, 365, num=365) - cls.lat_gt = np.linspace(-66, 66, num=133) + return xr.open_dataarray("test/max_daylight_test.nc").values - # make client to reference in subsequent tests - cls.client = dd.Client() - - def test_numpy_input(self): + def test_numpy_input(self, ncl_gt) -> 
None: assert np.allclose(max_daylight(self.jday_gt, self.lat_gt), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_float_input(self): + def test_float_input(self) -> None: assert np.allclose(max_daylight(246, -20.0), 11.66559, atol=0.005) - def test_list_input(self): + def test_list_input(self, ncl_gt) -> None: assert np.allclose(max_daylight(self.jday_gt.tolist(), self.lat_gt.tolist()), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_xarray_input(self): + def test_xarray_input(self, ncl_gt) -> None: jday = xr.DataArray(self.jday_gt) lat = xr.DataArray(self.lat_gt) - assert np.allclose(max_daylight(jday, lat), self.ncl_gt, atol=0.005) + assert np.allclose(max_daylight(jday, lat), ncl_gt, atol=0.005) - def test_dask_unchunked_input(self): + def test_dask_unchunked_input(self, ncl_gt, client) -> None: jday = dask.array.from_array(self.jday_gt) lat = dask.array.from_array(self.lat_gt) - out = self.client.submit(max_daylight, jday, lat).result() + out = client.submit(max_daylight, jday, lat).result() - assert np.allclose(out, self.ncl_gt, atol=0.005) + assert np.allclose(out, ncl_gt, atol=0.005) - def test_dask_chunked_input(self): + def test_dask_chunked_input(self, ncl_gt, client) -> None: jday = dask.array.from_array(self.jday_gt, chunks='auto') lat = dask.array.from_array(self.lat_gt, chunks='auto') - out = self.client.submit(max_daylight, jday, lat).result() + out = client.submit(max_daylight, jday, lat).result() - assert np.allclose(out, self.ncl_gt, atol=0.005) + assert np.allclose(out, ncl_gt, atol=0.005) - def test_input_dim(self): - self.assertRaises(ValueError, max_daylight, - np.arange(4).reshape(2, 2), - np.arange(4).reshape(2, 2)) + def test_input_dim(self) -> None: + with pytest.raises(ValueError): + max_daylight(np.arange(4).reshape(2, 2), np.arange(4).reshape(2, 2)) - def test_lat_bound_warning(self): - self.assertWarns(UserWarning, max_daylight, 10, 56) + def test_lat_bound_warning(self) -> None: + with pytest.warns(UserWarning): + max_daylight(10, 56) - def test_lat_bound_second_warning(self): - self.assertWarns(UserWarning, max_daylight, 10, 67) + def test_lat_bound_second_warning(self) -> None: + with pytest.warns(UserWarning): + max_daylight(10, 67) -class Test_psychrometric_constant(unittest.TestCase): +class Test_psychrometric_constant: - @classmethod - def setUpClass(cls): + # set up ground truths + pressure_gt = np.arange(1, 101, 1) + @pytest.fixture(scope="class") + def ncl_gt(self): # get ground truth from ncl run netcdf file try: - ncl_xr_gt = xr.open_dataarray( + return xr.open_dataarray( "psychro_fao56_output.nc" - ) # Generated by running ncl_tests/test_psychro_fao56.ncl + ).values # Generated by running ncl_tests/test_psychro_fao56.ncl except: - ncl_xr_gt = xr.open_dataarray("test/psychro_fao56_output.nc") - - # set up ground truths - cls.ncl_gt = np.asarray(ncl_xr_gt) + return xr.open_dataarray("test/psychro_fao56_output.nc").values - cls.pressure_gt = np.arange(1, 101, 1) - - # make client to reference in subsequent tests - cls.client = dd.Client() - - def test_numpy_input(self): + def test_numpy_input(self, ncl_gt) -> None: assert np.allclose(psychrometric_constant(self.pressure_gt), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_float_input(self): + def test_float_input(self) -> None: pressure = 81.78 expected = 0.05434634 assert np.allclose(psychrometric_constant(pressure), expected, atol=0.005) - def test_list_input(self): + def test_list_input(self, ncl_gt) -> None: assert np.allclose(psychrometric_constant(self.pressure_gt.tolist()), - 
self.ncl_gt.tolist(), + ncl_gt.tolist(), atol=0.005) - def test_multi_dimensional_input(self): + def test_multi_dimensional_input(self, ncl_gt) -> None: assert np.allclose(psychrometric_constant( self.pressure_gt.reshape(2, 50)), - self.ncl_gt.reshape(2, 50), + ncl_gt.reshape(2, 50), atol=0.005) - def test_xarray_input(self): + def test_xarray_input(self, ncl_gt) -> None: pressure = xr.DataArray(self.pressure_gt) - expected = xr.DataArray(self.ncl_gt) + expected = xr.DataArray(ncl_gt) assert np.allclose(psychrometric_constant(pressure), expected, atol=0.005) - def test_dask_compute(self): + def test_dask_compute(self, ncl_gt, client) -> None: pressure = xr.DataArray(self.pressure_gt).chunk(10) - assert np.allclose(psychrometric_constant(pressure), - self.ncl_gt, - atol=0.005) + assert np.allclose(psychrometric_constant(pressure), ncl_gt, atol=0.005) - def test_dask_lazy(self): + def test_dask_lazy(self, client) -> None: pressure = xr.DataArray(self.pressure_gt).chunk(10) assert isinstance((psychrometric_constant(pressure)).data, dask.array.Array) -class Test_saturation_vapor_pressure(unittest.TestCase): +class Test_saturation_vapor_pressure: - @classmethod - def setUpClass(cls): + # set up ground truths + temp_gt = np.arange(1, 101, 1) + @pytest.fixture(scope="class") + def ncl_gt(self): # get ground truth from ncl run netcdf file try: - ncl_xr_gt = xr.open_dataarray( + return xr.open_dataarray( "satvpr_temp_fao56_output.nc" - ) # Generated by running ncl_tests/test_satvpr_temp_fao56.ncl + ).values # Generated by running ncl_tests/test_satvpr_temp_fao56.ncl except: - ncl_xr_gt = xr.open_dataarray("test/satvpr_temp_fao56_output.nc") - - # set up ground truths - cls.ncl_gt = np.asarray(ncl_xr_gt) + return xr.open_dataarray("test/satvpr_temp_fao56_output.nc").values - cls.temp_gt = np.arange(1, 101, 1) - - # make client to reference in subsequent tests - cls.client = dd.Client() - - def test_numpy_input(self): + def test_numpy_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure(self.temp_gt, tfill=1.0000000e+20), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_float_input(self): + def test_float_input(self) -> None: degf = 59 expected = 1.70535 assert np.allclose(saturation_vapor_pressure(degf), expected, atol=0.005) - def test_list_input(self): + def test_list_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure(self.temp_gt.tolist(), tfill=1.0000000e+20), - self.ncl_gt.tolist(), + ncl_gt.tolist(), atol=0.005) - def test_multi_dimensional_input(self): + def test_multi_dimensional_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure(self.temp_gt.reshape( 2, 50), tfill=1.0000000e+20), - self.ncl_gt.reshape(2, 50), + ncl_gt.reshape(2, 50), atol=0.005) - def test_xarray_input(self): + def test_xarray_input(self, ncl_gt) -> None: tempf = xr.DataArray(self.temp_gt) - expected = xr.DataArray(self.ncl_gt) + expected = xr.DataArray(ncl_gt) assert np.allclose(saturation_vapor_pressure(tempf, tfill=1.0000000e+20), expected, atol=0.005) - def test_dask_compute(self): + def test_dask_compute(self, ncl_gt) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert np.allclose(saturation_vapor_pressure(tempf, tfill=1.0000000e+20), - self.ncl_gt, + ncl_gt, atol=0.005) - def test_dask_lazy(self): + def test_dask_lazy(self) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert isinstance((saturation_vapor_pressure(tempf, @@ -567,166 +539,163 @@ def test_dask_lazy(self): dask.array.Array) -class 
Test_saturation_vapor_pressure_slope(unittest.TestCase): +class Test_saturation_vapor_pressure_slope: - @classmethod - def setUpClass(cls): + # set up ground truths + temp_gt = np.arange(1, 101, 1) + @pytest.fixture(scope="class") + def ncl_gt(self): # get ground truth from ncl run netcdf file try: - ncl_xr_gt = xr.open_dataarray( + return xr.open_dataarray( "satvpr_slope_fao56_output.nc" - ) # Generated by running ncl_tests/test_satvpr_slope_fao56.ncl + ).values # Generated by running ncl_tests/test_satvpr_slope_fao56.ncl except: - ncl_xr_gt = xr.open_dataarray("test/satvpr_slope_fao56_output.nc") - - # set up ground truths - cls.ncl_gt = np.asarray(ncl_xr_gt) - - cls.temp_gt = np.arange(1, 101, 1) - - # make client to reference in subsequent tests - cls.client = dd.Client() + return xr.open_dataarray("test/satvpr_slope_fao56_output.nc").values - def test_numpy_input(self): + def test_numpy_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure_slope(self.temp_gt), - self.ncl_gt, + ncl_gt, equal_nan=True) - def test_float_input(self): + def test_float_input(self) -> None: degf = 67.55 expected = 0.142793 assert np.allclose(saturation_vapor_pressure_slope(degf), expected, atol=0.005) - def test_list_input(self): + def test_list_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure_slope( self.temp_gt.tolist()), - self.ncl_gt.tolist(), + ncl_gt.tolist(), equal_nan=True) - def test_multi_dimensional_input(self): + def test_multi_dimensional_input(self, ncl_gt) -> None: assert np.allclose(saturation_vapor_pressure_slope( self.temp_gt.reshape(2, 50)), - self.ncl_gt.reshape(2, 50), + ncl_gt.reshape(2, 50), atol=0.005, equal_nan=True) - def test_xarray_input(self): + def test_xarray_input(self, ncl_gt) -> None: tempf = xr.DataArray(self.temp_gt) - expected = xr.DataArray(self.ncl_gt) + expected = xr.DataArray(ncl_gt) assert np.allclose(saturation_vapor_pressure_slope(tempf), expected, atol=0.005, equal_nan=True) - def test_dask_compute(self): + def test_dask_compute(self, ncl_gt, client) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert np.allclose(saturation_vapor_pressure_slope(tempf), - self.ncl_gt, + ncl_gt, atol=0.005, equal_nan=True) - def test_dask_lazy(self): + def test_dask_lazy(self, client) -> None: tempf = xr.DataArray(self.temp_gt).chunk(10) assert isinstance((saturation_vapor_pressure_slope(tempf)).data, dask.array.Array) -class TestDeltaPressure(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.pressure_lev = np.array([1, 5, 100, 1000]) - cls.pressure_lev_da = xr.DataArray(cls.pressure_lev) - cls.pressure_lev_da.attrs = { - "long name": "pressure level", - "units": "hPa", - "direction": "descending" - } - - cls.surface_pressure_scalar = 1018 - cls.surface_pressure_1D = np.array([1018, 1019]) - cls.surface_pressure_2D = np.array([[1018, 1019], [1017, 1019.5]]) - cls.surface_pressure_3D = np.array([[[1018, 1019], [1017, 1019.5]], - [[1019, 1020], [1018, 1020.5]]]) - - coords = {'time': [1, 2], 'lat': [3, 4], 'lon': [5, 6]} - dims = ["time", "lat", "lon"] - attrs = {"long name": "surface pressure", "units": "hPa"} - cls.surface_pressure_3D_da = xr.DataArray(cls.surface_pressure_3D, - coords=coords, - dims=dims, - attrs=attrs) - - def test_delta_pressure1D(self): +class Test_Delta_Pressure: + + pressure_lev = np.array([1, 5, 100, 1000]) + pressure_lev_da = xr.DataArray(pressure_lev) + pressure_lev_da.attrs = { + "long name": "pressure level", + "units": "hPa", + "direction": "descending" + } + + surface_pressure_scalar 
= 1018 + surface_pressure_1D = np.array([1018, 1019]) + surface_pressure_2D = np.array([[1018, 1019], [1017, 1019.5]]) + surface_pressure_3D = np.array([[[1018, 1019], [1017, 1019.5]], + [[1019, 1020], [1018, 1020.5]]]) + + surface_pressure_3D_da = xr.DataArray( + surface_pressure_3D, + coords={ + "time": [1, 2], + "lat": [3, 4], + "lon": [5, 6] + }, + dims=["time", "lat", "lon"], + attrs={ + "long name": "surface pressure", + "units": "hPa" + }, + ) + + def test_delta_pressure1D(self) -> None: pressure_lev = [float(i) for i in self.pressure_lev] pressure_top = min(pressure_lev) delta_p = delta_pressure(pressure_lev, self.surface_pressure_scalar) - self.assertEqual(sum(delta_p), - self.surface_pressure_scalar - pressure_top) + assert sum(delta_p) == (self.surface_pressure_scalar - pressure_top) - def test_negative_pressure_warning(self): + def test_negative_pressure_warning(self) -> None: pressure_lev_negative = self.pressure_lev.copy() pressure_lev_negative[0] = -5 - with self.assertWarns(Warning): + with pytest.warns(UserWarning): delta_p = delta_pressure(pressure_lev_negative, self.surface_pressure_scalar) - def test_relative_pressure_warning(self): + def test_relative_pressure_warning(self) -> None: surface_pressure_low = 0.5 - with self.assertWarns(Warning): + with pytest.warns(UserWarning): delta_p = delta_pressure(self.pressure_lev, surface_pressure_low) - def test_output_type(self): + def test_output_type(self) -> None: delta_pressure_da = delta_pressure(self.pressure_lev_da, self.surface_pressure_3D_da) - self.assertIsInstance(delta_pressure_da, xr.DataArray) + assert isinstance(delta_pressure_da, xr.DataArray) delta_pressure_np = delta_pressure(self.pressure_lev, self.surface_pressure_3D) - self.assertIsInstance(delta_pressure_np, np.ndarray) + assert isinstance(delta_pressure_np, np.ndarray) - def test_output_dimensions(self): + def test_output_dimensions(self) -> None: delta_pressure_scalar = delta_pressure(self.pressure_lev, self.surface_pressure_scalar) - self.assertEqual(delta_pressure_scalar.shape, (4,)) + assert delta_pressure_scalar.shape == (4,) delta_pressure_1D = delta_pressure(self.pressure_lev, self.surface_pressure_1D) - self.assertEqual(delta_pressure_1D.shape, (2, 4)) + assert delta_pressure_1D.shape == (2, 4) delta_pressure_2D = delta_pressure(self.pressure_lev, self.surface_pressure_2D) - self.assertEqual(delta_pressure_2D.shape, (2, 2, 4)) + assert delta_pressure_2D.shape == (2, 2, 4) delta_pressure_3D = delta_pressure(self.pressure_lev, self.surface_pressure_3D) - self.assertEqual(delta_pressure_3D.shape, (2, 2, 2, 4)) + assert delta_pressure_3D.shape == (2, 2, 2, 4) - def test_output_attrs(self): + def test_output_attrs(self) -> None: delta_pressure_da = delta_pressure(self.pressure_lev_da, self.surface_pressure_3D_da) for item in self.pressure_lev_da.attrs: - self.assertIn(item, delta_pressure_da.attrs) + assert item in delta_pressure_da.attrs - def test_output_coords(self): + def test_output_coords(self) -> None: delta_pressure_da = delta_pressure(self.pressure_lev_da, self.surface_pressure_3D_da) for item in self.surface_pressure_3D_da.coords: - self.assertIn(item, delta_pressure_da.coords) + assert item in delta_pressure_da.coords for item in self.pressure_lev_da.coords: - self.assertIn(item, delta_pressure_da.coords) + assert item in delta_pressure_da.coords - def test_mismatch_input_types(self): + def test_mismatch_input_types(self) -> None: delta_pressure_da = delta_pressure(self.pressure_lev, self.surface_pressure_3D_da) - 
self.assertIsInstance(delta_pressure_da, xr.DataArray) + assert isinstance(delta_pressure_da, xr.DataArray) delta_pressure_np = delta_pressure(self.pressure_lev_da, self.surface_pressure_3D) - self.assertIsInstance(delta_pressure_np, np.ndarray) + assert isinstance(delta_pressure_np, np.ndarray) diff --git a/test/test_spherical.py b/test/test_spherical.py index 4fe2d82b1..7f9fbb761 100644 --- a/test/test_spherical.py +++ b/test/test_spherical.py @@ -1,6 +1,6 @@ import math as ma import sys -import unittest +import pytest import numpy as np import scipy.special as ss @@ -9,72 +9,70 @@ from geocat.comp import decomposition, recomposition, scale_voronoi -class Test_Spherical(unittest.TestCase): +class Test_Spherical: - @classmethod - def setUpClass(cls): - max_harm = 23 - num_phi = 90 - num_theta = 180 + max_harm = 23 + num_phi = 90 + num_theta = 180 - theta = np.linspace(0, ma.tau - ma.tau / num_theta, num_theta) - phi = np.linspace( - ma.pi / (2 * num_phi), - ma.pi - ma.pi / (2 * num_phi), - num_phi, - ) - cls.theta_np, cls.phi_np = np.meshgrid(theta, phi) - cls.theta_xr = xr.DataArray(cls.theta_np, dims=['lat', 'lon']) - cls.phi_xr = xr.DataArray(cls.phi_np, dims=['lat', 'lon']) - cls.test_scale_np = np.sin(cls.phi_np) - cls.test_scale_xr = xr.DataArray( - cls.test_scale_np, - dims=['lat', 'lon'], - ).compute() + theta = np.linspace(0, ma.tau - ma.tau / num_theta, num_theta) + phi = np.linspace( + ma.pi / (2 * num_phi), + ma.pi - ma.pi / (2 * num_phi), + num_phi, + ) + theta_np, phi_np = np.meshgrid(theta, phi) + theta_xr = xr.DataArray(theta_np, dims=['lat', 'lon']) + phi_xr = xr.DataArray(phi_np, dims=['lat', 'lon']) + test_scale_np = np.sin(phi_np) + test_scale_xr = xr.DataArray( + test_scale_np, + dims=['lat', 'lon'], + ).compute() - test_data = np.zeros(cls.theta_np.shape) - test_results = [] - test_harmonics = [] - for n in range(max_harm + 1): - for m in range(n + 1): - test_harmonics.append([m, n]) - test_results.append(0) - if n in [0, 2, 3, 5, 7, 11, 13, 17, 19, 23 - ] and m in [0, 2, 3, 5, 7, 11, 13, 17, 19, 23]: - if m in [2, 5, 11, 17, 23]: - test_data += ss.sph_harm( - m, - n, - cls.theta_np, - cls.phi_np, - ).imag - test_results[-1] = 1j - else: - test_data += ss.sph_harm( - m, - n, - cls.theta_np, - cls.phi_np, - ).real - test_results[-1] = 1 + test_data = np.zeros(theta_np.shape) + test_results = [] + test_harmonics = [] + for n in range(max_harm + 1): + for m in range(n + 1): + test_harmonics.append([m, n]) + test_results.append(0) + if n in [0, 2, 3, 5, 7, 11, 13, 17, 19, 23 + ] and m in [0, 2, 3, 5, 7, 11, 13, 17, 19, 23]: + if m in [2, 5, 11, 17, 23]: + test_data += ss.sph_harm( + m, + n, + theta_np, + phi_np, + ).imag + test_results[-1] = 1j + else: + test_data += ss.sph_harm( + m, + n, + theta_np, + phi_np, + ).real + test_results[-1] = 1 - cls.test_harmonics_np = np.array(test_harmonics) - cls.test_harmonics_xr = xr.DataArray( - cls.test_harmonics_np, - dims=['har', 'm,n'], - ).compute() - cls.test_data_np = test_data - cls.test_data_xr = xr.DataArray( - cls.test_data_np, - dims=['lat', 'lon'], - ).compute() - cls.test_results_np = np.array(test_results) - cls.test_results_xr = xr.DataArray( - cls.test_results_np, - dims=['har'], - ).compute() + test_harmonics_np = np.array(test_harmonics) + test_harmonics_xr = xr.DataArray( + test_harmonics_np, + dims=['har', 'm,n'], + ).compute() + test_data_np = test_data + test_data_xr = xr.DataArray( + test_data_np, + dims=['lat', 'lon'], + ).compute() + test_results_np = np.array(test_results) + test_results_xr = xr.DataArray( 
+ test_results_np, + dims=['har'], + ).compute() - def test_decomposition_np(self): + def test_decomposition_np(self) -> None: results_np = decomposition( self.test_data_np, self.test_scale_np, @@ -87,7 +85,7 @@ def test_decomposition_np(self): decimal=2, ) - def test_decomposition_xr(self): + def test_decomposition_xr(self) -> None: results_xr = decomposition( self.test_data_xr, self.test_scale_xr, @@ -100,7 +98,7 @@ def test_decomposition_xr(self): decimal=2, ) - def test_recomposition_np(self): + def test_recomposition_np(self) -> None: data_np = recomposition( self.test_results_np, self.theta_np, @@ -111,7 +109,7 @@ def test_recomposition_np(self): self.test_data_np, ) - def test_recomposition_xr(self): + def test_recomposition_xr(self) -> None: data_xr = recomposition( self.test_results_xr, self.theta_xr, @@ -122,7 +120,7 @@ def test_recomposition_xr(self): self.test_data_xr.to_numpy(), ) - def test_scale_voronoi_np(self): + def test_scale_voronoi_np(self) -> None: scale_np = scale_voronoi( self.theta_np, self.phi_np, @@ -132,7 +130,7 @@ def test_scale_voronoi_np(self): self.test_scale_np / np.sum(self.test_scale_np, axis=(0, 1)), ) - def test_scale_voronoi_xr(self): + def test_scale_voronoi_xr(self) -> None: scale_xr = scale_voronoi( self.theta_xr, self.phi_xr, diff --git a/test/test_stats.py b/test/test_stats.py index 61d61fc35..10929a94e 100644 --- a/test/test_stats.py +++ b/test/test_stats.py @@ -1,8 +1,8 @@ -from unittest import TestCase import sys from abc import ABCMeta import numpy as np import xarray as xr +import pytest from geocat.comp.stats import eofunc, eofunc_eofs, eofunc_pcs, eofunc_ts, pearson_r @@ -47,11 +47,6 @@ class BaseEOFTestClass(metaclass=ABCMeta): # _sample_data[ 4 ] _sample_data_eof.append(np.arange(64, dtype='int64').reshape((4, 4, 4))) - try: - _nc_ds = xr.open_dataset("eofunc_dataset.nc") - except: - _nc_ds = xr.open_dataset("test/eofunc_dataset.nc") - _num_attrs = 4 expected_output = np.full((1, 4, 4), 0.25) @@ -60,9 +55,9 @@ class BaseEOFTestClass(metaclass=ABCMeta): expected_eigen_val_time_dim_0 = 6826.66667 -class Test_eof(TestCase, BaseEOFTestClass): +class Test_eof(BaseEOFTestClass): - def test_eof_00(self): + def test_eof_00(self) -> None: data = self._sample_data_eof[0] results = eofunc_eofs(data, neofs=1, time_dim=2) @@ -78,7 +73,7 @@ def test_eof_00(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_2, attrs['eigenvalues'].values[0], 5) - def test_eof_deprecated(self): + def test_eof_deprecated(self) -> None: data = self._sample_data_eof[0] results = eofunc(data, neval=1) @@ -94,7 +89,7 @@ def test_eof_deprecated(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_2, attrs['eigenvalues'].values[0], 5) - def test_eof_01(self): + def test_eof_01(self) -> None: data = self._sample_data_eof[1] results = eofunc_eofs(data, neofs=1, time_dim=2) @@ -110,7 +105,7 @@ def test_eof_01(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_2, attrs['eigenvalues'].values[0], 5) - def test_eof_02(self): + def test_eof_02(self) -> None: data = self._sample_data_eof[1] results = eofunc_eofs(data, neofs=1, time_dim=2) @@ -126,7 +121,7 @@ def test_eof_02(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_2, attrs['eigenvalues'].values[0], 5) - def test_eof_14(self): + def test_eof_14(self) -> None: data = self._sample_data_eof[4] results = eofunc_eofs(data, neofs=1, time_dim=2) @@ -142,7 +137,7 @@ def test_eof_14(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_2, 
attrs['eigenvalues'].values[0], 5) - def test_eof_15(self): + def test_eof_15(self) -> None: data = np.asarray(self._sample_data_eof[0]) data = np.transpose(data, axes=(2, 1, 0)) @@ -174,7 +169,7 @@ def test_eof_15(self): np.testing.assert_equal(False, ("prop2" in attrs)) # TODO: Maybe revisited to add time_dim support for Xarray in addition to numpy inputs - # def test_eof_15_time_dim(self): + # def test_eof_15_time_dim(self) -> None: # # data = np.asarray(self._sample_data_eof[0]) # @@ -207,7 +202,7 @@ def test_eof_15(self): # self.assertFalse("prop1" in attrs) # self.assertFalse("prop2" in attrs) - def test_eof_16(self): + def test_eof_16(self) -> None: data = np.asarray(self._sample_data_eof[0]) data = np.transpose(data, axes=(2, 1, 0)) @@ -239,7 +234,7 @@ def test_eof_16(self): np.testing.assert_equal("prop1", attrs["prop1"]) np.testing.assert_equal(2, attrs["prop2"]) - def test_eof_n_01(self): + def test_eof_n_01(self) -> None: data = self._sample_data_eof[1] results = eofunc_eofs(data, neofs=1, time_dim=1) @@ -255,7 +250,7 @@ def test_eof_n_01(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_1, attrs['eigenvalues'].values[0], 5) - def test_eof_n_03(self): + def test_eof_n_03(self) -> None: data = self._sample_data_eof[1] results = eofunc_eofs(data, 1, time_dim=0) @@ -271,7 +266,7 @@ def test_eof_n_03(self): np.testing.assert_almost_equal(self.expected_eigen_val_time_dim_0, attrs['eigenvalues'].values[0], 5) - def test_eof_n_03_1(self): + def test_eof_n_03_1(self) -> None: data = self._sample_data_eof[1] results = eofunc_eofs(data, 1, time_dim=0) @@ -288,12 +283,19 @@ def test_eof_n_03_1(self): attrs['eigenvalues'].values[0], 5) -class Test_eof_ts(TestCase, BaseEOFTestClass): +class Test_eof_ts(BaseEOFTestClass): + + @pytest.fixture(scope="class") + def _nc_ds(self): + try: + return xr.open_dataset("eofunc_dataset.nc") + except: + return xr.open_dataset("test/eofunc_dataset.nc") - def test_01(self): - sst = self._nc_ds.sst - evec = self._nc_ds.evec - expected_tsout = self._nc_ds.tsout + def test_01(self, _nc_ds) -> None: + sst = _nc_ds.sst + evec = _nc_ds.evec + expected_tsout = _nc_ds.tsout actual_tsout = eofunc_pcs(sst, npcs=5) @@ -302,10 +304,10 @@ def test_01(self): np.testing.assert_array_almost_equal(actual_tsout, expected_tsout.data, 3) - def test_01_deprecated(self): - sst = self._nc_ds.sst - evec = self._nc_ds.evec - expected_tsout = self._nc_ds.tsout + def test_01_deprecated(self, _nc_ds) -> None: + sst = _nc_ds.sst + evec = _nc_ds.evec + expected_tsout = _nc_ds.tsout actual_tsout = eofunc_ts(sst, evec, time_dim=0) @@ -314,10 +316,10 @@ def test_01_deprecated(self): np.testing.assert_array_almost_equal(actual_tsout, expected_tsout.data, 3) - def test_02(self): - sst = self._nc_ds.sst - evec = self._nc_ds.evec - expected_tsout = self._nc_ds.tsout + def test_02(self, _nc_ds) -> None: + sst = _nc_ds.sst + evec = _nc_ds.evec + expected_tsout = _nc_ds.tsout actual_tsout = eofunc_pcs(sst, npcs=5, meta=True) @@ -330,70 +332,69 @@ def test_02(self): sst.coords["time"].data) -class Test_pearson_r(TestCase): - - @classmethod - def setUpClass(cls): - # Coordinates - times = xr.cftime_range(start='2022-08-01', end='2022-08-05', freq='D') - lats = np.linspace(start=-45, stop=45, num=3, dtype='float32') - lons = np.linspace(start=-180, stop=180, num=4, dtype='float32') - - # Create data variables - x, y, z = np.meshgrid(lons, lats, times) - np.random.seed(0) - cls.a = np.random.random_sample((len(lats), len(lons), len(times))) - cls.b = np.power(cls.a, 2) - 
cls.weights = np.cos(np.deg2rad(y)) - cls.ds = xr.Dataset(data_vars={ - 'a': (('lat', 'lon', 'time'), cls.a), - 'b': (('lat', 'lon', 'time'), cls.b), - 'weights': (('lat', 'lon', 'time'), cls.weights) - }, - coords={ - 'lat': lats, - 'lon': lons, - 'time': times - }, - attrs={'description': 'Test data'}) - - cls.unweighted_r = 0.963472086 - cls.unweighted_r_skipnan = 0.96383798 - cls.weighted_r = 0.963209755 - cls.weighted_r_lat = [ - [0.995454445, 0.998450821, 0.99863877, 0.978765291, 0.982350092], - [0.99999275, 0.995778831, 0.998994355, 0.991634937, 0.999868279], - [0.991344899, 0.998632079, 0.99801552, 0.968517489, 0.985215828], - [0.997034735, 0.99834464, 0.987382522, 0.99646236, 0.989222738] - ] +class Test_pearson_r: + + # Coordinates + times = xr.cftime_range(start='2022-08-01', end='2022-08-05', freq='D') + lats = np.linspace(start=-45, stop=45, num=3, dtype='float32') + lons = np.linspace(start=-180, stop=180, num=4, dtype='float32') + + # Create data variables + x, y, z = np.meshgrid(lons, lats, times) + np.random.seed(0) + a = np.random.random_sample((len(lats), len(lons), len(times))) + b = np.power(a, 2) + weights = np.cos(np.deg2rad(y)) + ds = xr.Dataset(data_vars={ + 'a': (('lat', 'lon', 'time'), a), + 'b': (('lat', 'lon', 'time'), b), + 'weights': (('lat', 'lon', 'time'), weights) + }, + coords={ + 'lat': lats, + 'lon': lons, + 'time': times + }, + attrs={'description': 'Test data'}) + + unweighted_r = 0.963472086 + unweighted_r_skipnan = 0.96383798 + weighted_r = 0.963209755 + weighted_r_lat = [ + [0.995454445, 0.998450821, 0.99863877, 0.978765291, 0.982350092], + [0.99999275, 0.995778831, 0.998994355, 0.991634937, 0.999868279], + [0.991344899, 0.998632079, 0.99801552, 0.968517489, 0.985215828], + [0.997034735, 0.99834464, 0.987382522, 0.99646236, 0.989222738] + ] # Testing numpy inputs - def test_np_inputs(self): + def test_np_inputs(self) -> None: a = self.a b = self.b result = pearson_r(a, b) assert np.allclose(self.unweighted_r, result) - def test_np_inputs_weighted(self): + def test_np_inputs_weighted(self) -> None: a = self.a b = self.b w = self.weights result = pearson_r(a, b, weights=w) assert np.allclose(self.weighted_r, result) - def test_np_inputs_warn(self): + def test_np_inputs_warn(self) -> None: a = self.a b = self.b - self.assertWarns(Warning, pearson_r, a, b, dim='lat', axis=0) + with pytest.warns(UserWarning): + pearson_r(a, b, dim='lat', axis=0) - def test_np_inputs_across_lats(self): + def test_np_inputs_across_lats(self) -> None: a = self.a b = self.b w = self.weights result = pearson_r(a, b, weights=w, axis=0) assert np.allclose(self.weighted_r_lat, result) - def test_np_inputs_skipna(self): + def test_np_inputs_skipna(self) -> None: # deep copy to prevent adding nans to the test data for other tests a = self.a.copy() a[0] = np.nan @@ -402,32 +403,33 @@ def test_np_inputs_skipna(self): assert np.allclose(self.unweighted_r_skipnan, result) # Testing xarray inputs - def test_xr_inputs(self): + def test_xr_inputs(self) -> None: a = self.ds.a b = self.ds.b result = pearson_r(a, b) assert np.allclose(self.unweighted_r, result) - def test_xr_inputs_weighted(self): + def test_xr_inputs_weighted(self) -> None: a = self.ds.a b = self.ds.b w = self.ds.weights result = pearson_r(a, b, weights=w) assert np.allclose(self.weighted_r, result) - def test_xr_inputs_warn(self): + def test_xr_inputs_warn(self) -> None: a = self.ds.a b = self.ds.b - self.assertWarns(Warning, pearson_r, a, b, dim='lat', axis=0) + with pytest.warns(UserWarning): + pearson_r(a, b, 
dim='lat', axis=0)

-    def test_xr_inputs_across_lats(self):
+    def test_xr_inputs_across_lats(self) -> None:
         a = self.ds.a
         b = self.ds.b
         w = self.ds.weights[:, 0, 0]
         result = pearson_r(a, b, weights=w, dim='lat')
         assert np.allclose(self.weighted_r_lat, result)

-    def test_xr_inputs_skipna(self):
+    def test_xr_inputs_skipna(self) -> None:
         # deep copy to prevent adding nans to the test data for other tests
         a = self.ds.a.copy(deep=True)
         a[0] = np.nan
@@ -435,7 +437,7 @@ def test_xr_inputs_skipna(self):
         result = pearson_r(a, b, skipna=True)
         assert np.allclose(self.unweighted_r_skipnan, result)

-    def test_keep_attrs(self):
+    def test_keep_attrs(self) -> None:
         a = self.ds.a
         b = self.ds.b
         a.attrs.update({'Description': 'Test Data'})