diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index c408306ffdb..411c6c05cd2 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -47,6 +47,8 @@ Enhancements
 Bug fixes
 ~~~~~~~~~
 
+- Silenced warnings that appear when using pandas 0.24.
+  By `Stephan Hoyer <https://github.com/shoyer>`_
 - Interpolating via resample now internally specifies ``bounds_error=False``
   as an argument to ``scipy.interpolate.interp1d``, allowing for interpolation
   from higher frequencies to lower frequencies. Datapoints outside the bounds
diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py
index 19e2d009e44..9347ba6b6db 100644
--- a/xarray/core/coordinates.py
+++ b/xarray/core/coordinates.py
@@ -1,4 +1,5 @@
-from collections import Mapping, OrderedDict
+import collections.abc
+from collections import OrderedDict
 from contextlib import contextmanager
 
 import pandas as pd
@@ -14,7 +15,7 @@
 _THIS_ARRAY = ReprObject('<this-array>')
 
 
-class AbstractCoordinates(Mapping):
+class AbstractCoordinates(collections.abc.Mapping):
     def __getitem__(self, key):
         raise NotImplementedError
 
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index 711dfcdce71..0d1b9ebd55b 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -130,7 +130,7 @@ def merge_indexes(
         if isinstance(var_names, str):
            var_names = [var_names]
 
-        names, labels, levels = [], [], []  # type: (list, list, list)
+        names, codes, levels = [], [], []  # type: (list, list, list)
         current_index_variable = variables.get(dim)
 
         for n in var_names:
@@ -144,13 +144,18 @@
         if current_index_variable is not None and append:
             current_index = current_index_variable.to_index()
             if isinstance(current_index, pd.MultiIndex):
+                try:
+                    current_codes = current_index.codes
+                except AttributeError:
+                    # for pandas < 0.24
+                    current_codes = current_index.labels
                 names.extend(current_index.names)
-                labels.extend(current_index.labels)
+                codes.extend(current_codes)
                 levels.extend(current_index.levels)
             else:
                 names.append('%s_level_0' % dim)
                 cat = pd.Categorical(current_index.values, ordered=True)
-                labels.append(cat.codes)
+                codes.append(cat.codes)
                 levels.append(cat.categories)
 
         if not len(names) and len(var_names) == 1:
@@ -161,10 +166,10 @@
                 names.append(n)
                 var = variables[n]
                 cat = pd.Categorical(var.values, ordered=True)
-                labels.append(cat.codes)
+                codes.append(cat.codes)
                 levels.append(cat.categories)
 
-            idx = pd.MultiIndex(labels=labels, levels=levels, names=names)
+            idx = pd.MultiIndex(levels, codes, names=names)
 
         vars_to_replace[dim] = IndexVariable(dim, idx)
         vars_to_remove.extend(var_names)
diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py
index 24234d3b6b5..863c0378835 100644
--- a/xarray/tests/test_coding_times.py
+++ b/xarray/tests/test_coding_times.py
@@ -533,17 +533,22 @@ def test_infer_cftime_datetime_units(calendar, date_args, expected):
 
 @pytest.mark.parametrize(
     ['timedeltas', 'units', 'numbers'],
-    [('1D', 'days', np.int64(1)),
-     (['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
-     ('1h', 'hours', np.int64(1)),
-     ('1ms', 'milliseconds', np.int64(1)),
-     ('1us', 'microseconds', np.int64(1)),
-     (['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
-     (['30m', '60m'], 'hours', [0.5, 1.0]),
-     (np.timedelta64('NaT', 'ns'), 'days', np.nan),
-     (['NaT', 'NaT'], 'days', [np.nan, np.nan])])
+    [
+        ('1D', 'days', np.int64(1)),
+        (['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
+        ('1h', 'hours', np.int64(1)),
+        ('1ms', 'milliseconds', np.int64(1)),
+        ('1us', 'microseconds', np.int64(1)),
+        (['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
+        (['30m', '60m'], 'hours', [0.5, 1.0]),
+        ('NaT', 'days', np.nan),
+        (['NaT', 'NaT'], 'days', [np.nan, np.nan]),
+    ])
 def test_cf_timedelta(timedeltas, units, numbers):
-    timedeltas = pd.to_timedelta(timedeltas, box=False)
+    if timedeltas == 'NaT':
+        timedeltas = np.timedelta64('NaT', 'ns')
+    else:
+        timedeltas = pd.to_timedelta(timedeltas, box=False)
     numbers = np.array(numbers)
 
     expected = numbers
diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py
index 23e15aeff24..59d14d7cdac 100644
--- a/xarray/tests/test_dataarray.py
+++ b/xarray/tests/test_dataarray.py
@@ -122,21 +122,14 @@ def test_struct_array_dims(self):
         """
         # GH837, GH861
         # checking array subraction when dims are the same
-        p_data = np.array([('John', 180), ('Stacy', 150), ('Dick', 200)],
+        # note: names need to be in sorted order to align consistently with
+        # pandas < 0.24 and >= 0.24.
+        p_data = np.array([('Abe', 180), ('Stacy', 150), ('Dick', 200)],
                           dtype=[('name', '|S256'), ('height', object)])
-
-        p_data_1 = np.array([('John', 180), ('Stacy', 150), ('Dick', 200)],
-                            dtype=[('name', '|S256'), ('height', object)])
-
-        p_data_2 = np.array([('John', 180), ('Dick', 200)],
-                            dtype=[('name', '|S256'), ('height', object)])
-
         weights_0 = DataArray([80, 56, 120], dims=['participant'],
                               coords={'participant': p_data})
-
         weights_1 = DataArray([81, 52, 115], dims=['participant'],
-                              coords={'participant': p_data_1})
-
+                              coords={'participant': p_data})
         actual = weights_1 - weights_0
 
         expected = DataArray([1, -4, -5], dims=['participant'],
@@ -145,31 +138,27 @@ def test_struct_array_dims(self):
         assert_identical(actual, expected)
 
         # checking array subraction when dims are not the same
-        p_data_1 = np.array([('John', 180), ('Stacy', 151), ('Dick', 200)],
-                            dtype=[('name', '|S256'), ('height', object)])
-
+        p_data_alt = np.array([('Abe', 180), ('Stacy', 151), ('Dick', 200)],
+                              dtype=[('name', '|S256'), ('height', object)])
         weights_1 = DataArray([81, 52, 115], dims=['participant'],
-                              coords={'participant': p_data_1})
-
+                              coords={'participant': p_data_alt})
         actual = weights_1 - weights_0
 
         expected = DataArray([1, -5], dims=['participant'],
-                             coords={'participant': p_data_2})
+                             coords={'participant': p_data[[0, 2]]})
 
         assert_identical(actual, expected)
 
         # checking array subraction when dims are not the same and one
         # is np.nan
-        p_data_1 = np.array([('John', 180), ('Stacy', np.nan), ('Dick', 200)],
-                            dtype=[('name', '|S256'), ('height', object)])
-
+        p_data_nan = np.array([('Abe', 180), ('Stacy', np.nan), ('Dick', 200)],
+                              dtype=[('name', '|S256'), ('height', object)])
         weights_1 = DataArray([81, 52, 115], dims=['participant'],
-                              coords={'participant': p_data_1})
-
+                              coords={'participant': p_data_nan})
         actual = weights_1 - weights_0
 
         expected = DataArray([1, -5], dims=['participant'],
-                             coords={'participant': p_data_2})
+                             coords={'participant': p_data[[0, 2]]})
 
         assert_identical(actual, expected)
 
diff --git a/xarray/tests/test_utils.py b/xarray/tests/test_utils.py
index 5a5041b5449..09152bac284 100644
--- a/xarray/tests/test_utils.py
+++ b/xarray/tests/test_utils.py
@@ -74,7 +74,9 @@ def test_multiindex_from_product_levels():
     result = utils.multiindex_from_product_levels(
         [pd.Index(['b', 'a']), pd.Index([1, 3, 2])])
     np.testing.assert_array_equal(
-        result.labels, [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
+        # compat for pandas < 0.24
+        result.codes if hasattr(result, 'codes') else result.labels,
+        [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
     np.testing.assert_array_equal(result.levels[0], ['b', 'a'])
     np.testing.assert_array_equal(result.levels[1], [1, 3, 2])
 
@@ -86,7 +88,9 @@ def test_multiindex_from_product_levels_non_unique():
     result = utils.multiindex_from_product_levels(
         [pd.Index(['b', 'a']), pd.Index([1, 1, 2])])
     np.testing.assert_array_equal(
-        result.labels, [[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1]])
+        # compat for pandas < 0.24
+        result.codes if hasattr(result, 'codes') else result.labels,
+        [[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1]])
     np.testing.assert_array_equal(result.levels[0], ['b', 'a'])
     np.testing.assert_array_equal(result.levels[1], [1, 2])
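
Most of the hunks above work around a single pandas 0.24 API change: ``MultiIndex.labels`` was renamed to ``MultiIndex.codes``. Below is a minimal, standalone sketch of the compatibility pattern the patch relies on; the ``multiindex_codes`` helper name is illustrative only and is not part of xarray:

    import pandas as pd

    def multiindex_codes(index):
        # pandas >= 0.24 exposes the integer level positions as ``codes``;
        # earlier versions call the same attribute ``labels``.
        try:
            return index.codes
        except AttributeError:  # pandas < 0.24
            return index.labels

    idx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
    print(multiindex_codes(idx))  # FrozenList([[0, 0, 1, 1], [0, 1, 0, 1]])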