From 56c20195ab2a96c752d12e7aaa75912019de369e Mon Sep 17 00:00:00 2001 From: Tong SHEN Date: Sun, 9 Apr 2017 22:40:12 +0800 Subject: [PATCH 01/56] DOC: Fix a comment typo in pandas/tools/concat.py (#15956) --- pandas/tools/concat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/tools/concat.py b/pandas/tools/concat.py index 6405106118472..5df9a5abb78b2 100644 --- a/pandas/tools/concat.py +++ b/pandas/tools/concat.py @@ -278,7 +278,7 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None, break else: - # filter out the empties if we have not multi-index possibiltes + # filter out the empties if we have not multi-index possibilities # note to keep empty Series as it affect to result columns / name non_empties = [obj for obj in objs if sum(obj.shape) > 0 or isinstance(obj, Series)] From c3c60f0d7a782cd429e3d7115a99cdc068a6d528 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 9 Apr 2017 11:28:51 -0400 Subject: [PATCH 02/56] DOC/API/TST: add pd.unique doc-string & consistent return value for Categorical/tz-aware datetime (#15939) closes #9346 --- doc/source/whatsnew/v0.20.0.txt | 71 +++++++++++ pandas/core/algorithms.py | 89 ++++++++++++- pandas/core/base.py | 20 ++- pandas/core/categorical.py | 27 ++++ pandas/core/series.py | 10 +- pandas/tests/test_algos.py | 220 ++++++++++++++++++++++++-------- 6 files changed, 371 insertions(+), 66 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 7664688ffa4f4..4c0594c024774 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -593,6 +593,76 @@ result. On the other hand, this might have backward incompatibilities: e.g. compared to numpy arrays, ``Index`` objects are not mutable. To get the original ndarray, you can always convert explicitly using ``np.asarray(idx.hour)``. +.. 
_whatsnew_0200.api_breaking.unique: + +pd.unique will now be consistent with extension types +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In prior versions, using ``Series.unique()`` and ``pd.unique(Series)`` on ``Categorical`` and tz-aware +datatypes would yield different return types. These are now made consistent. (:issue:`15903`) + +- Datetime tz-aware + + Previous behaviour: + + .. code-block:: ipython + + # Series + In [5]: pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')]).unique() + Out[5]: array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) + + In [6]: pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')])) + Out[6]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]') + + # Index + In [7]: pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')]).unique() + Out[7]: DatetimeIndex(['2016-01-01 00:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None) + + In [8]: pd.unique([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')]) + Out[8]: array(['2016-01-01T05:00:00.000000000'], dtype='datetime64[ns]') + + New Behavior: + + .. ipython:: python + + # Series, returns an array of Timestamp tz-aware + pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')]).unique() + pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')])) + + # Index, returns a DatetimeIndex + pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')]).unique() + pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), + pd.Timestamp('20160101', tz='US/Eastern')])) + +- Categoricals + + Previous behaviour: + + .. 
code-block:: ipython + + In [1]: pd.Series(pd.Categorical(list('baabc'))).unique() + Out[1]: + [b, a, c] + Categories (3, object): [b, a, c] + + In [2]: pd.unique(pd.Series(pd.Categorical(list('baabc')))) + Out[2]: array(['b', 'a', 'c'], dtype=object) + + New Behavior: + + .. ipython:: python + + # returns a Categorical + pd.Series(pd.Categorical(list('baabc'))).unique() + pd.unique(pd.Series(pd.Categorical(list('baabc'))).unique()) + .. _whatsnew_0200.api_breaking.s3: S3 File Handling @@ -1148,6 +1218,7 @@ Conversion - Bug in ``DataFrame`` construction with nulls and datetimes in a list-like (:issue:`15869`) - Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`) - Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`) +- Bug in the return type of ``pd.unique`` on a ``Categorical``, which was returning an ndarray and not a ``Categorical`` (:issue:`15903`) Indexing ^^^^^^^^ diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9b88ea23483bd..654e38e43b6c0 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -267,11 +267,85 @@ def match(to_match, values, na_sentinel=-1): return result -def unique1d(values): +def unique(values): """ - Hash table-based unique + Hash table-based unique. Uniques are returned in order + of appearance. This does NOT sort. + + Significantly faster than numpy.unique. Includes NA values. + + Parameters + ---------- + values : 1d array-like + + Returns + ------- + unique values. + - If the input is an Index, the return is an Index + - If the input is a Categorical dtype, the return is a Categorical + - If the input is a Series/ndarray, the return will be an ndarray + + Examples + -------- + pd.unique(pd.Series([2, 1, 3, 3])) + array([2, 1, 3]) + + >>> pd.unique(pd.Series([2] + [1] * 5)) + array([2, 1]) + + >>> pd.unique(Series([pd.Timestamp('20160101'), + ... 
pd.Timestamp('20160101')])) + array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') + + >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), + ... pd.Timestamp('20160101', tz='US/Eastern')])) + array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], + dtype=object) + + >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), + ... pd.Timestamp('20160101', tz='US/Eastern')])) + DatetimeIndex(['2016-01-01 00:00:00-05:00'], + ... dtype='datetime64[ns, US/Eastern]', freq=None) + + >>> pd.unique(list('baabc')) + array(['b', 'a', 'c'], dtype=object) + + An unordered Categorical will return categories in the + order of appearance. + + >>> pd.unique(Series(pd.Categorical(list('baabc')))) + [b, a, c] + Categories (3, object): [b, a, c] + + >>> pd.unique(Series(pd.Categorical(list('baabc'), + ... categories=list('abc')))) + [b, a, c] + Categories (3, object): [b, a, c] + + An ordered Categorical preserves the category ordering. + + >>> pd.unique(Series(pd.Categorical(list('baabc'), + ... categories=list('abc'), + ... 
ordered=True))) + [b, a, c] + Categories (3, object): [a < b < c] + + See Also + -------- + pandas.Index.unique + pandas.Series.unique + """ + values = _ensure_arraylike(values) + + # categorical is a fast-path + # this will coerce Categorical, CategoricalIndex, + # and category dtypes Series to same return of Category + if is_categorical_dtype(values): + values = getattr(values, '.values', values) + return values.unique() + original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) @@ -279,10 +353,17 @@ def unique1d(values): uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) + if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype): + # we are special casing datetime64tz_dtype + # to return an object array of tz-aware Timestamps + + # TODO: it must return DatetimeArray with tz in pandas 2.0 + uniques = uniques.asobject.values + return uniques -unique = unique1d +unique1d = unique def isin(comps, values): @@ -651,7 +732,7 @@ def mode(values): if is_categorical_dtype(values): if isinstance(values, Series): - return Series(values.values.mode()) + return Series(values.values.mode(), name=values.name) return values.mode() values, dtype, ndtype = _ensure_data(values) diff --git a/pandas/core/base.py b/pandas/core/base.py index 3401c7c59cb56..56bdeee6982d5 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -855,13 +855,24 @@ def value_counts(self, normalize=False, sort=True, ascending=False, _shared_docs['unique'] = ( """ - Return %(unique)s of unique values in the object. - Significantly faster than numpy.unique. Includes NA values. - The order of the original is preserved. + Return unique values in the object. Uniques are returned in order + of appearance, this does NOT sort. Hash table-based unique. + + Parameters + ---------- + values : 1d array-like Returns ------- - uniques : %(unique)s + unique values. 
+ - If the input is an Index, the return is an Index + - If the input is a Categorical dtype, the return is a Categorical + - If the input is a Series/ndarray, the return will be an ndarray + + See Also + -------- + pandas.unique + pandas.Categorical.unique """) @Appender(_shared_docs['unique'] % _indexops_doc_kwargs) @@ -873,6 +884,7 @@ def unique(self): else: from pandas.core.algorithms import unique1d result = unique1d(values) + return result def nunique(self, dropna=True): diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 0fcf8664e755d..e3d6792604c4c 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1895,6 +1895,33 @@ def unique(self): Returns ------- unique values : ``Categorical`` + + Examples + -------- + An unordered Categorical will return categories in the + order of appearance. + + >>> pd.Categorical(list('baabc')) + [b, a, c] + Categories (3, object): [b, a, c] + + >>> pd.Categorical(list('baabc'), categories=list('abc')) + [b, a, c] + Categories (3, object): [b, a, c] + + An ordered Categorical preserves the category ordering. + + >>> pd.Categorical(list('baabc'), + ... categories=list('abc'), + ... 
ordered=True) + [b, a, c] + Categories (3, object): [a < b < c] + + See Also + -------- + pandas.unique + pandas.CategoricalIndex.unique + """ # unlike np.unique, unique1d does not sort diff --git a/pandas/core/series.py b/pandas/core/series.py index 760abc20351cf..5ee3ca73742ae 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1204,10 +1204,14 @@ def mode(self): @Appender(base._shared_docs['unique'] % _shared_doc_kwargs) def unique(self): result = super(Series, self).unique() + if is_datetime64tz_dtype(self.dtype): - # to return array of Timestamp with tz - # ToDo: it must return DatetimeArray with tz in pandas 2.0 - return result.asobject.values + # we are special casing datetime64tz_dtype + # to return an object array of tz-aware Timestamps + + # TODO: it must return DatetimeArray with tz in pandas 2.0 + result = result.asobject.values + return result @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index d893183dae0ed..d9f81968c684d 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -6,7 +6,8 @@ from numpy import nan from datetime import datetime from itertools import permutations -from pandas import Series, Categorical, CategoricalIndex, Index +from pandas import (Series, Categorical, CategoricalIndex, Index, + Timestamp, DatetimeIndex) import pandas as pd from pandas import compat @@ -34,7 +35,7 @@ def test_ints(self): expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0])) tm.assert_series_equal(result, expected) - s = pd.Series(np.arange(5), dtype=np.float32) + s = Series(np.arange(5), dtype=np.float32) result = algos.match(s, [2, 4]) expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64) self.assert_numpy_array_equal(result, expected) @@ -204,20 +205,20 @@ def test_mixed(self): def test_datelike(self): # M8 - v1 = pd.Timestamp('20130101 09:00:00.00004') - v2 = pd.Timestamp('20130101') + v1 = Timestamp('20130101 
09:00:00.00004') + v2 = Timestamp('20130101') x = Series([v1, v1, v1, v2, v2, v1]) labels, uniques = algos.factorize(x) exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp) self.assert_numpy_array_equal(labels, exp) - exp = pd.DatetimeIndex([v1, v2]) + exp = DatetimeIndex([v1, v2]) self.assert_index_equal(uniques, exp) labels, uniques = algos.factorize(x, sort=True) exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp) self.assert_numpy_array_equal(labels, exp) - exp = pd.DatetimeIndex([v2, v1]) + exp = DatetimeIndex([v2, v1]) self.assert_index_equal(uniques, exp) # period @@ -350,7 +351,7 @@ def test_datetime64_dtype_array_returned(self): tm.assert_numpy_array_equal(result, expected) self.assertEqual(result.dtype, expected.dtype) - s = pd.Series(dt_index) + s = Series(dt_index) result = algos.unique(s) tm.assert_numpy_array_equal(result, expected) self.assertEqual(result.dtype, expected.dtype) @@ -369,7 +370,7 @@ def test_timedelta64_dtype_array_returned(self): tm.assert_numpy_array_equal(result, expected) self.assertEqual(result.dtype, expected.dtype) - s = pd.Series(td_index) + s = Series(td_index) result = algos.unique(s) tm.assert_numpy_array_equal(result, expected) self.assertEqual(result.dtype, expected.dtype) @@ -380,10 +381,119 @@ def test_timedelta64_dtype_array_returned(self): self.assertEqual(result.dtype, expected.dtype) def test_uint64_overflow(self): - s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64) + s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) exp = np.array([1, 2, 2**63], dtype=np.uint64) tm.assert_numpy_array_equal(algos.unique(s), exp) + def test_categorical(self): + + # we are expecting to return in the order + # of appearance + expected = pd.Categorical(list('bac'), + categories=list('bac')) + + # we are expecting to return in the order + # of the categories + expected_o = pd.Categorical(list('bac'), + categories=list('abc'), + ordered=True) + + # GH 15939 + c = pd.Categorical(list('baabc')) + result = c.unique() + 
tm.assert_categorical_equal(result, expected) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected) + + c = pd.Categorical(list('baabc'), ordered=True) + result = c.unique() + tm.assert_categorical_equal(result, expected_o) + + result = algos.unique(c) + tm.assert_categorical_equal(result, expected_o) + + # Series of categorical dtype + s = Series(pd.Categorical(list('baabc')), name='foo') + result = s.unique() + tm.assert_categorical_equal(result, expected) + + result = pd.unique(s) + tm.assert_categorical_equal(result, expected) + + # CI -> return CI + ci = pd.CategoricalIndex(pd.Categorical(list('baabc'), + categories=list('bac'))) + expected = pd.CategoricalIndex(expected) + result = ci.unique() + tm.assert_index_equal(result, expected) + + result = pd.unique(ci) + tm.assert_index_equal(result, expected) + + def test_datetime64tz_aware(self): + # GH 15939 + + result = Series( + pd.Index([Timestamp('20160101', tz='US/Eastern'), + Timestamp('20160101', tz='US/Eastern')])).unique() + expected = np.array([Timestamp('2016-01-01 00:00:00-0500', + tz='US/Eastern')], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.Index([Timestamp('20160101', tz='US/Eastern'), + Timestamp('20160101', tz='US/Eastern')]).unique() + expected = DatetimeIndex(['2016-01-01 00:00:00'], + dtype='datetime64[ns, US/Eastern]', freq=None) + tm.assert_index_equal(result, expected) + + result = pd.unique( + Series(pd.Index([Timestamp('20160101', tz='US/Eastern'), + Timestamp('20160101', tz='US/Eastern')]))) + expected = np.array([Timestamp('2016-01-01 00:00:00-0500', + tz='US/Eastern')], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(pd.Index([Timestamp('20160101', tz='US/Eastern'), + Timestamp('20160101', tz='US/Eastern')])) + expected = DatetimeIndex(['2016-01-01 00:00:00'], + dtype='datetime64[ns, US/Eastern]', freq=None) + tm.assert_index_equal(result, expected) + + def test_order_of_appearance(self): + # 
9346 + # light testing of guarantee of order of appearance + # these also are the doc-examples + result = pd.unique(Series([2, 1, 3, 3])) + tm.assert_numpy_array_equal(result, + np.array([2, 1, 3], dtype='int64')) + + result = pd.unique(Series([2] + [1] * 5)) + tm.assert_numpy_array_equal(result, + np.array([2, 1], dtype='int64')) + + result = pd.unique(Series([Timestamp('20160101'), + Timestamp('20160101')])) + expected = np.array(['2016-01-01T00:00:00.000000000'], + dtype='datetime64[ns]') + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(pd.Index( + [Timestamp('20160101', tz='US/Eastern'), + Timestamp('20160101', tz='US/Eastern')])) + expected = DatetimeIndex(['2016-01-01 00:00:00'], + dtype='datetime64[ns, US/Eastern]', + freq=None) + tm.assert_index_equal(result, expected) + + result = pd.unique(list('aabc')) + expected = np.array(['a', 'b', 'c'], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = pd.unique(Series(pd.Categorical(list('aabc')))) + expected = pd.Categorical(list('abc')) + tm.assert_categorical_equal(result, expected) + class TestIsin(tm.TestCase): @@ -403,15 +513,15 @@ def test_basic(self): expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - result = algos.isin(pd.Series([1, 2]), [1]) + result = algos.isin(Series([1, 2]), [1]) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - result = algos.isin(pd.Series([1, 2]), pd.Series([1])) + result = algos.isin(Series([1, 2]), Series([1])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - result = algos.isin(pd.Series([1, 2]), set([1])) + result = algos.isin(Series([1, 2]), set([1])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) @@ -419,11 +529,11 @@ def test_basic(self): expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - result = algos.isin(pd.Series(['a', 'b']), pd.Series(['a'])) + result = 
algos.isin(Series(['a', 'b']), Series(['a'])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - result = algos.isin(pd.Series(['a', 'b']), set(['a'])) + result = algos.isin(Series(['a', 'b']), set(['a'])) expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) @@ -520,33 +630,33 @@ def test_value_counts_nat(self): self.assertEqual(len(vc), 1) self.assertEqual(len(vc_with_na), 2) - exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1}) + exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1}) tm.assert_series_equal(algos.value_counts(dt), exp_dt) # TODO same for (timedelta) def test_value_counts_datetime_outofbounds(self): # GH 13663 - s = pd.Series([datetime(3000, 1, 1), datetime(5000, 1, 1), - datetime(5000, 1, 1), datetime(6000, 1, 1), - datetime(3000, 1, 1), datetime(3000, 1, 1)]) + s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1), + datetime(5000, 1, 1), datetime(6000, 1, 1), + datetime(3000, 1, 1), datetime(3000, 1, 1)]) res = s.value_counts() exp_index = pd.Index([datetime(3000, 1, 1), datetime(5000, 1, 1), datetime(6000, 1, 1)], dtype=object) - exp = pd.Series([3, 2, 1], index=exp_index) + exp = Series([3, 2, 1], index=exp_index) tm.assert_series_equal(res, exp) # GH 12424 - res = pd.to_datetime(pd.Series(['2362-01-01', np.nan]), + res = pd.to_datetime(Series(['2362-01-01', np.nan]), errors='ignore') - exp = pd.Series(['2362-01-01', np.nan], dtype=object) + exp = Series(['2362-01-01', np.nan], dtype=object) tm.assert_series_equal(res, exp) def test_categorical(self): s = Series(pd.Categorical(list('aaabbc'))) result = s.value_counts() - expected = pd.Series([3, 2, 1], - index=pd.CategoricalIndex(['a', 'b', 'c'])) + expected = Series([3, 2, 1], + index=pd.CategoricalIndex(['a', 'b', 'c'])) tm.assert_series_equal(result, expected, check_index_type=True) # preserve order? 
@@ -559,11 +669,11 @@ def test_categorical_nans(self): s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan) s.iloc[1] = np.nan result = s.value_counts() - expected = pd.Series([4, 3, 2], index=pd.CategoricalIndex( + expected = Series([4, 3, 2], index=pd.CategoricalIndex( ['a', 'b', 'c'], categories=['a', 'b', 'c'])) tm.assert_series_equal(result, expected, check_index_type=True) result = s.value_counts(dropna=False) - expected = pd.Series([ + expected = Series([ 4, 3, 2, 1 ], index=pd.CategoricalIndex(['a', 'b', 'c', np.nan])) tm.assert_series_equal(result, expected, check_index_type=True) @@ -573,12 +683,12 @@ def test_categorical_nans(self): list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c'])) s.iloc[1] = np.nan result = s.value_counts() - expected = pd.Series([4, 3, 2], index=pd.CategoricalIndex( + expected = Series([4, 3, 2], index=pd.CategoricalIndex( ['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True)) tm.assert_series_equal(result, expected, check_index_type=True) result = s.value_counts(dropna=False) - expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex( + expected = Series([4, 3, 2, 1], index=pd.CategoricalIndex( ['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True)) tm.assert_series_equal(result, expected, check_index_type=True) @@ -595,33 +705,33 @@ def test_dropna(self): # https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328 tm.assert_series_equal( - pd.Series([True, True, False]).value_counts(dropna=True), - pd.Series([2, 1], index=[True, False])) + Series([True, True, False]).value_counts(dropna=True), + Series([2, 1], index=[True, False])) tm.assert_series_equal( - pd.Series([True, True, False]).value_counts(dropna=False), - pd.Series([2, 1], index=[True, False])) + Series([True, True, False]).value_counts(dropna=False), + Series([2, 1], index=[True, False])) tm.assert_series_equal( - pd.Series([True, True, False, None]).value_counts(dropna=True), - pd.Series([2, 1], index=[True, 
False])) + Series([True, True, False, None]).value_counts(dropna=True), + Series([2, 1], index=[True, False])) tm.assert_series_equal( - pd.Series([True, True, False, None]).value_counts(dropna=False), - pd.Series([2, 1, 1], index=[True, False, np.nan])) + Series([True, True, False, None]).value_counts(dropna=False), + Series([2, 1, 1], index=[True, False, np.nan])) tm.assert_series_equal( - pd.Series([10.3, 5., 5.]).value_counts(dropna=True), - pd.Series([2, 1], index=[5., 10.3])) + Series([10.3, 5., 5.]).value_counts(dropna=True), + Series([2, 1], index=[5., 10.3])) tm.assert_series_equal( - pd.Series([10.3, 5., 5.]).value_counts(dropna=False), - pd.Series([2, 1], index=[5., 10.3])) + Series([10.3, 5., 5.]).value_counts(dropna=False), + Series([2, 1], index=[5., 10.3])) tm.assert_series_equal( - pd.Series([10.3, 5., 5., None]).value_counts(dropna=True), - pd.Series([2, 1], index=[5., 10.3])) + Series([10.3, 5., 5., None]).value_counts(dropna=True), + Series([2, 1], index=[5., 10.3])) # 32-bit linux has a different ordering if not compat.is_platform_32bit(): - result = pd.Series([10.3, 5., 5., None]).value_counts(dropna=False) - expected = pd.Series([2, 1, 1], index=[5., 10.3, np.nan]) + result = Series([10.3, 5., 5., None]).value_counts(dropna=False) + expected = Series([2, 1, 1], index=[5., 10.3, np.nan]) tm.assert_series_equal(result, expected) def test_value_counts_normalized(self): @@ -736,15 +846,15 @@ def test_numeric_object_likes(self): tm.assert_numpy_array_equal(res_false, exp_false) # series - for s in [pd.Series(case), pd.Series(case, dtype='category')]: + for s in [Series(case), Series(case, dtype='category')]: res_first = s.duplicated(keep='first') - tm.assert_series_equal(res_first, pd.Series(exp_first)) + tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep='last') - tm.assert_series_equal(res_last, pd.Series(exp_last)) + tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) - 
tm.assert_series_equal(res_false, pd.Series(exp_false)) + tm.assert_series_equal(res_false, Series(exp_false)) def test_datetime_likes(self): @@ -753,8 +863,8 @@ def test_datetime_likes(self): td = ['1 days', '2 days', '1 days', 'NaT', '3 days', '2 days', '4 days', '1 days', 'NaT', '6 days'] - cases = [np.array([pd.Timestamp(d) for d in dt]), - np.array([pd.Timestamp(d, tz='US/Eastern') for d in dt]), + cases = [np.array([Timestamp(d) for d in dt]), + np.array([Timestamp(d, tz='US/Eastern') for d in dt]), np.array([pd.Period(d, freq='D') for d in dt]), np.array([np.datetime64(d) for d in dt]), np.array([pd.Timedelta(d) for d in td])] @@ -788,16 +898,16 @@ def test_datetime_likes(self): tm.assert_numpy_array_equal(res_false, exp_false) # series - for s in [pd.Series(case), pd.Series(case, dtype='category'), - pd.Series(case, dtype=object)]: + for s in [Series(case), Series(case, dtype='category'), + Series(case, dtype=object)]: res_first = s.duplicated(keep='first') - tm.assert_series_equal(res_first, pd.Series(exp_first)) + tm.assert_series_equal(res_first, Series(exp_first)) res_last = s.duplicated(keep='last') - tm.assert_series_equal(res_last, pd.Series(exp_last)) + tm.assert_series_equal(res_last, Series(exp_last)) res_false = s.duplicated(keep=False) - tm.assert_series_equal(res_false, pd.Series(exp_false)) + tm.assert_series_equal(res_false, Series(exp_false)) def test_unique_index(self): cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)] @@ -939,7 +1049,7 @@ def test_lookup_overflow(self): np.arange(len(xs), dtype=np.int64)) def test_get_unique(self): - s = pd.Series([1, 2, 2**63, 2**63], dtype=np.uint64) + s = Series([1, 2, 2**63, 2**63], dtype=np.uint64) exp = np.array([1, 2, 2**63], dtype=np.uint64) self.assert_numpy_array_equal(s.unique(), exp) From b7ddb0a734eeed3fc4692724a6951ae7a62b1dea Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 9 Apr 2017 12:07:35 -0400 Subject: [PATCH 03/56] DOC: add .unique to top-level in api --- doc/source/api.rst | 1 
+ 1 file changed, 1 insertion(+) diff --git a/doc/source/api.rst b/doc/source/api.rst index 336b0b9b14c6c..8b4f295392a68 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -165,6 +165,7 @@ Data manipulations concat get_dummies factorize + unique wide_to_long Top-level missing data From b4c6fc1dc4fac2951c7e3ee035968760b4f4adb9 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 9 Apr 2017 13:58:31 -0400 Subject: [PATCH 04/56] DOC: add some See Also to Categorical --- doc/source/api.rst | 10 +++++++++- pandas/core/algorithms.py | 2 +- pandas/core/categorical.py | 17 +++++++++++++---- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/doc/source/api.rst b/doc/source/api.rst index 8b4f295392a68..2e6f693677e4e 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -653,13 +653,21 @@ adding ordering information or special categories is need at creation time of th Categorical.from_codes ``np.asarray(categorical)`` works by implementing the array interface. Be aware, that this converts -the Categorical back to a numpy array, so levels and order information is not preserved! +the Categorical back to a numpy array, so categories and order information is not preserved! .. autosummary:: :toctree: generated/ Categorical.__array__ +Categorical methods + +.. 
autosummary:: + :toctree: generated/ + + Categorical.unique + Categorical.value_counts + Plotting ~~~~~~~~ diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 654e38e43b6c0..7fab9295bb94e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -287,7 +287,7 @@ def unique(values): Examples -------- - pd.unique(pd.Series([2, 1, 3, 3])) + >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index e3d6792604c4c..906e8efafe4af 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1137,8 +1137,9 @@ def isnull(self): See also -------- - pandas.isnull : pandas version + isnull : pandas version Categorical.notnull : boolean inverse of Categorical.isnull + """ ret = self._codes == -1 @@ -1164,8 +1165,9 @@ def notnull(self): See also -------- - pandas.notnull : pandas version + notnull : pandas version Categorical.isnull : boolean inverse of Categorical.notnull + """ return ~self.isnull() @@ -1206,6 +1208,11 @@ def value_counts(self, dropna=True): Returns ------- counts : Series + + See Also + -------- + Series.value_counts + """ from numpy import bincount from pandas.types.missing import isnull @@ -1308,6 +1315,7 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'): See Also -------- Categorical.sort + Series.sort_values Examples -------- @@ -1919,8 +1927,9 @@ def unique(self): See Also -------- - pandas.unique - pandas.CategoricalIndex.unique + unique + CategoricalIndex.unique + Series.unique """ From b4701a6dcb432ba6c5c5b757f4956ae59d282781 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 9 Apr 2017 14:24:05 -0400 Subject: [PATCH 05/56] DOC: suppress some doc build warnings (#15958) --- doc/source/dsintro.rst | 1 + doc/source/io.rst | 1 + doc/source/whatsnew/v0.13.0.txt | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/dsintro.rst 
b/doc/source/dsintro.rst index 0086cb0f94747..3c6572229802d 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -979,6 +979,7 @@ Convert to a MultiIndex DataFrame Alternatively, one can convert to an xarray ``DataArray``. .. ipython:: python + :okwarning: p.to_xarray() diff --git a/doc/source/io.rst b/doc/source/io.rst index f4676f3ad964e..2b3d2895333d3 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4487,6 +4487,7 @@ See the `Full Documentation `__ Write to a feather file. .. ipython:: python + :okwarning: df.to_feather('example.feather') diff --git a/doc/source/whatsnew/v0.13.0.txt b/doc/source/whatsnew/v0.13.0.txt index 118632cc2c0ee..3347b05a5df37 100644 --- a/doc/source/whatsnew/v0.13.0.txt +++ b/doc/source/whatsnew/v0.13.0.txt @@ -357,11 +357,11 @@ HDFStore API Changes .. ipython:: python path = 'test.h5' - df = DataFrame(randn(10,2)) + df = pd.DataFrame(np.random.randn(10,2)) df.to_hdf(path,'df_table',format='table') df.to_hdf(path,'df_table2',append=True) df.to_hdf(path,'df_fixed') - with get_store(path) as store: + with pd.HDFStore(path) as store: print(store) .. 
ipython:: python From 9cb2c2db0dd763bb9e6586d3103a564875ed25d5 Mon Sep 17 00:00:00 2001 From: Tong Shen Date: Mon, 10 Apr 2017 08:08:14 -0400 Subject: [PATCH 06/56] BUG: Fix MultiIndex names handling in pd.concat closes #15787 Author: Tong Shen Closes #15955 from funnycrab/fix_bug_in_concat and squashes the following commits: 8c0e721 [Tong Shen] explicitly specify dtype when constructing DataFrame to avoid test failure db7866f [Tong Shen] construct expected results as DataFrame instead of FrozenList 7f82be9 [Tong Shen] BUG: Fix MultiIndex names handling in pd.concat --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/indexes/api.py | 2 +- pandas/tests/tools/test_concat.py | 24 ++++++++++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 4c0594c024774..e8170b4bf2113 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1241,6 +1241,7 @@ Indexing - Bug in creating a ``MultiIndex`` with tuples and not passing a list of names; this will now raise ``ValueError`` (:issue:`15110`) - Bug in the HTML display with with a ``MultiIndex`` and truncation (:issue:`14882`) - Bug in the display of ``.info()`` where a qualifier (+) would always be displayed with a ``MultiIndex`` that contains only non-strings (:issue:`15245`) +- Bug in ``pd.concat()`` where the names of ``MultiIndex`` of resulting ``DataFrame`` are not handled correctly when ``None`` is presented in the names of ``MultiIndex`` of input ``DataFrame`` (:issue:`15787`) I/O ^^^ diff --git a/pandas/indexes/api.py b/pandas/indexes/api.py index a38453e0d2ccc..a3cb54ca97071 100644 --- a/pandas/indexes/api.py +++ b/pandas/indexes/api.py @@ -107,7 +107,7 @@ def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make # the set hashable, then reverse on return consensus_names = set([tuple(i.names) for i in indexes - if all(n is not None for n in i.names)]) + if any(n is not 
None for n in i.names)]) if len(consensus_names) == 1: return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py index 623c5fa02fcb2..c61f2a3dc8066 100644 --- a/pandas/tests/tools/test_concat.py +++ b/pandas/tests/tools/test_concat.py @@ -1048,6 +1048,30 @@ def test_concat_multiindex_with_tz(self): result = concat([df, df]) tm.assert_frame_equal(result, expected) + def test_concat_multiindex_with_none_in_index_names(self): + # GH 15787 + index = pd.MultiIndex.from_product([[1], range(5)], + names=['level1', None]) + df = pd.DataFrame({'col': range(5)}, index=index, dtype=np.int32) + + result = concat([df, df], keys=[1, 2], names=['level2']) + index = pd.MultiIndex.from_product([[1, 2], [1], range(5)], + names=['level2', 'level1', None]) + expected = pd.DataFrame({'col': list(range(5)) * 2}, + index=index, dtype=np.int32) + assert_frame_equal(result, expected) + + result = concat([df, df[:2]], keys=[1, 2], names=['level2']) + level2 = [1] * 5 + [2] * 2 + level1 = [1] * 7 + no_name = list(range(5)) + list(range(2)) + tuples = list(zip(level2, level1, no_name)) + index = pd.MultiIndex.from_tuples(tuples, + names=['level2', 'level1', None]) + expected = pd.DataFrame({'col': no_name}, index=index, + dtype=np.int32) + assert_frame_equal(result, expected) + def test_concat_keys_and_levels(self): df = DataFrame(np.random.randn(1, 3)) df2 = DataFrame(np.random.randn(1, 4)) From d984cfc391121882906564541fbce49c48cdc229 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 10 Apr 2017 08:12:00 -0400 Subject: [PATCH 07/56] TST: clean up series/frame api tests inheritance a bit (#15949) * TST: clean up series/frame api tests inheritance a bit * BUG: Index.to_series() is not copying the index --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/indexes/base.py | 4 +- .../frame/{test_misc_api.py => test_api.py} | 0 pandas/tests/frame/test_query_eval.py | 2 +- 
pandas/tests/indexes/common.py | 9 ++++ .../series/{test_misc_api.py => test_api.py} | 0 pandas/tests/series/test_quantile.py | 42 ++++++++----------- pandas/tests/sparse/test_frame.py | 2 +- pandas/tests/sparse/test_series.py | 2 +- pandas/tseries/index.py | 4 +- 10 files changed, 36 insertions(+), 30 deletions(-) rename pandas/tests/frame/{test_misc_api.py => test_api.py} (100%) rename pandas/tests/series/{test_misc_api.py => test_api.py} (100%) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index e8170b4bf2113..fd1cd3d0022c9 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1219,6 +1219,7 @@ Conversion - Bug in ``DataFrame.fillna()`` with tz-aware datetimes (:issue:`15855`) - Bug in ``is_string_dtype``, ``is_timedelta64_ns_dtype``, and ``is_string_like_dtype`` in which an error was raised when ``None`` was passed in (:issue:`15941`) - Bug in the return type of ``pd.unique`` on a ``Categorical``, which was returning an ndarray and not a ``Categorical`` (:issue:`15903`) +- Bug in ``Index.to_series()`` where the index was not copied (and so mutating later would change the original), (:issue:`15949`) Indexing ^^^^^^^^ diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index 91e2422873dd4..bf7975bcdb964 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -944,7 +944,9 @@ def to_series(self, **kwargs): """ from pandas import Series - return Series(self._to_embed(), index=self, name=self.name) + return Series(self._to_embed(), + index=self._shallow_copy(), + name=self.name) def _to_embed(self, keep_tz=False): """ diff --git a/pandas/tests/frame/test_misc_api.py b/pandas/tests/frame/test_api.py similarity index 100% rename from pandas/tests/frame/test_misc_api.py rename to pandas/tests/frame/test_api.py diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 647af92b42273..f90b37b66d200 100644 --- a/pandas/tests/frame/test_query_eval.py 
+++ b/pandas/tests/frame/test_query_eval.py @@ -484,7 +484,7 @@ def test_date_index_query_with_NaT_duplicates(self): df = DataFrame(d) df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT df.set_index('dates1', inplace=True, drop=True) - res = df.query('index < 20130101 < dates3', engine=engine, + res = df.query('dates1 < 20130101 < dates3', engine=engine, parser=parser) expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)] diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index ba76945834aff..08f8f8d48e705 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -38,6 +38,15 @@ def test_pickle_compat_construction(self): # need an object to create with self.assertRaises(TypeError, self._holder) + def test_to_series(self): + # assert that we are creating a copy of the index + + idx = self.create_index() + s = idx.to_series() + assert s.values is not idx.values + assert s.index is not idx + assert s.name == idx.name + def test_shift(self): # GH8083 test the base class for shift diff --git a/pandas/tests/series/test_misc_api.py b/pandas/tests/series/test_api.py similarity index 100% rename from pandas/tests/series/test_misc_api.py rename to pandas/tests/series/test_api.py diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index b8d1b92081858..5aca34fb86576 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -16,17 +16,16 @@ class TestSeriesQuantile(TestData, tm.TestCase): def test_quantile(self): - from numpy import percentile q = self.ts.quantile(0.1) - self.assertEqual(q, percentile(self.ts.valid(), 10)) + self.assertEqual(q, np.percentile(self.ts.valid(), 10)) q = self.ts.quantile(0.9) - self.assertEqual(q, percentile(self.ts.valid(), 90)) + self.assertEqual(q, np.percentile(self.ts.valid(), 90)) # object dtype q = Series(self.ts, dtype=object).quantile(0.9) - self.assertEqual(q, percentile(self.ts.valid(), 90)) 
+ self.assertEqual(q, np.percentile(self.ts.valid(), 90)) # datetime64[ns] dtype dts = self.ts.index.to_series() @@ -48,12 +47,11 @@ def test_quantile(self): self.ts.quantile(invalid) def test_quantile_multi(self): - from numpy import percentile qs = [.1, .9] result = self.ts.quantile(qs) - expected = pd.Series([percentile(self.ts.valid(), 10), - percentile(self.ts.valid(), 90)], + expected = pd.Series([np.percentile(self.ts.valid(), 10), + np.percentile(self.ts.valid(), 90)], index=qs, name=self.ts.name) tm.assert_series_equal(result, expected) @@ -70,50 +68,44 @@ def test_quantile_multi(self): [], dtype=float)) tm.assert_series_equal(result, expected) + @pytest.mark.skipif(_np_version_under1p9, + reason="Numpy version is under 1.9") def test_quantile_interpolation(self): # GH #10174 - if _np_version_under1p9: - pytest.skip("Numpy version is under 1.9") - - from numpy import percentile # interpolation = linear (default case) q = self.ts.quantile(0.1, interpolation='linear') - self.assertEqual(q, percentile(self.ts.valid(), 10)) + self.assertEqual(q, np.percentile(self.ts.valid(), 10)) q1 = self.ts.quantile(0.1) - self.assertEqual(q1, percentile(self.ts.valid(), 10)) + self.assertEqual(q1, np.percentile(self.ts.valid(), 10)) # test with and without interpolation keyword self.assertEqual(q, q1) + @pytest.mark.skipif(_np_version_under1p9, + reason="Numpy version is under 1.9") def test_quantile_interpolation_dtype(self): # GH #10174 - if _np_version_under1p9: - pytest.skip("Numpy version is under 1.9") - - from numpy import percentile # interpolation = linear (default case) q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower') - self.assertEqual(q, percentile(np.array([1, 3, 4]), 50)) + self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50)) self.assertTrue(is_integer(q)) q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher') - self.assertEqual(q, percentile(np.array([1, 3, 4]), 50)) + self.assertEqual(q, np.percentile(np.array([1, 3, 4]), 50)) 
self.assertTrue(is_integer(q)) + @pytest.mark.skipif(not _np_version_under1p9, + reason="Numpy version is greater 1.9") def test_quantile_interpolation_np_lt_1p9(self): # GH #10174 - if not _np_version_under1p9: - pytest.skip("Numpy version is greater than 1.9") - - from numpy import percentile # interpolation = linear (default case) q = self.ts.quantile(0.1, interpolation='linear') - self.assertEqual(q, percentile(self.ts.valid(), 10)) + self.assertEqual(q, np.percentile(self.ts.valid(), 10)) q1 = self.ts.quantile(0.1) - self.assertEqual(q1, percentile(self.ts.valid(), 10)) + self.assertEqual(q1, np.percentile(self.ts.valid(), 10)) # interpolation other than linear expErrMsg = "Interpolation methods other than " diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index ae1a1e35f1859..e6482d70e0ae3 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -22,7 +22,7 @@ from pandas.sparse.libsparse import BlockIndex, IntIndex from pandas.sparse.api import SparseSeries, SparseDataFrame, SparseArray -from pandas.tests.frame.test_misc_api import SharedWithSparse +from pandas.tests.frame.test_api import SharedWithSparse from pandas.tests.sparse.common import spmatrix # noqa: F401 diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index 8aa85a5b7f396..83f0237841dbd 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -18,7 +18,7 @@ from pandas.sparse.libsparse import BlockIndex, IntIndex from pandas.sparse.api import SparseSeries -from pandas.tests.series.test_misc_api import SharedWithSparse +from pandas.tests.series.test_api import SharedWithSparse def _test_data1(): diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 8fa842a836051..2c14d4f8ea79e 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -895,7 +895,9 @@ def to_series(self, keep_tz=False): Series """ from pandas import Series - return 
Series(self._to_embed(keep_tz), index=self, name=self.name) + return Series(self._to_embed(keep_tz), + index=self._shallow_copy(), + name=self.name) def _to_embed(self, keep_tz=False): """ From 838e09ccef896a1265b5447fb14f0969f7cd86d5 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 10 Apr 2017 08:16:38 -0400 Subject: [PATCH 08/56] DOC: remove Categorical.unique refs from doc-strings (#15964) closes #15957 --- doc/source/api.rst | 8 -------- pandas/core/base.py | 5 +++-- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/doc/source/api.rst b/doc/source/api.rst index 2e6f693677e4e..bf9d521e2a12a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -660,14 +660,6 @@ the Categorical back to a numpy array, so categories and order information is no Categorical.__array__ -Categorical methods - -.. autosummary:: - :toctree: generated/ - - Categorical.unique - Categorical.value_counts - Plotting ~~~~~~~~ diff --git a/pandas/core/base.py b/pandas/core/base.py index 56bdeee6982d5..bdbfb7b949986 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -871,8 +871,9 @@ def value_counts(self, normalize=False, sort=True, ascending=False, See Also -------- - pandas.unique - pandas.Categorical.unique + unique + Index.unique + Series.unique """) @Appender(_shared_docs['unique'] % _indexops_doc_kwargs) From fbbcc10948e5847f1aa5f20684c15cdfc516f91c Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Tue, 11 Apr 2017 06:29:30 -0400 Subject: [PATCH 09/56] TST/DEPR: fix bunch of Panel deprecation warnings (#15965) * TST: remove using .to_datetime w/Timestamp as deprecated * TST: split some sorting tests * fix isnan comparison deprecation warning * TST/DEPR: catch Panel deprecation warnings * close files in tests pickles with compression --- pandas/tests/computation/test_eval.py | 12 +- pandas/tests/frame/test_reshape.py | 12 +- pandas/tests/frame/test_subclass.py | 38 ++- pandas/tests/groupby/test_groupby.py | 111 +++--- 
pandas/tests/indexes/datetimes/test_tools.py | 3 +- pandas/tests/indexing/common.py | 57 ++-- pandas/tests/indexing/test_multiindex.py | 171 +++++----- pandas/tests/indexing/test_panel.py | 316 +++++++++--------- pandas/tests/indexing/test_partial.py | 55 +-- .../tests/io/generate_legacy_storage_files.py | 19 +- pandas/tests/io/msgpack/common.py | 10 + pandas/tests/io/msgpack/test_buffer.py | 3 +- pandas/tests/io/msgpack/test_extension.py | 6 +- pandas/tests/io/test_excel.py | 17 +- pandas/tests/io/test_packers.py | 20 +- pandas/tests/io/test_pickle.py | 11 +- pandas/tests/sparse/test_frame.py | 28 +- pandas/tests/test_categorical.py | 9 +- pandas/tests/test_multilevel.py | 17 +- pandas/tests/test_sorting.py | 25 +- pandas/tests/tools/test_concat.py | 28 +- pandas/tests/tools/test_hashing.py | 14 +- pandas/tests/tools/test_join.py | 144 ++++---- pandas/tests/tseries/test_resample.py | 82 ++--- pandas/tests/types/test_generic.py | 4 +- pandas/tests/types/test_inference.py | 7 +- pandas/types/missing.py | 2 +- 27 files changed, 667 insertions(+), 554 deletions(-) create mode 100644 pandas/tests/io/msgpack/common.py diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 97ed88b1dc22b..78aad90cacf94 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1,4 +1,5 @@ import warnings +from warnings import catch_warnings import operator from itertools import product @@ -1130,11 +1131,12 @@ def test_bool_ops_with_constants(self): self.assertEqual(res, exp) def test_panel_fails(self): - x = Panel(randn(3, 4, 5)) - y = Series(randn(10)) - with pytest.raises(NotImplementedError): - self.eval('x + y', - local_dict={'x': x, 'y': y}) + with catch_warnings(record=True): + x = Panel(randn(3, 4, 5)) + y = Series(randn(10)) + with pytest.raises(NotImplementedError): + self.eval('x + y', + local_dict={'x': x, 'y': y}) def test_4d_ndarray_fails(self): x = randn(3, 4, 5, 6) diff --git 
a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index c8c7313ddd071..a0ee4ca2ce287 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -2,6 +2,7 @@ from __future__ import print_function +from warnings import catch_warnings from datetime import datetime import itertools @@ -53,11 +54,12 @@ def test_pivot(self): self.assertEqual(pivoted.index.name, 'index') self.assertEqual(pivoted.columns.names, (None, 'columns')) - # pivot multiple columns - wp = tm.makePanel() - lp = wp.to_frame() - df = lp.reset_index() - assert_frame_equal(df.pivot('major', 'minor'), lp.unstack()) + with catch_warnings(record=True): + # pivot multiple columns + wp = tm.makePanel() + lp = wp.to_frame() + df = lp.reset_index() + assert_frame_equal(df.pivot('major', 'minor'), lp.unstack()) def test_pivot_duplicates(self): data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'], diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 9052a16bf973c..7444490d18373 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -2,6 +2,7 @@ from __future__ import print_function +from warnings import catch_warnings import numpy as np from pandas import DataFrame, Series, MultiIndex, Panel @@ -128,24 +129,25 @@ def test_indexing_sliced(self): def test_to_panel_expanddim(self): # GH 9762 - class SubclassedFrame(DataFrame): - - @property - def _constructor_expanddim(self): - return SubclassedPanel - - class SubclassedPanel(Panel): - pass - - index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)]) - df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index) - result = df.to_panel() - self.assertTrue(isinstance(result, SubclassedPanel)) - expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]], - items=['X', 'Y'], major_axis=[0], - minor_axis=[0, 1, 2], - dtype='int64') - tm.assert_panel_equal(result, expected) + with catch_warnings(record=True): + class 
SubclassedFrame(DataFrame): + + @property + def _constructor_expanddim(self): + return SubclassedPanel + + class SubclassedPanel(Panel): + pass + + index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)]) + df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index) + result = df.to_panel() + self.assertTrue(isinstance(result, SubclassedPanel)) + expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]], + items=['X', 'Y'], major_axis=[0], + minor_axis=[0, 1, 2], + dtype='int64') + tm.assert_panel_equal(result, expected) def test_subclass_attr_err_propagation(self): # GH 11808 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index c17c98c5448be..68955c954206e 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import print_function +from warnings import catch_warnings from string import ascii_lowercase from datetime import datetime from numpy import nan @@ -814,12 +815,14 @@ def f(grp): assert_series_equal(result, e) def test_get_group(self): - wp = tm.makePanel() - grouped = wp.groupby(lambda x: x.month, axis='major') + with catch_warnings(record=True): + wp = tm.makePanel() + grouped = wp.groupby(lambda x: x.month, axis='major') - gp = grouped.get_group(1) - expected = wp.reindex(major=[x for x in wp.major_axis if x.month == 1]) - assert_panel_equal(gp, expected) + gp = grouped.get_group(1) + expected = wp.reindex( + major=[x for x in wp.major_axis if x.month == 1]) + assert_panel_equal(gp, expected) # GH 5267 # be datelike friendly @@ -1317,16 +1320,17 @@ def test_multi_iter_frame(self): pass def test_multi_iter_panel(self): - wp = tm.makePanel() - grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()], - axis=1) - - for (month, wd), group in grouped: - exp_axis = [x - for x in wp.major_axis - if x.month == month and x.weekday() == wd] - expected = wp.reindex(major=exp_axis) - assert_panel_equal(group, expected) + 
with catch_warnings(record=True): + wp = tm.makePanel() + grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()], + axis=1) + + for (month, wd), group in grouped: + exp_axis = [x + for x in wp.major_axis + if x.month == month and x.weekday() == wd] + expected = wp.reindex(major=exp_axis) + assert_panel_equal(group, expected) def test_multi_func(self): col1 = self.df['A'] @@ -1387,25 +1391,26 @@ def test_groupby_multiple_columns(self): def _check_op(op): - result1 = op(grouped) - - expected = defaultdict(dict) - for n1, gp1 in data.groupby('A'): - for n2, gp2 in gp1.groupby('B'): - expected[n1][n2] = op(gp2.loc[:, ['C', 'D']]) - expected = dict((k, DataFrame(v)) - for k, v in compat.iteritems(expected)) - expected = Panel.fromDict(expected).swapaxes(0, 1) - expected.major_axis.name, expected.minor_axis.name = 'A', 'B' - - # a little bit crude - for col in ['C', 'D']: - result_col = op(grouped[col]) - exp = expected[col] - pivoted = result1[col].unstack() - pivoted2 = result_col.unstack() - assert_frame_equal(pivoted.reindex_like(exp), exp) - assert_frame_equal(pivoted2.reindex_like(exp), exp) + with catch_warnings(record=True): + result1 = op(grouped) + + expected = defaultdict(dict) + for n1, gp1 in data.groupby('A'): + for n2, gp2 in gp1.groupby('B'): + expected[n1][n2] = op(gp2.loc[:, ['C', 'D']]) + expected = dict((k, DataFrame(v)) + for k, v in compat.iteritems(expected)) + expected = Panel.fromDict(expected).swapaxes(0, 1) + expected.major_axis.name, expected.minor_axis.name = 'A', 'B' + + # a little bit crude + for col in ['C', 'D']: + result_col = op(grouped[col]) + exp = expected[col] + pivoted = result1[col].unstack() + pivoted2 = result_col.unstack() + assert_frame_equal(pivoted.reindex_like(exp), exp) + assert_frame_equal(pivoted2.reindex_like(exp), exp) _check_op(lambda x: x.sum()) _check_op(lambda x: x.mean()) @@ -2980,8 +2985,9 @@ def test_dictify(self): def test_sparse_friendly(self): sdf = self.df[['C', 'D']].to_sparse() - panel = 
tm.makePanel() - tm.add_nans(panel) + with catch_warnings(record=True): + panel = tm.makePanel() + tm.add_nans(panel) def _check_work(gp): gp.mean() @@ -2997,27 +3003,28 @@ def _check_work(gp): # _check_work(panel.groupby(lambda x: x.month, axis=1)) def test_panel_groupby(self): - self.panel = tm.makePanel() - tm.add_nans(self.panel) - grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1}, - axis='items') - agged = grouped.mean() - agged2 = grouped.agg(lambda x: x.mean('items')) + with catch_warnings(record=True): + self.panel = tm.makePanel() + tm.add_nans(self.panel) + grouped = self.panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1}, + axis='items') + agged = grouped.mean() + agged2 = grouped.agg(lambda x: x.mean('items')) - tm.assert_panel_equal(agged, agged2) + tm.assert_panel_equal(agged, agged2) - self.assert_index_equal(agged.items, Index([0, 1])) + self.assert_index_equal(agged.items, Index([0, 1])) - grouped = self.panel.groupby(lambda x: x.month, axis='major') - agged = grouped.mean() + grouped = self.panel.groupby(lambda x: x.month, axis='major') + agged = grouped.mean() - exp = Index(sorted(list(set(self.panel.major_axis.month)))) - self.assert_index_equal(agged.major_axis, exp) + exp = Index(sorted(list(set(self.panel.major_axis.month)))) + self.assert_index_equal(agged.major_axis, exp) - grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1}, - axis='minor') - agged = grouped.mean() - self.assert_index_equal(agged.minor_axis, Index([0, 1])) + grouped = self.panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1}, + axis='minor') + agged = grouped.mean() + self.assert_index_equal(agged.minor_axis, Index([0, 1])) def test_groupby_2d_malformed(self): d = DataFrame(index=lrange(2)) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 02630c76abb93..1260ee4e5ab07 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1533,7 +1533,8 
@@ def units_from_epochs(): return list(range(5)) -@pytest.fixture(params=[epoch_1960(), epoch_1960().to_datetime(), +@pytest.fixture(params=[epoch_1960(), + epoch_1960().to_pydatetime(), epoch_1960().to_datetime64(), str(epoch_1960())]) def epochs(request): diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index c7637a00910c6..0f8a9573a233b 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -37,41 +37,46 @@ def setUp(self): self.frame_ints = DataFrame(np.random.randn(4, 4), index=lrange(0, 8, 2), columns=lrange(0, 12, 3)) - self.panel_ints = Panel(np.random.rand(4, 4, 4), - items=lrange(0, 8, 2), - major_axis=lrange(0, 12, 3), - minor_axis=lrange(0, 16, 4)) + with catch_warnings(record=True): + self.panel_ints = Panel(np.random.rand(4, 4, 4), + items=lrange(0, 8, 2), + major_axis=lrange(0, 12, 3), + minor_axis=lrange(0, 16, 4)) self.series_uints = Series(np.random.rand(4), index=UInt64Index(lrange(0, 8, 2))) self.frame_uints = DataFrame(np.random.randn(4, 4), index=UInt64Index(lrange(0, 8, 2)), columns=UInt64Index(lrange(0, 12, 3))) - self.panel_uints = Panel(np.random.rand(4, 4, 4), - items=UInt64Index(lrange(0, 8, 2)), - major_axis=UInt64Index(lrange(0, 12, 3)), - minor_axis=UInt64Index(lrange(0, 16, 4))) + with catch_warnings(record=True): + self.panel_uints = Panel(np.random.rand(4, 4, 4), + items=UInt64Index(lrange(0, 8, 2)), + major_axis=UInt64Index(lrange(0, 12, 3)), + minor_axis=UInt64Index(lrange(0, 16, 4))) self.series_labels = Series(np.random.randn(4), index=list('abcd')) self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD')) - self.panel_labels = Panel(np.random.randn(4, 4, 4), - items=list('abcd'), - major_axis=list('ABCD'), - minor_axis=list('ZYXW')) + with catch_warnings(record=True): + self.panel_labels = Panel(np.random.randn(4, 4, 4), + items=list('abcd'), + major_axis=list('ABCD'), + minor_axis=list('ZYXW')) self.series_mixed = 
Series(np.random.randn(4), index=[2, 4, 'null', 8]) self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, 'null', 8]) - self.panel_mixed = Panel(np.random.randn(4, 4, 4), - items=[2, 4, 'null', 8]) + with catch_warnings(record=True): + self.panel_mixed = Panel(np.random.randn(4, 4, 4), + items=[2, 4, 'null', 8]) self.series_ts = Series(np.random.randn(4), index=date_range('20130101', periods=4)) self.frame_ts = DataFrame(np.random.randn(4, 4), index=date_range('20130101', periods=4)) - self.panel_ts = Panel(np.random.randn(4, 4, 4), - items=date_range('20130101', periods=4)) + with catch_warnings(record=True): + self.panel_ts = Panel(np.random.randn(4, 4, 4), + items=date_range('20130101', periods=4)) dates_rev = (date_range('20130101', periods=4) .sort_values(ascending=False)) @@ -79,12 +84,14 @@ def setUp(self): index=dates_rev) self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev) - self.panel_ts_rev = Panel(np.random.randn(4, 4, 4), - items=dates_rev) + with catch_warnings(record=True): + self.panel_ts_rev = Panel(np.random.randn(4, 4, 4), + items=dates_rev) self.frame_empty = DataFrame({}) self.series_empty = Series({}) - self.panel_empty = Panel({}) + with catch_warnings(record=True): + self.panel_empty = Panel({}) # form agglomerates for o in self._objs: @@ -255,8 +262,18 @@ def _print(result, error=None): continue obj = d[t] - if obj is not None: + if obj is None: + continue + + def _call(obj=obj): obj = obj.copy() k2 = key2 _eq(t, o, a, obj, key1, k2) + + # Panel deprecations + if isinstance(obj, Panel): + with catch_warnings(record=True): + _call() + else: + _call() diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py index 1fc0a87764b94..07786b9fb4b72 100644 --- a/pandas/tests/indexing/test_multiindex.py +++ b/pandas/tests/indexing/test_multiindex.py @@ -1164,87 +1164,98 @@ def f(): class TestMultiIndexPanel(tm.TestCase): def test_iloc_getitem_panel_multiindex(self): - # GH 7199 - # 
Panel with multi-index - multi_index = pd.MultiIndex.from_tuples([('ONE', 'one'), - ('TWO', 'two'), - ('THREE', 'three')], - names=['UPPER', 'lower']) - - simple_index = [x[0] for x in multi_index] - wd1 = Panel(items=['First', 'Second'], major_axis=['a', 'b', 'c', 'd'], - minor_axis=multi_index) - - wd2 = Panel(items=['First', 'Second'], major_axis=['a', 'b', 'c', 'd'], - minor_axis=simple_index) - - expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]] - result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG - tm.assert_frame_equal(result1, expected1) - - expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]] - result2 = wd2.iloc[0, [True, True, True, False], [0, 2]] - tm.assert_frame_equal(result2, expected2) - - expected1 = DataFrame(index=['a'], columns=multi_index, - dtype='float64') - result1 = wd1.iloc[0, [0], [0, 1, 2]] - tm.assert_frame_equal(result1, expected1) - - expected2 = DataFrame(index=['a'], columns=simple_index, - dtype='float64') - result2 = wd2.iloc[0, [0], [0, 1, 2]] - tm.assert_frame_equal(result2, expected2) - - # GH 7516 - mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')]) - p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3), - items=['a', 'b', 'c'], major_axis=mi, - minor_axis=['u', 'v', 'w']) - result = p.iloc[:, 1, 0] - expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u') - tm.assert_series_equal(result, expected) - result = p.loc[:, (1, 'y'), 'u'] - tm.assert_series_equal(result, expected) + with catch_warnings(record=True): + + # GH 7199 + # Panel with multi-index + multi_index = pd.MultiIndex.from_tuples([('ONE', 'one'), + ('TWO', 'two'), + ('THREE', 'three')], + names=['UPPER', 'lower']) + + simple_index = [x[0] for x in multi_index] + wd1 = Panel(items=['First', 'Second'], + major_axis=['a', 'b', 'c', 'd'], + minor_axis=multi_index) + + wd2 = Panel(items=['First', 'Second'], + major_axis=['a', 'b', 'c', 'd'], + minor_axis=simple_index) + + expected1 = 
wd1['First'].iloc[[True, True, True, False], [0, 2]] + result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG + tm.assert_frame_equal(result1, expected1) + + expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]] + result2 = wd2.iloc[0, [True, True, True, False], [0, 2]] + tm.assert_frame_equal(result2, expected2) + + expected1 = DataFrame(index=['a'], columns=multi_index, + dtype='float64') + result1 = wd1.iloc[0, [0], [0, 1, 2]] + tm.assert_frame_equal(result1, expected1) + + expected2 = DataFrame(index=['a'], columns=simple_index, + dtype='float64') + result2 = wd2.iloc[0, [0], [0, 1, 2]] + tm.assert_frame_equal(result2, expected2) + + # GH 7516 + mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')]) + p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3), + items=['a', 'b', 'c'], major_axis=mi, + minor_axis=['u', 'v', 'w']) + result = p.iloc[:, 1, 0] + expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u') + tm.assert_series_equal(result, expected) + + result = p.loc[:, (1, 'y'), 'u'] + tm.assert_series_equal(result, expected) def test_panel_setitem_with_multiindex(self): - # 10360 - # failing with a multi-index - arr = np.array([[[1, 2, 3], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]], - dtype=np.float64) - - # reg index - axes = dict(items=['A', 'B'], major_axis=[0, 1], - minor_axis=['X', 'Y', 'Z']) - p1 = Panel(0., **axes) - p1.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p1, expected) - - # multi-indexes - axes['items'] = pd.MultiIndex.from_tuples([('A', 'a'), ('B', 'b')]) - p2 = Panel(0., **axes) - p2.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p2, expected) - - axes['major_axis'] = pd.MultiIndex.from_tuples([('A', 1), ('A', 2)]) - p3 = Panel(0., **axes) - p3.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p3, expected) - - axes['minor_axis'] = pd.MultiIndex.from_product([['X'], range(3)]) - p4 = Panel(0., **axes) 
- p4.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p4, expected) - - arr = np.array( - [[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]], dtype=np.float64) - p5 = Panel(0., **axes) - p5.iloc[0, :, 0] = [1, 2] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p5, expected) + with catch_warnings(record=True): + # 10360 + # failing with a multi-index + arr = np.array([[[1, 2, 3], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0]]], + dtype=np.float64) + + # reg index + axes = dict(items=['A', 'B'], major_axis=[0, 1], + minor_axis=['X', 'Y', 'Z']) + p1 = Panel(0., **axes) + p1.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p1, expected) + + # multi-indexes + axes['items'] = pd.MultiIndex.from_tuples( + [('A', 'a'), ('B', 'b')]) + p2 = Panel(0., **axes) + p2.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p2, expected) + + axes['major_axis'] = pd.MultiIndex.from_tuples( + [('A', 1), ('A', 2)]) + p3 = Panel(0., **axes) + p3.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p3, expected) + + axes['minor_axis'] = pd.MultiIndex.from_product( + [['X'], range(3)]) + p4 = Panel(0., **axes) + p4.iloc[0, 0, :] = [1, 2, 3] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p4, expected) + + arr = np.array( + [[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]], + dtype=np.float64) + p5 = Panel(0., **axes) + p5.iloc[0, :, 0] = [1, 2] + expected = Panel(arr, **axes) + tm.assert_panel_equal(p5, expected) diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py index 0677ea498c282..8daef6155212c 100644 --- a/pandas/tests/indexing/test_panel.py +++ b/pandas/tests/indexing/test_panel.py @@ -1,3 +1,4 @@ +import pytest from warnings import catch_warnings import numpy as np @@ -9,201 +10,210 @@ class TestPanel(tm.TestCase): def test_iloc_getitem_panel(self): - # GH 7189 - p = Panel(np.arange(4 * 3 * 2).reshape(4, 3, 2), - 
items=['A', 'B', 'C', 'D'], - major_axis=['a', 'b', 'c'], - minor_axis=['one', 'two']) + with catch_warnings(record=True): + # GH 7189 + p = Panel(np.arange(4 * 3 * 2).reshape(4, 3, 2), + items=['A', 'B', 'C', 'D'], + major_axis=['a', 'b', 'c'], + minor_axis=['one', 'two']) - result = p.iloc[1] - expected = p.loc['B'] - tm.assert_frame_equal(result, expected) + result = p.iloc[1] + expected = p.loc['B'] + tm.assert_frame_equal(result, expected) - result = p.iloc[1, 1] - expected = p.loc['B', 'b'] - tm.assert_series_equal(result, expected) + result = p.iloc[1, 1] + expected = p.loc['B', 'b'] + tm.assert_series_equal(result, expected) - result = p.iloc[1, 1, 1] - expected = p.loc['B', 'b', 'two'] - self.assertEqual(result, expected) + result = p.iloc[1, 1, 1] + expected = p.loc['B', 'b', 'two'] + self.assertEqual(result, expected) - # slice - result = p.iloc[1:3] - expected = p.loc[['B', 'C']] - tm.assert_panel_equal(result, expected) + # slice + result = p.iloc[1:3] + expected = p.loc[['B', 'C']] + tm.assert_panel_equal(result, expected) - result = p.iloc[:, 0:2] - expected = p.loc[:, ['a', 'b']] - tm.assert_panel_equal(result, expected) + result = p.iloc[:, 0:2] + expected = p.loc[:, ['a', 'b']] + tm.assert_panel_equal(result, expected) - # list of integers - result = p.iloc[[0, 2]] - expected = p.loc[['A', 'C']] - tm.assert_panel_equal(result, expected) + # list of integers + result = p.iloc[[0, 2]] + expected = p.loc[['A', 'C']] + tm.assert_panel_equal(result, expected) - # neg indicies - result = p.iloc[[-1, 1], [-1, 1]] - expected = p.loc[['D', 'B'], ['c', 'b']] - tm.assert_panel_equal(result, expected) + # neg indicies + result = p.iloc[[-1, 1], [-1, 1]] + expected = p.loc[['D', 'B'], ['c', 'b']] + tm.assert_panel_equal(result, expected) - # dups indicies - result = p.iloc[[-1, -1, 1], [-1, 1]] - expected = p.loc[['D', 'D', 'B'], ['c', 'b']] - tm.assert_panel_equal(result, expected) + # dups indicies + result = p.iloc[[-1, -1, 1], [-1, 1]] + expected = 
p.loc[['D', 'D', 'B'], ['c', 'b']] + tm.assert_panel_equal(result, expected) - # combined - result = p.iloc[0, [True, True], [0, 1]] - expected = p.loc['A', ['a', 'b'], ['one', 'two']] - tm.assert_frame_equal(result, expected) + # combined + result = p.iloc[0, [True, True], [0, 1]] + expected = p.loc['A', ['a', 'b'], ['one', 'two']] + tm.assert_frame_equal(result, expected) - # out-of-bounds exception - self.assertRaises(IndexError, p.iloc.__getitem__, tuple([10, 5])) + # out-of-bounds exception + with pytest.raises(IndexError): + p.iloc[tuple([10, 5])] - def f(): - p.iloc[0, [True, True], [0, 1, 2]] + def f(): + p.iloc[0, [True, True], [0, 1, 2]] - self.assertRaises(IndexError, f) + self.assertRaises(IndexError, f) - # trying to use a label - self.assertRaises(ValueError, p.iloc.__getitem__, tuple(['j', 'D'])) + # trying to use a label + with pytest.raises(ValueError): + p.iloc[tuple(['j', 'D'])] - # GH - p = Panel( - np.random.rand(4, 3, 2), items=['A', 'B', 'C', 'D'], - major_axis=['U', 'V', 'W'], minor_axis=['X', 'Y']) - expected = p['A'] + # GH + p = Panel( + np.random.rand(4, 3, 2), items=['A', 'B', 'C', 'D'], + major_axis=['U', 'V', 'W'], minor_axis=['X', 'Y']) + expected = p['A'] - result = p.iloc[0, :, :] - tm.assert_frame_equal(result, expected) + result = p.iloc[0, :, :] + tm.assert_frame_equal(result, expected) - result = p.iloc[0, [True, True, True], :] - tm.assert_frame_equal(result, expected) + result = p.iloc[0, [True, True, True], :] + tm.assert_frame_equal(result, expected) - result = p.iloc[0, [True, True, True], [0, 1]] - tm.assert_frame_equal(result, expected) + result = p.iloc[0, [True, True, True], [0, 1]] + tm.assert_frame_equal(result, expected) - def f(): - p.iloc[0, [True, True, True], [0, 1, 2]] + def f(): + p.iloc[0, [True, True, True], [0, 1, 2]] - self.assertRaises(IndexError, f) + self.assertRaises(IndexError, f) - def f(): - p.iloc[0, [True, True, True], [2]] + def f(): + p.iloc[0, [True, True, True], [2]] - 
self.assertRaises(IndexError, f) + self.assertRaises(IndexError, f) def test_iloc_panel_issue(self): - # GH 3617 - p = Panel(np.random.randn(4, 4, 4)) + with catch_warnings(record=True): + # GH 3617 + p = Panel(np.random.randn(4, 4, 4)) - self.assertEqual(p.iloc[:3, :3, :3].shape, (3, 3, 3)) - self.assertEqual(p.iloc[1, :3, :3].shape, (3, 3)) - self.assertEqual(p.iloc[:3, 1, :3].shape, (3, 3)) - self.assertEqual(p.iloc[:3, :3, 1].shape, (3, 3)) - self.assertEqual(p.iloc[1, 1, :3].shape, (3, )) - self.assertEqual(p.iloc[1, :3, 1].shape, (3, )) - self.assertEqual(p.iloc[:3, 1, 1].shape, (3, )) + self.assertEqual(p.iloc[:3, :3, :3].shape, (3, 3, 3)) + self.assertEqual(p.iloc[1, :3, :3].shape, (3, 3)) + self.assertEqual(p.iloc[:3, 1, :3].shape, (3, 3)) + self.assertEqual(p.iloc[:3, :3, 1].shape, (3, 3)) + self.assertEqual(p.iloc[1, 1, :3].shape, (3, )) + self.assertEqual(p.iloc[1, :3, 1].shape, (3, )) + self.assertEqual(p.iloc[:3, 1, 1].shape, (3, )) def test_panel_getitem(self): - # GH4016, date selection returns a frame when a partial string - # selection - ind = date_range(start="2000", freq="D", periods=1000) - df = DataFrame( - np.random.randn( - len(ind), 5), index=ind, columns=list('ABCDE')) - panel = Panel(dict([('frame_' + c, df) for c in list('ABC')])) - - test2 = panel.loc[:, "2002":"2002-12-31"] - test1 = panel.loc[:, "2002"] - tm.assert_panel_equal(test1, test2) - # GH8710 - # multi-element getting with a list - panel = tm.makePanel() - - expected = panel.iloc[[0, 1]] - - result = panel.loc[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) + with catch_warnings(record=True): + # GH4016, date selection returns a frame when a partial string + # selection + ind = date_range(start="2000", freq="D", periods=1000) + df = DataFrame( + np.random.randn( + len(ind), 5), index=ind, columns=list('ABCDE')) + panel = Panel(dict([('frame_' + c, df) for c in list('ABC')])) - result = panel.loc[['ItemA', 'ItemB'], :, :] - tm.assert_panel_equal(result, expected) 
+ test2 = panel.loc[:, "2002":"2002-12-31"] + test1 = panel.loc[:, "2002"] + tm.assert_panel_equal(test1, test2) - result = panel[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) + # GH8710 + # multi-element getting with a list + panel = tm.makePanel() - result = panel.loc['ItemA':'ItemB'] - tm.assert_panel_equal(result, expected) + expected = panel.iloc[[0, 1]] - with catch_warnings(record=True): - result = panel.ix[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) + result = panel.loc[['ItemA', 'ItemB']] + tm.assert_panel_equal(result, expected) - # with an object-like - # GH 9140 - class TestObject: + result = panel.loc[['ItemA', 'ItemB'], :, :] + tm.assert_panel_equal(result, expected) - def __str__(self): - return "TestObject" + result = panel[['ItemA', 'ItemB']] + tm.assert_panel_equal(result, expected) - obj = TestObject() + result = panel.loc['ItemA':'ItemB'] + tm.assert_panel_equal(result, expected) - p = Panel(np.random.randn(1, 5, 4), items=[obj], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) + with catch_warnings(record=True): + result = panel.ix[['ItemA', 'ItemB']] + tm.assert_panel_equal(result, expected) - expected = p.iloc[0] - result = p[obj] - tm.assert_frame_equal(result, expected) + # with an object-like + # GH 9140 + class TestObject: - def test_panel_setitem(self): + def __str__(self): + return "TestObject" - # GH 7763 - # loc and setitem have setting differences - np.random.seed(0) - index = range(3) - columns = list('abc') + obj = TestObject() - panel = Panel({'A': DataFrame(np.random.randn(3, 3), - index=index, columns=columns), - 'B': DataFrame(np.random.randn(3, 3), - index=index, columns=columns), - 'C': DataFrame(np.random.randn(3, 3), - index=index, columns=columns)}) + p = Panel(np.random.randn(1, 5, 4), items=[obj], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) - replace = DataFrame(np.eye(3, 3), index=range(3), columns=columns) - 
expected = Panel({'A': replace, 'B': replace, 'C': replace}) + expected = p.iloc[0] + result = p[obj] + tm.assert_frame_equal(result, expected) - p = panel.copy() - for idx in list('ABC'): - p[idx] = replace - tm.assert_panel_equal(p, expected) + def test_panel_setitem(self): - p = panel.copy() - for idx in list('ABC'): - p.loc[idx, :, :] = replace - tm.assert_panel_equal(p, expected) + with catch_warnings(record=True): + # GH 7763 + # loc and setitem have setting differences + np.random.seed(0) + index = range(3) + columns = list('abc') + + panel = Panel({'A': DataFrame(np.random.randn(3, 3), + index=index, columns=columns), + 'B': DataFrame(np.random.randn(3, 3), + index=index, columns=columns), + 'C': DataFrame(np.random.randn(3, 3), + index=index, columns=columns)}) + + replace = DataFrame(np.eye(3, 3), index=range(3), columns=columns) + expected = Panel({'A': replace, 'B': replace, 'C': replace}) + + p = panel.copy() + for idx in list('ABC'): + p[idx] = replace + tm.assert_panel_equal(p, expected) + + p = panel.copy() + for idx in list('ABC'): + p.loc[idx, :, :] = replace + tm.assert_panel_equal(p, expected) def test_panel_assignment(self): - # GH3777 - wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - wp2 = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - - # TODO: unused? 
- # expected = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] - - def f(): - wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[ - ['Item1', 'Item2'], :, ['A', 'B']] - - self.assertRaises(NotImplementedError, f) - - # to_assign = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']] - # wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = to_assign - # result = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] - # tm.assert_panel_equal(result,expected) + + with catch_warnings(record=True): + # GH3777 + wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + wp2 = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + + # TODO: unused? + # expected = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] + + def f(): + wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[ + ['Item1', 'Item2'], :, ['A', 'B']] + + self.assertRaises(NotImplementedError, f) + + # to_assign = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']] + # wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = to_assign + # result = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] + # tm.assert_panel_equal(result,expected) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 31fadcc88583c..f51f050c57624 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -119,33 +119,34 @@ def f(): df.ix[:, 'C'] = df.ix[:, 'A'] tm.assert_frame_equal(df, expected) - # ## panel ## - p_orig = Panel(np.arange(16).reshape(2, 4, 2), - items=['Item1', 'Item2'], - major_axis=pd.date_range('2001/1/12', periods=4), - minor_axis=['A', 'B'], dtype='float64') - - # panel setting via item - p_orig = Panel(np.arange(16).reshape(2, 4, 2), - items=['Item1', 'Item2'], - major_axis=pd.date_range('2001/1/12', periods=4), - minor_axis=['A', 'B'], dtype='float64') - expected = p_orig.copy() - expected['Item3'] = expected['Item1'] - p = 
p_orig.copy() - p.loc['Item3'] = p['Item1'] - tm.assert_panel_equal(p, expected) - - # panel with aligned series - expected = p_orig.copy() - expected = expected.transpose(2, 1, 0) - expected['C'] = DataFrame({'Item1': [30, 30, 30, 30], - 'Item2': [32, 32, 32, 32]}, - index=p_orig.major_axis) - expected = expected.transpose(2, 1, 0) - p = p_orig.copy() - p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items) - tm.assert_panel_equal(p, expected) + with catch_warnings(record=True): + # ## panel ## + p_orig = Panel(np.arange(16).reshape(2, 4, 2), + items=['Item1', 'Item2'], + major_axis=pd.date_range('2001/1/12', periods=4), + minor_axis=['A', 'B'], dtype='float64') + + # panel setting via item + p_orig = Panel(np.arange(16).reshape(2, 4, 2), + items=['Item1', 'Item2'], + major_axis=pd.date_range('2001/1/12', periods=4), + minor_axis=['A', 'B'], dtype='float64') + expected = p_orig.copy() + expected['Item3'] = expected['Item1'] + p = p_orig.copy() + p.loc['Item3'] = p['Item1'] + tm.assert_panel_equal(p, expected) + + # panel with aligned series + expected = p_orig.copy() + expected = expected.transpose(2, 1, 0) + expected['C'] = DataFrame({'Item1': [30, 30, 30, 30], + 'Item2': [32, 32, 32, 32]}, + index=p_orig.major_axis) + expected = expected.transpose(2, 1, 0) + p = p_orig.copy() + p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items) + tm.assert_panel_equal(p, expected) # GH 8473 dates = date_range('1/1/2000', periods=8) diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index d0365cb2c30b3..22c62b738e6a2 100644 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -1,5 +1,6 @@ """ self-contained to write legacy storage (pickle/msgpack) files """ from __future__ import print_function +from warnings import catch_warnings from distutils.version import LooseVersion from pandas import (Series, DataFrame, Panel, SparseSeries, SparseDataFrame, @@ 
-127,14 +128,16 @@ def create_data(): u'B': Timestamp('20130603', tz='CET')}, index=range(5)) ) - mixed_dup_panel = Panel({u'ItemA': frame[u'float'], - u'ItemB': frame[u'int']}) - mixed_dup_panel.items = [u'ItemA', u'ItemA'] - panel = dict(float=Panel({u'ItemA': frame[u'float'], - u'ItemB': frame[u'float'] + 1}), - dup=Panel(np.arange(30).reshape(3, 5, 2).astype(np.float64), - items=[u'A', u'B', u'A']), - mixed_dup=mixed_dup_panel) + with catch_warnings(record=True): + mixed_dup_panel = Panel({u'ItemA': frame[u'float'], + u'ItemB': frame[u'int']}) + mixed_dup_panel.items = [u'ItemA', u'ItemA'] + panel = dict(float=Panel({u'ItemA': frame[u'float'], + u'ItemB': frame[u'float'] + 1}), + dup=Panel( + np.arange(30).reshape(3, 5, 2).astype(np.float64), + items=[u'A', u'B', u'A']), + mixed_dup=mixed_dup_panel) cat = dict(int8=Categorical(list('abcdefg')), int16=Categorical(np.arange(1000)), diff --git a/pandas/tests/io/msgpack/common.py b/pandas/tests/io/msgpack/common.py new file mode 100644 index 0000000000000..b770d12cffbfa --- /dev/null +++ b/pandas/tests/io/msgpack/common.py @@ -0,0 +1,10 @@ +from pandas.compat import PY3 + + +# array compat +if PY3: + frombytes = lambda obj, data: obj.frombytes(data) + tobytes = lambda obj: obj.tobytes() +else: + frombytes = lambda obj, data: obj.fromstring(data) + tobytes = lambda obj: obj.tostring() diff --git a/pandas/tests/io/msgpack/test_buffer.py b/pandas/tests/io/msgpack/test_buffer.py index 5a2dc3dba5dfa..8ebec734f1d3d 100644 --- a/pandas/tests/io/msgpack/test_buffer.py +++ b/pandas/tests/io/msgpack/test_buffer.py @@ -1,12 +1,13 @@ # coding: utf-8 from pandas.io.msgpack import packb, unpackb +from .common import frombytes def test_unpack_buffer(): from array import array buf = array('b') - buf.fromstring(packb((b'foo', b'bar'))) + frombytes(buf, packb((b'foo', b'bar'))) obj = unpackb(buf, use_list=1) assert [b'foo', b'bar'] == obj diff --git a/pandas/tests/io/msgpack/test_extension.py 
b/pandas/tests/io/msgpack/test_extension.py index a5a111efbb835..26a611bea224c 100644 --- a/pandas/tests/io/msgpack/test_extension.py +++ b/pandas/tests/io/msgpack/test_extension.py @@ -1,7 +1,9 @@ from __future__ import print_function import array + import pandas.io.msgpack as msgpack from pandas.io.msgpack import ExtType +from .common import frombytes, tobytes def test_pack_ext_type(): @@ -42,7 +44,7 @@ def default(obj): print('default called', obj) if isinstance(obj, array.array): typecode = 123 # application specific typecode - data = obj.tostring() + data = tobytes(obj) return ExtType(typecode, data) raise TypeError("Unknwon type object %r" % (obj, )) @@ -50,7 +52,7 @@ def ext_hook(code, data): print('ext_hook called', code, data) assert code == 123 obj = array.array('d') - obj.fromstring(data) + frombytes(obj, data) return obj obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])] diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 256a37e922177..d83e26995020c 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -7,6 +7,7 @@ from distutils.version import LooseVersion import warnings +from warnings import catch_warnings import operator import functools import pytest @@ -2340,9 +2341,13 @@ def check_called(func): writer = ExcelWriter('something.test') tm.assertIsInstance(writer, DummyClass) df = tm.makeCustomDataframe(1, 1) - panel = tm.makePanel() - func = lambda: df.to_excel('something.test') - check_called(func) - check_called(lambda: panel.to_excel('something.test')) - check_called(lambda: df.to_excel('something.xlsx')) - check_called(lambda: df.to_excel('something.xls', engine='dummy')) + + with catch_warnings(record=True): + panel = tm.makePanel() + func = lambda: df.to_excel('something.test') + check_called(func) + check_called(lambda: panel.to_excel('something.test')) + check_called(lambda: df.to_excel('something.xlsx')) + check_called( + lambda: df.to_excel( + 'something.xls', engine='dummy')) 
diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 1b6b0fc62f913..4856cd8c5e9a6 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -1,5 +1,6 @@ import pytest +from warnings import catch_warnings import os import datetime import numpy as np @@ -452,9 +453,10 @@ def setUp(self): 'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)), 'mixed': DataFrame(data)} - self.panel = { - 'float': Panel(dict(ItemA=self.frame['float'], - ItemB=self.frame['float'] + 1))} + with catch_warnings(record=True): + self.panel = { + 'float': Panel(dict(ItemA=self.frame['float'], + ItemB=self.frame['float'] + 1))} def test_basic_frame(self): @@ -464,9 +466,10 @@ def test_basic_frame(self): def test_basic_panel(self): - for s, i in self.panel.items(): - i_rec = self.encode_decode(i) - assert_panel_equal(i, i_rec) + with catch_warnings(record=True): + for s, i in self.panel.items(): + i_rec = self.encode_decode(i) + assert_panel_equal(i, i_rec) def test_multi(self): @@ -899,8 +902,9 @@ def test_msgpacks_legacy(self, current_packers_data, all_packers_data, continue vf = os.path.join(pth, f) try: - self.compare(current_packers_data, all_packers_data, - vf, version) + with catch_warnings(record=True): + self.compare(current_packers_data, all_packers_data, + vf, version) except ImportError: # blosc not installed continue diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index f46f62e781006..0746cacb01bb9 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -14,6 +14,8 @@ """ import pytest +from warnings import catch_warnings + import os from distutils.version import LooseVersion import pandas as pd @@ -202,7 +204,8 @@ def test_pickles(current_pickle_data, version): n = 0 for f in os.listdir(pth): vf = os.path.join(pth, f) - data = compare(current_pickle_data, vf, version) + with catch_warnings(record=True): + data = compare(current_pickle_data, vf, version) if 
data is None: continue @@ -339,7 +342,8 @@ def compress_file(self, src_path, dest_path, compression): raise ValueError(msg) if compression != "zip": - f.write(open(src_path, "rb").read()) + with open(src_path, "rb") as fh: + f.write(fh.read()) f.close() def decompress_file(self, src_path, dest_path, compression): @@ -369,7 +373,8 @@ def decompress_file(self, src_path, dest_path, compression): msg = 'Unrecognized compression type: {}'.format(compression) raise ValueError(msg) - open(dest_path, "wb").write(f.read()) + with open(dest_path, "wb") as fh: + fh.write(f.read()) f.close() @pytest.mark.parametrize('compression', [None, 'gzip', 'bz2', 'xz']) diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index e6482d70e0ae3..075d5efcefbe0 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -3,7 +3,7 @@ import operator import pytest - +from warnings import catch_warnings from numpy import nan import numpy as np import pandas as pd @@ -953,23 +953,25 @@ def _check(frame, orig): self._check_all(_check) def test_stack_sparse_frame(self): - def _check(frame): - dense_frame = frame.to_dense() # noqa + with catch_warnings(record=True): + + def _check(frame): + dense_frame = frame.to_dense() # noqa - wp = Panel.from_dict({'foo': frame}) - from_dense_lp = wp.to_frame() + wp = Panel.from_dict({'foo': frame}) + from_dense_lp = wp.to_frame() - from_sparse_lp = spf.stack_sparse_frame(frame) + from_sparse_lp = spf.stack_sparse_frame(frame) - self.assert_numpy_array_equal(from_dense_lp.values, - from_sparse_lp.values) + self.assert_numpy_array_equal(from_dense_lp.values, + from_sparse_lp.values) - _check(self.frame) - _check(self.iframe) + _check(self.frame) + _check(self.iframe) - # for now - self.assertRaises(Exception, _check, self.zframe) - self.assertRaises(Exception, _check, self.fill_frame) + # for now + self.assertRaises(Exception, _check, self.zframe) + self.assertRaises(Exception, _check, self.fill_frame) 
def test_transpose(self): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 63c1ae70e35a6..adacbb95f5162 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # pylint: disable=E1101,E1103,W0232 +from warnings import catch_warnings import pytest import sys from datetime import datetime @@ -1816,9 +1817,11 @@ def test_construction_frame(self): def test_reshaping(self): - p = tm.makePanel() - p['str'] = 'foo' - df = p.to_frame() + with catch_warnings(record=True): + p = tm.makePanel() + p['str'] = 'foo' + df = p.to_frame() + df['category'] = df['str'].astype('category') result = df['category'].unstack() diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 914d26fcafb4a..e3193cddbaaab 100755 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1253,14 +1253,15 @@ def test_swaplevel(self): tm.assert_frame_equal(swapped, exp) def test_swaplevel_panel(self): - panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2}) - expected = panel.copy() - expected.major_axis = expected.major_axis.swaplevel(0, 1) - - for result in (panel.swaplevel(axis='major'), - panel.swaplevel(0, axis='major'), - panel.swaplevel(0, 1, axis='major')): - tm.assert_panel_equal(result, expected) + with catch_warnings(record=True): + panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2}) + expected = panel.copy() + expected.major_axis = expected.major_axis.swaplevel(0, 1) + + for result in (panel.swaplevel(axis='major'), + panel.swaplevel(0, axis='major'), + panel.swaplevel(0, 1, axis='major')): + tm.assert_panel_equal(result, expected) def test_reorder_levels(self): result = self.ymd.reorder_levels(['month', 'day', 'year']) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 99361695b2371..fad1fbc52bbe3 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -18,6 +18,7 @@ 
class TestSorting(tm.TestCase): + @pytest.mark.slow def test_int64_overflow(self): B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) @@ -51,9 +52,11 @@ def test_int64_overflow(self): expected = df.groupby(tups).sum()['values'] for k, v in compat.iteritems(expected): - self.assertEqual(left[k], right[k[::-1]]) - self.assertEqual(left[k], v) - self.assertEqual(len(left), len(right)) + assert left[k] == right[k[::-1]] + assert left[k] == v + assert len(left) == len(right) + + def test_int64_overflow_moar(self): # GH9096 values = range(55109) @@ -62,7 +65,7 @@ def test_int64_overflow(self): 'c': values, 'd': values}) grouped = data.groupby(['a', 'b', 'c', 'd']) - self.assertEqual(len(grouped), len(values)) + assert len(grouped) == len(values) arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5)) i = np.random.choice(len(arr), len(arr) * 4) @@ -76,7 +79,7 @@ def test_int64_overflow(self): gr = df.groupby(list('abcde')) # verify this is testing what it is supposed to test! - self.assertTrue(is_int64_overflow_possible(gr.grouper.shape)) + assert is_int64_overflow_possible(gr.grouper.shape) # mannually compute groupings jim, joe = defaultdict(list), defaultdict(list) @@ -84,7 +87,7 @@ def test_int64_overflow(self): jim[key].append(a) joe[key].append(b) - self.assertEqual(len(gr), len(jim)) + assert len(gr) == len(jim) mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde')) def aggr(func): @@ -201,7 +204,7 @@ def test_int64_overflow_issues(self): # it works! 
result = merge(df1, df2, how='outer') - self.assertTrue(len(result) == 2000) + assert len(result) == 2000 low, high, n = -1 << 10, 1 << 10, 1 << 20 left = DataFrame(np.random.randint(low, high, (n, 7)), @@ -216,11 +219,11 @@ def test_int64_overflow_issues(self): right['right'] *= -1 out = merge(left, right, how='outer') - self.assertEqual(len(out), len(left)) + assert len(out) == len(left) assert_series_equal(out['left'], - out['right'], check_names=False) result = out.iloc[:, :-2].sum(axis=1) assert_series_equal(out['left'], result, check_names=False) - self.assertTrue(result.name is None) + assert result.name is None out.sort_values(out.columns.tolist(), inplace=True) out.index = np.arange(len(out)) @@ -241,7 +244,7 @@ def test_int64_overflow_issues(self): # confirm that this is checking what it is supposed to check shape = left.apply(Series.nunique).values - self.assertTrue(is_int64_overflow_possible(shape)) + assert is_int64_overflow_possible(shape) # add duplicates to left frame left = concat([left, left], ignore_index=True) @@ -307,7 +310,7 @@ def verify_order(df): for how in 'left', 'right', 'outer', 'inner': mask = jmask[how] frame = align(out[mask].copy()) - self.assertTrue(mask.all() ^ mask.any() or how == 'outer') + assert mask.all() ^ mask.any() or how == 'outer' for sort in [False, True]: res = merge(left, right, how=how, sort=sort) diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py index c61f2a3dc8066..2ff287acc4c47 100644 --- a/pandas/tests/tools/test_concat.py +++ b/pandas/tests/tools/test_concat.py @@ -1936,21 +1936,23 @@ def test_concat_multiindex_dfs_with_deepcopy(self): @pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel]) @pytest.mark.parametrize('dt', np.sctypes['float']) def test_concat_no_unnecessary_upcast(dt, pdt): - # GH 13247 - dims = pdt().ndim - dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)), - pdt(np.array([np.nan], dtype=dt, ndmin=dims)), - pdt(np.array([5], dtype=dt, ndmin=dims))] - x 
= pd.concat(dfs) - assert x.values.dtype == dt + with catch_warnings(record=True): + # GH 13247 + dims = pdt().ndim + dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)), + pdt(np.array([np.nan], dtype=dt, ndmin=dims)), + pdt(np.array([5], dtype=dt, ndmin=dims))] + x = pd.concat(dfs) + assert x.values.dtype == dt @pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel]) @pytest.mark.parametrize('dt', np.sctypes['int']) def test_concat_will_upcast(dt, pdt): - dims = pdt().ndim - dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)), - pdt(np.array([np.nan], ndmin=dims)), - pdt(np.array([5], dtype=dt, ndmin=dims))] - x = pd.concat(dfs) - assert x.values.dtype == 'float64' + with catch_warnings(record=True): + dims = pdt().ndim + dfs = [pdt(np.array([1], dtype=dt, ndmin=dims)), + pdt(np.array([np.nan], ndmin=dims)), + pdt(np.array([5], dtype=dt, ndmin=dims))] + x = pd.concat(dfs) + assert x.values.dtype == 'float64' diff --git a/pandas/tests/tools/test_hashing.py b/pandas/tests/tools/test_hashing.py index 864b5018abc75..467b058fabc67 100644 --- a/pandas/tests/tools/test_hashing.py +++ b/pandas/tests/tools/test_hashing.py @@ -1,3 +1,6 @@ +import pytest + +from warnings import catch_warnings import numpy as np import pandas as pd @@ -195,11 +198,14 @@ def test_categorical_with_nan_consistency(self): def test_pandas_errors(self): - for obj in [pd.Timestamp('20130101'), tm.makePanel()]: - def f(): - hash_pandas_object(f) + for obj in [pd.Timestamp('20130101')]: + with pytest.raises(TypeError): + hash_pandas_object(obj) - self.assertRaises(TypeError, f) + with catch_warnings(record=True): + obj = tm.makePanel() + with pytest.raises(TypeError): + hash_pandas_object(obj) def test_hash_keys(self): # using different hash keys, should have different hashes diff --git a/pandas/tests/tools/test_join.py b/pandas/tests/tools/test_join.py index b65f800802bca..8571a1ff16701 100644 --- a/pandas/tests/tools/test_join.py +++ b/pandas/tests/tools/test_join.py @@ -1,5 +1,6 @@ # pylint: 
disable=E1103 +from warnings import catch_warnings from numpy.random import randn import numpy as np @@ -629,87 +630,90 @@ def test_join_dups(self): assert_frame_equal(dta, expected) def test_panel_join(self): - panel = tm.makePanel() - tm.add_nans(panel) - - p1 = panel.iloc[:2, :10, :3] - p2 = panel.iloc[2:, 5:, 2:] - - # left join - result = p1.join(p2) - expected = p1.copy() - expected['ItemC'] = p2['ItemC'] - tm.assert_panel_equal(result, expected) - - # right join - result = p1.join(p2, how='right') - expected = p2.copy() - expected['ItemA'] = p1['ItemA'] - expected['ItemB'] = p1['ItemB'] - expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC']) - tm.assert_panel_equal(result, expected) - - # inner join - result = p1.join(p2, how='inner') - expected = panel.iloc[:, 5:10, 2:3] - tm.assert_panel_equal(result, expected) - - # outer join - result = p1.join(p2, how='outer') - expected = p1.reindex(major=panel.major_axis, - minor=panel.minor_axis) - expected = expected.join(p2.reindex(major=panel.major_axis, - minor=panel.minor_axis)) - tm.assert_panel_equal(result, expected) + with catch_warnings(record=True): + panel = tm.makePanel() + tm.add_nans(panel) + + p1 = panel.iloc[:2, :10, :3] + p2 = panel.iloc[2:, 5:, 2:] + + # left join + result = p1.join(p2) + expected = p1.copy() + expected['ItemC'] = p2['ItemC'] + tm.assert_panel_equal(result, expected) + + # right join + result = p1.join(p2, how='right') + expected = p2.copy() + expected['ItemA'] = p1['ItemA'] + expected['ItemB'] = p1['ItemB'] + expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC']) + tm.assert_panel_equal(result, expected) + + # inner join + result = p1.join(p2, how='inner') + expected = panel.iloc[:, 5:10, 2:3] + tm.assert_panel_equal(result, expected) + + # outer join + result = p1.join(p2, how='outer') + expected = p1.reindex(major=panel.major_axis, + minor=panel.minor_axis) + expected = expected.join(p2.reindex(major=panel.major_axis, + minor=panel.minor_axis)) + 
tm.assert_panel_equal(result, expected) def test_panel_join_overlap(self): - panel = tm.makePanel() - tm.add_nans(panel) - - p1 = panel.loc[['ItemA', 'ItemB', 'ItemC']] - p2 = panel.loc[['ItemB', 'ItemC']] - - # Expected index is - # - # ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2 - joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2') - p1_suf = p1.loc[['ItemB', 'ItemC']].add_suffix('_p1') - p2_suf = p2.loc[['ItemB', 'ItemC']].add_suffix('_p2') - no_overlap = panel.loc[['ItemA']] - expected = no_overlap.join(p1_suf.join(p2_suf)) - tm.assert_panel_equal(joined, expected) + with catch_warnings(record=True): + panel = tm.makePanel() + tm.add_nans(panel) + + p1 = panel.loc[['ItemA', 'ItemB', 'ItemC']] + p2 = panel.loc[['ItemB', 'ItemC']] + + # Expected index is + # + # ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2 + joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2') + p1_suf = p1.loc[['ItemB', 'ItemC']].add_suffix('_p1') + p2_suf = p2.loc[['ItemB', 'ItemC']].add_suffix('_p2') + no_overlap = panel.loc[['ItemA']] + expected = no_overlap.join(p1_suf.join(p2_suf)) + tm.assert_panel_equal(joined, expected) def test_panel_join_many(self): - tm.K = 10 - panel = tm.makePanel() - tm.K = 4 + with catch_warnings(record=True): + tm.K = 10 + panel = tm.makePanel() + tm.K = 4 - panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]] + panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]] - joined = panels[0].join(panels[1:]) - tm.assert_panel_equal(joined, panel) + joined = panels[0].join(panels[1:]) + tm.assert_panel_equal(joined, panel) - panels = [panel.iloc[:2, :-5], - panel.iloc[2:6, 2:], - panel.iloc[6:, 5:-7]] + panels = [panel.iloc[:2, :-5], + panel.iloc[2:6, 2:], + panel.iloc[6:, 5:-7]] - data_dict = {} - for p in panels: - data_dict.update(p.iteritems()) + data_dict = {} + for p in panels: + data_dict.update(p.iteritems()) - joined = panels[0].join(panels[1:], how='inner') - expected = pd.Panel.from_dict(data_dict, intersect=True) - tm.assert_panel_equal(joined, 
expected) + joined = panels[0].join(panels[1:], how='inner') + expected = pd.Panel.from_dict(data_dict, intersect=True) + tm.assert_panel_equal(joined, expected) - joined = panels[0].join(panels[1:], how='outer') - expected = pd.Panel.from_dict(data_dict, intersect=False) - tm.assert_panel_equal(joined, expected) + joined = panels[0].join(panels[1:], how='outer') + expected = pd.Panel.from_dict(data_dict, intersect=False) + tm.assert_panel_equal(joined, expected) - # edge cases - self.assertRaises(ValueError, panels[0].join, panels[1:], - how='outer', lsuffix='foo', rsuffix='bar') - self.assertRaises(ValueError, panels[0].join, panels[1:], - how='right') + # edge cases + self.assertRaises(ValueError, panels[0].join, panels[1:], + how='outer', lsuffix='foo', rsuffix='bar') + self.assertRaises(ValueError, panels[0].join, panels[1:], + how='right') def _check_join(left, right, result, join_col, how='left', diff --git a/pandas/tests/tseries/test_resample.py b/pandas/tests/tseries/test_resample.py index 57e5a1631f8e8..9c66cae292c4e 100755 --- a/pandas/tests/tseries/test_resample.py +++ b/pandas/tests/tseries/test_resample.py @@ -1,5 +1,6 @@ # pylint: disable=E1101 +from warnings import catch_warnings from datetime import datetime, timedelta from functools import partial @@ -1479,44 +1480,47 @@ def test_resample_panel(self): rng = date_range('1/1/2000', '6/30/2000') n = len(rng) - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) + with catch_warnings(record=True): + panel = Panel(np.random.randn(3, n, 5), + items=['one', 'two', 'three'], + major_axis=rng, + minor_axis=['a', 'b', 'c', 'd', 'e']) - result = panel.resample('M', axis=1).mean() + result = panel.resample('M', axis=1).mean() - def p_apply(panel, f): - result = {} - for item in panel.items: - result[item] = f(panel[item]) - return Panel(result, items=panel.items) + def p_apply(panel, f): + result = {} + for item in panel.items: + 
result[item] = f(panel[item]) + return Panel(result, items=panel.items) - expected = p_apply(panel, lambda x: x.resample('M').mean()) - tm.assert_panel_equal(result, expected) + expected = p_apply(panel, lambda x: x.resample('M').mean()) + tm.assert_panel_equal(result, expected) - panel2 = panel.swapaxes(1, 2) - result = panel2.resample('M', axis=2).mean() - expected = p_apply(panel2, lambda x: x.resample('M', axis=1).mean()) - tm.assert_panel_equal(result, expected) + panel2 = panel.swapaxes(1, 2) + result = panel2.resample('M', axis=2).mean() + expected = p_apply(panel2, + lambda x: x.resample('M', axis=1).mean()) + tm.assert_panel_equal(result, expected) def test_resample_panel_numpy(self): rng = date_range('1/1/2000', '6/30/2000') n = len(rng) - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) + with catch_warnings(record=True): + panel = Panel(np.random.randn(3, n, 5), + items=['one', 'two', 'three'], + major_axis=rng, + minor_axis=['a', 'b', 'c', 'd', 'e']) - result = panel.resample('M', axis=1).apply(lambda x: x.mean(1)) - expected = panel.resample('M', axis=1).mean() - tm.assert_panel_equal(result, expected) + result = panel.resample('M', axis=1).apply(lambda x: x.mean(1)) + expected = panel.resample('M', axis=1).mean() + tm.assert_panel_equal(result, expected) - panel = panel.swapaxes(1, 2) - result = panel.resample('M', axis=2).apply(lambda x: x.mean(2)) - expected = panel.resample('M', axis=2).mean() - tm.assert_panel_equal(result, expected) + panel = panel.swapaxes(1, 2) + result = panel.resample('M', axis=2).apply(lambda x: x.mean(2)) + expected = panel.resample('M', axis=2).mean() + tm.assert_panel_equal(result, expected) def test_resample_anchored_ticks(self): # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should @@ -3037,20 +3041,22 @@ def test_apply_iteration(self): def test_panel_aggregation(self): ind = pd.date_range('1/1/2000', periods=100) data = 
np.random.randn(2, len(ind), 4) - wp = pd.Panel(data, items=['Item1', 'Item2'], major_axis=ind, - minor_axis=['A', 'B', 'C', 'D']) - tg = TimeGrouper('M', axis=1) - _, grouper, _ = tg._get_grouper(wp) - bingrouped = wp.groupby(grouper) - binagg = bingrouped.mean() + with catch_warnings(record=True): + wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind, + minor_axis=['A', 'B', 'C', 'D']) - def f(x): - assert (isinstance(x, Panel)) - return x.mean(1) + tg = TimeGrouper('M', axis=1) + _, grouper, _ = tg._get_grouper(wp) + bingrouped = wp.groupby(grouper) + binagg = bingrouped.mean() + + def f(x): + assert (isinstance(x, Panel)) + return x.mean(1) - result = bingrouped.agg(f) - tm.assert_panel_equal(result, binagg) + result = bingrouped.agg(f) + tm.assert_panel_equal(result, binagg) def test_fails_on_no_datetime_index(self): index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex') diff --git a/pandas/tests/types/test_generic.py b/pandas/tests/types/test_generic.py index c7c8b0becad63..7994aa77bb220 100644 --- a/pandas/tests/types/test_generic.py +++ b/pandas/tests/types/test_generic.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +from warnings import catch_warnings import numpy as np import pandas as pd import pandas.util.testing as tm @@ -33,7 +34,8 @@ def test_abc_types(self): self.assertIsInstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass) self.assertIsInstance(pd.Series([1, 2, 3]), gt.ABCSeries) self.assertIsInstance(self.df, gt.ABCDataFrame) - self.assertIsInstance(self.df.to_panel(), gt.ABCPanel) + with catch_warnings(record=True): + self.assertIsInstance(self.df.to_panel(), gt.ABCPanel) self.assertIsInstance(self.sparse_series, gt.ABCSparseSeries) self.assertIsInstance(self.sparse_array, gt.ABCSparseArray) self.assertIsInstance(self.categorical, gt.ABCCategorical) diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/types/test_inference.py index de3a2ca35a7f5..ec61903d3f20c 100644 --- a/pandas/tests/types/test_inference.py +++ 
b/pandas/tests/types/test_inference.py @@ -5,7 +5,7 @@ related to inference and not otherwise tested in types/test_common.py """ - +from warnings import catch_warnings import collections import re from datetime import datetime, date, timedelta, time @@ -930,8 +930,9 @@ def test_lisscalar_pandas_containers(self): self.assertFalse(is_scalar(Series([1]))) self.assertFalse(is_scalar(DataFrame())) self.assertFalse(is_scalar(DataFrame([[1]]))) - self.assertFalse(is_scalar(Panel())) - self.assertFalse(is_scalar(Panel([[[1]]]))) + with catch_warnings(record=True): + self.assertFalse(is_scalar(Panel())) + self.assertFalse(is_scalar(Panel([[[1]]]))) self.assertFalse(is_scalar(Index([]))) self.assertFalse(is_scalar(Index([1]))) diff --git a/pandas/types/missing.py b/pandas/types/missing.py index cc8b5edc27542..ea49af9884f5a 100644 --- a/pandas/types/missing.py +++ b/pandas/types/missing.py @@ -302,7 +302,7 @@ def array_equivalent(left, right, strict_nan=False): # NaNs can occur in float and complex arrays. if is_float_dtype(left) or is_complex_dtype(left): - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + return ((left == right) | (isnull(left) & isnull(right))).all() # numpy will will not allow this type of datetimelike vs integer comparison elif is_datetimelike_v_numeric(left, right): From 6c0cfff8aa643d31462de07e8a693d035fc0e77b Mon Sep 17 00:00:00 2001 From: Tong SHEN Date: Tue, 11 Apr 2017 19:37:01 +0800 Subject: [PATCH 10/56] DOC: Fix typos in doc style.ipynb (#15968) --- doc/source/style.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb index 38b39bad8b415..2b8bf35a913c1 100644 --- a/doc/source/style.ipynb +++ b/doc/source/style.ipynb @@ -99,7 +99,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "*Note*: The `DataFrame.style` attribute is a propetry that returns a `Styler` object. 
`Styler` has a `_repr_html_` method defined on it so they are rendered automatically. If you want the actual HTML back for further processing or for writing to file call the `.render()` method which returns a string.\n", + "*Note*: The `DataFrame.style` attribute is a property that returns a `Styler` object. `Styler` has a `_repr_html_` method defined on it so they are rendered automatically. If you want the actual HTML back for further processing or for writing to file call the `.render()` method which returns a string.\n", "\n", "The above output looks very similar to the standard DataFrame HTML representation. But we've done some work behind the scenes to attach CSS classes to each cell. We can view these by calling the `.render` method." ] @@ -512,7 +512,7 @@ }, "outputs": [], "source": [ - "# Compreess the color range\n", + "# Compress the color range\n", "(df.loc[:4]\n", " .style\n", " .background_gradient(cmap='viridis', low=.5, high=0)\n", @@ -637,7 +637,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Other options\n", + "## Other Options\n", "\n", "You've seen a few methods for data-driven styling.\n", "`Styler` also provides a few other options for styles that don't depend on the data.\n", From 1751628adef96b913d0083a48e51658a70dac8c4 Mon Sep 17 00:00:00 2001 From: Tong SHEN Date: Wed, 12 Apr 2017 15:21:14 +0800 Subject: [PATCH 11/56] DOC: Fix typo in timeseries.rst (#15981) --- doc/source/timeseries.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 54e574adc05d4..61812684e7648 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -113,7 +113,7 @@ For example: pd.Period('2012-05', freq='D') ``Timestamp`` and ``Period`` can be the index. Lists of ``Timestamp`` and -``Period`` are automatically coerce to ``DatetimeIndex`` and ``PeriodIndex`` +``Period`` are automatically coerced to ``DatetimeIndex`` and ``PeriodIndex`` respectively. .. 
ipython:: python From c4d71cea79bee1e9ed3b6ca97f3a5c1b8ad9369f Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Wed, 12 Apr 2017 19:29:50 +0000 Subject: [PATCH 12/56] TST: allow the network decorator to catch ssl certificate test failures (#15985) --- pandas/util/testing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index ef0fa04548cab..d5986a7f390e5 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2213,6 +2213,7 @@ def dec(f): 'Temporary failure in name resolution', 'Name or service not known', 'Connection refused', + 'certificate verify', ) # or this e.errno/e.reason.errno From 7b8a6b1bc22f6fc0023c02ac8301e07b4ab80417 Mon Sep 17 00:00:00 2001 From: Sam Foo Date: Wed, 12 Apr 2017 13:49:49 -0700 Subject: [PATCH 13/56] VIS: Allow 'C0'-like plotting for plotting colors #15516 (#15873) * VIS: Allow 'C0'-like plotting for plotting colors * Added case color='' and support for mpl < 2.0 * Updated prop_cycle references to be compatible with matplotlib 1.5 and 2.0 * Separated test; Used more consise regex --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/tests/plotting/test_frame.py | 16 ++++++++++++++++ pandas/tools/plotting.py | 21 ++++++++++++++++----- 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index fd1cd3d0022c9..defabee3cef8c 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -345,6 +345,7 @@ Other Enhancements - The ``skiprows`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`10882`) - The ``nrows`` and ``chunksize`` arguments in ``pd.read_csv()`` are supported if both are passed (:issue:`6774`, :issue:`15755`) - ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) +- ``pd.DataFrame.plot`` can pass `matplotlib 2.0 default color cycle as a single string as color 
parameter `__. (:issue:`15516`) - ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) - ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs ` (:issue:`15136`) - ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 1527637ea3eff..8090b9cc44ca3 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -141,6 +141,22 @@ def test_plot(self): result = ax.get_axes() # deprecated self.assertIs(result, axes[0]) + # GH 15516 + def test_mpl2_color_cycle_str(self): + # test CN mpl 2.0 color cycle + if self.mpl_ge_2_0_0: + colors = ['C' + str(x) for x in range(10)] + df = DataFrame(randn(10, 3), columns=['a', 'b', 'c']) + for c in colors: + _check_plot_works(df.plot, color=c) + else: + pytest.skip("not supported in matplotlib < 2.0.0") + + def test_color_empty_string(self): + df = DataFrame(randn(10, 2)) + with tm.assertRaises(ValueError): + df.plot(color='') + def test_color_and_style_arguments(self): df = DataFrame({'x': [1, 2], 'y': [3, 4]}) # passing both 'color' and 'style' arguments should be allowed diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index f70a2b0b22140..99e56ca80cf97 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -225,10 +225,18 @@ def _maybe_valid_colors(colors): # check whether each character can be convertable to colors maybe_color_cycle = _maybe_valid_colors(list(colors)) if maybe_single_color and maybe_color_cycle and len(colors) > 1: - msg = ("'{0}' can be parsed as both single color and " - "color cycle. 
Specify each color using a list " - "like ['{0}'] or {1}") - raise ValueError(msg.format(colors, list(colors))) + # Special case for single str 'CN' match and convert to hex + # for supporting matplotlib < 2.0.0 + if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0(): + hex_color = [c['color'] + for c in list(plt.rcParams['axes.prop_cycle'])] + colors = [hex_color[int(colors[1])]] + else: + # this may no longer be required + msg = ("'{0}' can be parsed as both single color and " + "color cycle. Specify each color using a list " + "like ['{0}'] or {1}") + raise ValueError(msg.format(colors, list(colors))) elif maybe_single_color: colors = [colors] else: @@ -237,7 +245,10 @@ def _maybe_valid_colors(colors): pass if len(colors) != num_colors: - multiple = num_colors // len(colors) - 1 + try: + multiple = num_colors // len(colors) - 1 + except ZeroDivisionError: + raise ValueError("Invalid color argument: ''") mod = num_colors % len(colors) colors += multiple * colors From 1c4dacb4464fa0139216130b1835e5f4d4b73342 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Thu, 13 Apr 2017 10:18:04 +0000 Subject: [PATCH 14/56] DEPR: deprecate relableling dicts in groupby.agg (#15931) * DEPR: deprecate relabling dictionarys in groupby.agg --- doc/source/computation.rst | 8 -- doc/source/groupby.rst | 32 ++++-- doc/source/timeseries.rst | 8 -- doc/source/whatsnew/v0.20.0.txt | 82 +++++++++++++ pandas/core/base.py | 152 +++++++++++++++++++++---- pandas/core/groupby.py | 52 +++++---- pandas/tests/groupby/test_aggregate.py | 83 +++++++++++--- pandas/tests/groupby/test_groupby.py | 14 ++- pandas/tests/groupby/test_whitelist.py | 2 +- pandas/tests/test_window.py | 22 ++-- pandas/tests/tseries/test_resample.py | 67 ++++++----- pandas/types/cast.py | 17 +++ 12 files changed, 418 insertions(+), 121 deletions(-) diff --git a/doc/source/computation.rst b/doc/source/computation.rst index a37cbc96b2d8c..f46a00826a8d9 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ 
-610,14 +610,6 @@ aggregation with, outputting a DataFrame: r['A'].agg([np.sum, np.mean, np.std]) -If a dict is passed, the keys will be used to name the columns. Otherwise the -function's name (stored in the function object) will be used. - -.. ipython:: python - - r['A'].agg({'result1' : np.sum, - 'result2' : np.mean}) - On a widowed DataFrame, you can pass a list of functions to apply to each column, which produces an aggregated result with a hierarchical index: diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index cbe3588104439..03ee5e0d67913 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -502,7 +502,7 @@ index are the group names and whose values are the sizes of each group. Applying multiple functions at once ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -With grouped Series you can also pass a list or dict of functions to do +With grouped ``Series`` you can also pass a list or dict of functions to do aggregation with, outputting a DataFrame: .. ipython:: python @@ -510,23 +510,35 @@ aggregation with, outputting a DataFrame: grouped = df.groupby('A') grouped['C'].agg([np.sum, np.mean, np.std]) -If a dict is passed, the keys will be used to name the columns. Otherwise the -function's name (stored in the function object) will be used. +On a grouped ``DataFrame``, you can pass a list of functions to apply to each +column, which produces an aggregated result with a hierarchical index: .. ipython:: python - grouped['D'].agg({'result1' : np.sum, - 'result2' : np.mean}) + grouped.agg([np.sum, np.mean, np.std]) -On a grouped DataFrame, you can pass a list of functions to apply to each -column, which produces an aggregated result with a hierarchical index: + +The resulting aggregations are named for the functions themselves. If you +need to rename, then you can add in a chained operation for a ``Series`` like this: .. 
ipython:: python - grouped.agg([np.sum, np.mean, np.std]) + (grouped['C'].agg([np.sum, np.mean, np.std]) + .rename(columns={'sum': 'foo', + 'mean': 'bar', + 'std': 'baz'}) + ) + +For a grouped ``DataFrame``, you can rename in a similar manner: + +.. ipython:: python + + (grouped.agg([np.sum, np.mean, np.std]) + .rename(columns={'sum': 'foo', + 'mean': 'bar', + 'std': 'baz'}) + ) -Passing a dict of functions has different behavior by default, see the next -section. Applying different functions to DataFrame columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 61812684e7648..0a957772d785e 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1549,14 +1549,6 @@ You can pass a list or dict of functions to do aggregation with, outputting a Da r['A'].agg([np.sum, np.mean, np.std]) -If a dict is passed, the keys will be used to name the columns. Otherwise the -function's name (stored in the function object) will be used. - -.. ipython:: python - - r['A'].agg({'result1' : np.sum, - 'result2' : np.mean}) - On a resampled DataFrame, you can pass a list of functions to apply to each column, which produces an aggregated result with a hierarchical index: diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index defabee3cef8c..c243e4ef81b38 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -456,6 +456,88 @@ Convert to an xarray DataArray p.to_xarray() +.. _whatsnew_0200.api_breaking.deprecate_group_agg_dict: + +Deprecate groupby.agg() with a dictionary when renaming +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``.groupby(..).agg(..)``, ``.rolling(..).agg(..)``, and ``.resample(..).agg(..)`` syntax can accept a variable of inputs, including scalars, +list, and a dict of column names to scalars or lists. This provides a useful syntax for constructing multiple +(potentially different) aggregations. 
+ +However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent +between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility. + +1) We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed +one to ``rename`` the resulting aggregation, but this had a completely different +meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations. +2) We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner. + +This is an illustrative example: + +.. ipython:: python + + df = pd.DataFrame({'A': [1, 1, 1, 2, 2], + 'B': range(5), + 'C': range(5)}) + df + +Here is a typical useful syntax for computing different aggregations for different columns. This +is a natural (and useful) syntax. We aggregate from the dict-to-list by taking the specified +columns and applying the list of functions. This returns a ``MultiIndex`` for the columns. + +.. ipython:: python + + df.groupby('A').agg({'B': 'sum', 'C': 'min'}) + +Here's an example of the first deprecation (1), passing a dict to a grouped ``Series``. This +is a combination aggregation & renaming: + +.. code-block:: ipython + + In [6]: df.groupby('A').B.agg({'foo': 'count'}) + FutureWarning: using a dict on a Series for aggregation + is deprecated and will be removed in a future version + + Out[6]: + foo + A + 1 3 + 2 2 + +You can accomplish the same operation, more idiomatically by: + +.. ipython:: python + + df.groupby('A').B.agg(['count']).rename({'count': 'foo'}) + + +Here's an example of the second deprecation (2), passing a dict-of-dict to a grouped ``DataFrame``: + +.. 
code-block:: python + + In [23]: (df.groupby('A') + .agg({'B': {'foo': 'sum'}, 'C': {'bar': 'min'}}) + ) + FutureWarning: using a dict with renaming is deprecated and will be removed in a future version + + Out[23]: + B C + foo bar + A + 1 3 0 + 2 7 3 + + +You can accomplish nearly the same by: + +.. ipython:: python + + (df.groupby('A') + .agg({'B': 'sum', 'C': 'min'}) + .rename(columns={'B': 'foo', 'C': 'bar'}) + ) + .. _whatsnew.api_breaking.io_compat: Possible incompat for HDF5 formats for pandas < 0.13.0 diff --git a/pandas/core/base.py b/pandas/core/base.py index bdbfb7b949986..6566ee38c1ade 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1,6 +1,7 @@ """ Base and utility classes for pandas objects. """ +import warnings from pandas import compat from pandas.compat import builtins import numpy as np @@ -290,7 +291,12 @@ class SelectionMixin(object): } @property - def name(self): + def _selection_name(self): + """ + return a name for myself; this would ideally be called + the 'name' property, but we cannot conflict with the + Series.name property which can be set + """ if self._selection is None: return None # 'result' else: @@ -405,6 +411,26 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate + def _try_aggregate_string_function(self, arg, *args, **kwargs): + """ + if arg is a string, then try to operate on it: + - try to find a function on ourselves + - try to find a numpy function + - raise + + """ + assert isinstance(arg, compat.string_types) + + f = getattr(self, arg, None) + if f is not None: + return f(*args, **kwargs) + + f = getattr(np, arg, None) + if f is not None: + return f(self, *args, **kwargs) + + raise ValueError("{} is an unknown string function".format(arg)) + def _aggregate(self, arg, *args, **kwargs): """ provide an implementation for the aggregators @@ -424,18 +450,22 @@ def _aggregate(self, arg, *args, **kwargs): how can be a string describe the required post-processing, or None if not required """ - is_aggregator 
= lambda x: isinstance(x, (list, tuple, dict)) is_nested_renamer = False + _axis = kwargs.pop('_axis', None) + if _axis is None: + _axis = getattr(self, 'axis', 0) _level = kwargs.pop('_level', None) + if isinstance(arg, compat.string_types): - return getattr(self, arg)(*args, **kwargs), None + return self._try_aggregate_string_function(arg, *args, + **kwargs), None if isinstance(arg, dict): # aggregate based on the passed dict - if self.axis != 0: # pragma: no cover + if _axis != 0: # pragma: no cover raise ValueError('Can only pass dict with axis=0') obj = self._selected_obj @@ -454,7 +484,7 @@ def _aggregate(self, arg, *args, **kwargs): # the keys must be in the columns # for ndim=2, or renamers for ndim=1 - # ok + # ok for now, but deprecated # {'A': { 'ra': 'mean' }} # {'A': { 'ra': ['mean'] }} # {'ra': ['mean']} @@ -469,8 +499,28 @@ def _aggregate(self, arg, *args, **kwargs): 'for {0} with a nested ' 'dictionary'.format(k)) + # deprecation of nested renaming + # GH 15931 + warnings.warn( + ("using a dict with renaming " + "is deprecated and will be removed in a future " + "version"), + FutureWarning, stacklevel=4) + arg = new_arg + else: + # deprecation of renaming keys + # GH 15931 + keys = list(compat.iterkeys(arg)) + if (isinstance(obj, ABCDataFrame) and + len(obj.columns.intersection(keys)) != len(keys)): + warnings.warn( + ("using a dict with renaming " + "is deprecated and will be removed in a future " + "version"), + FutureWarning, stacklevel=4) + from pandas.tools.concat import concat def _agg_1dim(name, how, subset=None): @@ -534,7 +584,7 @@ def _agg(arg, func): agg_how: _agg_1dim(self._selection, agg_how)) # we are selecting the same set as we are aggregating - elif not len(sl - set(compat.iterkeys(arg))): + elif not len(sl - set(keys)): result = _agg(arg, _agg_1dim) @@ -555,32 +605,74 @@ def _agg(arg, func): result = _agg(arg, _agg_2dim) # combine results + + def is_any_series(): + # return a boolean if we have *any* nested series + return 
any([isinstance(r, ABCSeries) + for r in compat.itervalues(result)]) + + def is_any_frame(): + # return a boolean if we have *any* nested series + return any([isinstance(r, ABCDataFrame) + for r in compat.itervalues(result)]) + if isinstance(result, list): - result = concat(result, keys=keys, axis=1) - elif isinstance(list(compat.itervalues(result))[0], - ABCDataFrame): - result = concat([result[k] for k in keys], keys=keys, axis=1) - else: - from pandas import DataFrame + return concat(result, keys=keys, axis=1), True + + elif is_any_frame(): + # we have a dict of DataFrames + # return a MI DataFrame + + return concat([result[k] for k in keys], + keys=keys, axis=1), True + + elif isinstance(self, ABCSeries) and is_any_series(): + + # we have a dict of Series + # return a MI Series + try: + result = concat(result) + except TypeError: + # we want to give a nice error here if + # we have non-same sized objects, so + # we don't automatically broadcast + + raise ValueError("cannot perform both aggregation " + "and transformation operations " + "simultaneously") + + return result, True + + # fall thru + from pandas import DataFrame, Series + try: result = DataFrame(result) + except ValueError: + + # we have a dict of scalars + result = Series(result, + name=getattr(self, 'name', None)) return result, True - elif hasattr(arg, '__iter__'): - return self._aggregate_multiple_funcs(arg, _level=_level), None + elif is_list_like(arg) and arg not in compat.string_types: + # we require a list, but not an 'str' + return self._aggregate_multiple_funcs(arg, + _level=_level, + _axis=_axis), None else: result = None - cy_func = self._is_cython_func(arg) - if cy_func and not args and not kwargs: - return getattr(self, cy_func)(), None + f = self._is_cython_func(arg) + if f and not args and not kwargs: + return getattr(self, f)(), None # caller can react return result, True - def _aggregate_multiple_funcs(self, arg, _level): + def _aggregate_multiple_funcs(self, arg, _level, _axis): 
from pandas.tools.concat import concat - if self.axis != 0: + if _axis != 0: raise NotImplementedError("axis other than 0 is not supported") if self._selected_obj.ndim == 1: @@ -615,10 +707,30 @@ def _aggregate_multiple_funcs(self, arg, _level): keys.append(col) except (TypeError, DataError): pass + except ValueError: + # cannot aggregate + continue except SpecificationError: raise - return concat(results, keys=keys, axis=1) + # if we are empty + if not len(results): + raise ValueError("no results") + + try: + return concat(results, keys=keys, axis=1) + except TypeError: + + # we are concatting non-NDFrame objects, + # e.g. a list of scalars + + from pandas.types.cast import is_nested_object + from pandas import Series + result = Series(results, index=keys, name=self.name) + if is_nested_object(result): + raise ValueError("cannot combine transform and " + "aggregation operations") + return result def _shallow_copy(self, obj=None, obj_type=None, **kwargs): """ return a new object with the replacement attributes """ diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index add2987b8f452..5e55196803c22 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -722,7 +722,7 @@ def _python_apply_general(self, f): not_indexed_same=mutated or self.mutated) def _iterate_slices(self): - yield self.name, self._selected_obj + yield self._selection_name, self._selected_obj def transform(self, func, *args, **kwargs): raise AbstractMethodError(self) @@ -921,9 +921,9 @@ def reset_identity(values): result = concat(values, axis=self.axis) if (isinstance(result, Series) and - getattr(self, 'name', None) is not None): + getattr(self, '_selection_name', None) is not None): - result.name = self.name + result.name = self._selection_name return result @@ -1123,7 +1123,7 @@ def size(self): result = self.grouper.size() if isinstance(self.obj, Series): - result.name = getattr(self, 'name', None) + result.name = getattr(self.obj, 'name', None) return result @classmethod @@ 
-2736,7 +2736,7 @@ class SeriesGroupBy(GroupBy): exec(_def_str) @property - def name(self): + def _selection_name(self): """ since we are a series, we by definition only have a single name, but may be the result of a selection or @@ -2834,6 +2834,17 @@ def aggregate(self, func_or_funcs, *args, **kwargs): def _aggregate_multiple_funcs(self, arg, _level): if isinstance(arg, dict): + + # show the deprecation, but only if we + # have not shown a higher level one + # GH 15931 + if isinstance(self._selected_obj, Series) and _level <= 1: + warnings.warn( + ("using a dict on a Series for aggregation\n" + "is deprecated and will be removed in a future " + "version"), + FutureWarning, stacklevel=4) + columns = list(arg.keys()) arg = list(arg.items()) elif any(isinstance(x, (tuple, list)) for x in arg): @@ -2879,12 +2890,12 @@ def _aggregate_multiple_funcs(self, arg, _level): def _wrap_output(self, output, index, names=None): """ common agg/transform wrapping logic """ - output = output[self.name] + output = output[self._selection_name] if names is not None: return DataFrame(output, index=index, columns=names) else: - name = self.name + name = self._selection_name if name is None: name = self._selected_obj.name return Series(output, index=index, name=name) @@ -2902,7 +2913,7 @@ def _wrap_transformed_output(self, output, names=None): def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(keys) == 0: # GH #6265 - return Series([], name=self.name, index=keys) + return Series([], name=self._selection_name, index=keys) def _get_index(): if self.grouper.nkeys > 1: @@ -2915,7 +2926,7 @@ def _get_index(): # GH #823 index = _get_index() result = DataFrame(values, index=index).stack() - result.name = self.name + result.name = self._selection_name return result if isinstance(values[0], (Series, dict)): @@ -2927,7 +2938,8 @@ def _get_index(): not_indexed_same=not_indexed_same) else: # GH #6265 - return Series(values, index=_get_index(), name=self.name) + return 
Series(values, index=_get_index(), + name=self._selection_name) def _aggregate_named(self, func, *args, **kwargs): result = {} @@ -3098,7 +3110,7 @@ def nunique(self, dropna=True): return Series(res, index=ri, - name=self.name) + name=self._selection_name) @Appender(Series.describe.__doc__) def describe(self, **kwargs): @@ -3156,7 +3168,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, # multi-index components labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]] levels = [ping.group_index for ping in self.grouper.groupings] + [lev] - names = self.grouper.names + [self.name] + names = self.grouper.names + [self._selection_name] if dropna: mask = labels[-1] != -1 @@ -3191,7 +3203,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, if is_integer_dtype(out): out = _ensure_int64(out) - return Series(out, index=mi, name=self.name) + return Series(out, index=mi, name=self._selection_name) # for compat. with libgroupby.value_counts need to ensure every # bin is present at every index level, null filled with zeros @@ -3222,7 +3234,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, if is_integer_dtype(out): out = _ensure_int64(out) - return Series(out, index=mi, name=self.name) + return Series(out, index=mi, name=self._selection_name) def count(self): """ Compute count of group, excluding missing values """ @@ -3235,7 +3247,7 @@ def count(self): return Series(out, index=self.grouper.result_index, - name=self.name, + name=self._selection_name, dtype='int64') def _apply_to_column_groupbys(self, func): @@ -3391,7 +3403,7 @@ def aggregate(self, arg, *args, **kwargs): try: assert not args and not kwargs result = self._aggregate_multiple_funcs( - [arg], _level=_level) + [arg], _level=_level, _axis=self.axis) result.columns = Index( result.columns.levels[0], name=self._selected_obj.columns.name) @@ -3623,7 +3635,8 @@ def first_non_None_value(values): except (ValueError, AttributeError): # GH1738: values 
is list of arrays of unequal lengths fall # through to the outer else caluse - return Series(values, index=key_index, name=self.name) + return Series(values, index=key_index, + name=self._selection_name) # if we have date/time like in the original, then coerce dates # as we are stacking can easily have object dtypes here @@ -3647,8 +3660,9 @@ def first_non_None_value(values): # only coerce dates if we find at least 1 datetime coerce = True if any([isinstance(x, Timestamp) for x in values]) else False - # self.name not passed through to Series as the result - # should not take the name of original selection of columns + # self._selection_name not passed through to Series as the + # result should not take the name of original selection + # of columns return (Series(values, index=key_index) ._convert(datetime=True, coerce=coerce)) diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 52b35048b6762..c2d6422c50d02 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -14,7 +14,7 @@ import pandas as pd from pandas import (date_range, MultiIndex, DataFrame, - Series, Index, bdate_range) + Series, Index, bdate_range, concat) from pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.groupby import SpecificationError, DataError from pandas.compat import OrderedDict @@ -291,8 +291,10 @@ def test_aggregate_api_consistency(self): expected.columns = MultiIndex.from_product([['C', 'D'], ['mean', 'sum']]) - result = grouped[['D', 'C']].agg({'r': np.sum, - 'r2': np.mean}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = grouped[['D', 'C']].agg({'r': np.sum, + 'r2': np.mean}) expected = pd.concat([d_sum, c_sum, d_mean, @@ -302,6 +304,28 @@ def test_aggregate_api_consistency(self): ['D', 'C']]) assert_frame_equal(result, expected, check_like=True) + def test_agg_dict_renaming_deprecation(self): + # 15931 + df = 
pd.DataFrame({'A': [1, 1, 1, 2, 2], + 'B': range(5), + 'C': range(5)}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, + 'C': {'bar': ['count', 'min']}}) + assert "using a dict with renaming" in str(w[0].message) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + df.groupby('A').B.agg({'foo': 'count'}) + assert "using a dict on a Series for aggregation" in str( + w[0].message) + def test_agg_compat(self): # GH 12334 @@ -320,14 +344,19 @@ def test_agg_compat(self): axis=1) expected.columns = MultiIndex.from_tuples([('C', 'sum'), ('C', 'std')]) - result = g['D'].agg({'C': ['sum', 'std']}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = g['D'].agg({'C': ['sum', 'std']}) assert_frame_equal(result, expected, check_like=True) expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1) expected.columns = ['C', 'D'] - result = g['D'].agg({'C': 'sum', 'D': 'std'}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = g['D'].agg({'C': 'sum', 'D': 'std'}) assert_frame_equal(result, expected, check_like=True) def test_agg_nested_dicts(self): @@ -348,8 +377,10 @@ def f(): self.assertRaises(SpecificationError, f) - result = g.agg({'C': {'ra': ['mean', 'std']}, - 'D': {'rb': ['mean', 'std']}}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = g.agg({'C': {'ra': ['mean', 'std']}, + 'D': {'rb': ['mean', 'std']}}) expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(), g['D'].std()], axis=1) expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( @@ -358,9 +389,14 @@ def f(): # same name as the original column # GH9052 - expected = g['D'].agg({'result1': np.sum, 'result2': np.mean}) + with 
tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + expected = g['D'].agg({'result1': np.sum, 'result2': np.mean}) expected = expected.rename(columns={'result1': 'D'}) - result = g['D'].agg({'D': np.sum, 'result2': np.mean}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = g['D'].agg({'D': np.sum, 'result2': np.mean}) assert_frame_equal(result, expected, check_like=True) def test_agg_python_multiindex(self): @@ -627,7 +663,6 @@ def test_agg_multiple_functions_too_many_lambdas(self): self.assertRaises(SpecificationError, grouped.agg, funcs) def test_more_flexible_frame_multi_function(self): - from pandas import concat grouped = self.df.groupby('A') @@ -655,9 +690,12 @@ def foo(x): def bar(x): return np.std(x, ddof=1) - d = OrderedDict([['C', np.mean], ['D', OrderedDict( - [['foo', np.mean], ['bar', np.std]])]]) - result = grouped.aggregate(d) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + d = OrderedDict([['C', np.mean], ['D', OrderedDict( + [['foo', np.mean], ['bar', np.std]])]]) + result = grouped.aggregate(d) d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) expected = grouped.aggregate(d) @@ -671,16 +709,29 @@ def test_multi_function_flexible_mix(self): d = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ 'bar', 'std' ]])], ['D', 'sum']]) - result = grouped.aggregate(d) + + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = grouped.aggregate(d) + d2 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ 'bar', 'std' ]])], ['D', ['sum']]]) - result2 = grouped.aggregate(d2) + + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result2 = grouped.aggregate(d2) d3 = OrderedDict([['C', OrderedDict([['foo', 'mean'], [ 'bar', 'std' ]])], ['D', {'sum': 'sum'}]]) - expected = 
grouped.aggregate(d3) + + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + expected = grouped.aggregate(d3) assert_frame_equal(result, expected) assert_frame_equal(result2, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 68955c954206e..8f3d8e2307f45 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -59,7 +59,10 @@ def checkit(dtype): # complex agg agged = grouped.aggregate([np.mean, np.std]) - agged = grouped.aggregate({'one': np.mean, 'two': np.std}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + agged = grouped.aggregate({'one': np.mean, 'two': np.std}) group_constants = {0: 10, 1: 20, 2: 30} agged = grouped.agg(lambda x: group_constants[x.name] + x.mean()) @@ -1262,7 +1265,9 @@ def test_frame_set_name_single(self): result = grouped['C'].agg([np.mean, np.std]) self.assertEqual(result.index.name, 'A') - result = grouped['C'].agg({'foo': np.mean, 'bar': np.std}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = grouped['C'].agg({'foo': np.mean, 'bar': np.std}) self.assertEqual(result.index.name, 'A') def test_multi_iter(self): @@ -1438,7 +1443,10 @@ def test_groupby_as_index_agg(self): grouped = self.df.groupby('A', as_index=True) expected3 = grouped['C'].sum() expected3 = DataFrame(expected3).rename(columns={'C': 'Q'}) - result3 = grouped['C'].agg({'Q': np.sum}) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result3 = grouped['C'].agg({'Q': np.sum}) assert_frame_equal(result3, expected3) # multi-key diff --git a/pandas/tests/groupby/test_whitelist.py b/pandas/tests/groupby/test_whitelist.py index d566f34b7eae8..5a4f282789eeb 100644 --- a/pandas/tests/groupby/test_whitelist.py +++ b/pandas/tests/groupby/test_whitelist.py @@ -233,7 +233,7 @@ def test_tab_completion(mframe): expected = set( ['A', 
'B', 'C', 'agg', 'aggregate', 'apply', 'boxplot', 'filter', 'first', 'get_group', 'groups', 'hist', 'indices', 'last', 'max', - 'mean', 'median', 'min', 'name', 'ngroups', 'nth', 'ohlc', 'plot', + 'mean', 'median', 'min', 'ngroups', 'nth', 'ohlc', 'plot', 'prod', 'size', 'std', 'sum', 'transform', 'var', 'sem', 'count', 'nunique', 'head', 'describe', 'cummax', 'quantile', 'rank', 'cumprod', 'tail', 'resample', 'cummin', 'fillna', diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 5fc31e9321f31..9cd3b8b839a9b 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -134,16 +134,18 @@ def test_agg(self): expected.columns = ['mean', 'sum'] tm.assert_frame_equal(result, expected) - result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) + with catch_warnings(record=True): + result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) expected = pd.concat([a_mean, a_sum], axis=1) expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A', 'sum')]) tm.assert_frame_equal(result, expected, check_like=True) - result = r.aggregate({'A': {'mean': 'mean', - 'sum': 'sum'}, - 'B': {'mean2': 'mean', - 'sum2': 'sum'}}) + with catch_warnings(record=True): + result = r.aggregate({'A': {'mean': 'mean', + 'sum': 'sum'}, + 'B': {'mean2': 'mean', + 'sum2': 'sum'}}) expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')] expected.columns = pd.MultiIndex.from_tuples(exp_cols) @@ -195,12 +197,14 @@ def f(): r['B'].std()], axis=1) expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) - result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, - 'B': {'rb': ['mean', 'std']}}) + with catch_warnings(record=True): + result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, + 'B': {'rb': ['mean', 'std']}}) tm.assert_frame_equal(result, expected, check_like=True) - result = r.agg({'A': {'ra': ['mean', 'std']}, 
- 'B': {'rb': ['mean', 'std']}}) + with catch_warnings(record=True): + result = r.agg({'A': {'ra': ['mean', 'std']}, + 'B': {'rb': ['mean', 'std']}}) expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), ( 'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')]) tm.assert_frame_equal(result, expected, check_like=True) diff --git a/pandas/tests/tseries/test_resample.py b/pandas/tests/tseries/test_resample.py index 9c66cae292c4e..98664c1ec118c 100755 --- a/pandas/tests/tseries/test_resample.py +++ b/pandas/tests/tseries/test_resample.py @@ -394,8 +394,10 @@ def test_agg_consistency(self): r = df.resample('3T') - expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'}) - result = r.agg({'r1': 'mean', 'r2': 'sum'}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'}) + result = r.agg({'r1': 'mean', 'r2': 'sum'}) assert_frame_equal(result, expected) # TODO: once GH 14008 is fixed, move these tests into @@ -459,7 +461,9 @@ def test_agg(self): expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A', 'sum')]) for t in cases: - result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}}) assert_frame_equal(result, expected, check_like=True) expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1) @@ -468,8 +472,10 @@ def test_agg(self): ('B', 'mean2'), ('B', 'sum2')]) for t in cases: - result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}, - 'B': {'mean2': 'mean', 'sum2': 'sum'}}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}, + 'B': {'mean2': 'mean', 'sum2': 'sum'}}) assert_frame_equal(result, expected, check_like=True) expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1) @@ -529,9 +535,12 @@ def 
test_agg_misc(self): ('result1', 'B'), ('result2', 'A'), ('result2', 'B')]) + for t in cases: - result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum), - ('result2', np.mean)])) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum), + ('result2', np.mean)])) assert_frame_equal(result, expected, check_like=True) # agg with different hows @@ -557,7 +566,9 @@ def test_agg_misc(self): # series like aggs for t in cases: - result = t['A'].agg({'A': ['sum', 'std']}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t['A'].agg({'A': ['sum', 'std']}) expected = pd.concat([t['A'].sum(), t['A'].std()], axis=1) @@ -572,15 +583,20 @@ def test_agg_misc(self): ('A', 'std'), ('B', 'mean'), ('B', 'std')]) - result = t['A'].agg({'A': ['sum', 'std'], 'B': ['mean', 'std']}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t['A'].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) assert_frame_equal(result, expected, check_like=True) # errors # invalid names in the agg specification for t in cases: def f(): - t[['A']].agg({'A': ['sum', 'std'], - 'B': ['mean', 'std']}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + t[['A']].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) self.assertRaises(SpecificationError, f) @@ -617,12 +633,16 @@ def f(): expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')]) - result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, - 'B': {'rb': ['mean', 'std']}}) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']}, + 'B': {'rb': ['mean', 'std']}}) assert_frame_equal(result, expected, check_like=True) - result = t.agg({'A': {'ra': ['mean', 'std']}, - 'B': {'rb': ['mean', 'std']}}) + with tm.assert_produces_warning(FutureWarning, + 
check_stacklevel=False): + result = t.agg({'A': {'ra': ['mean', 'std']}, + 'B': {'rb': ['mean', 'std']}}) assert_frame_equal(result, expected, check_like=True) def test_selection_api_validation(self): @@ -752,16 +772,7 @@ def test_resample_empty_series(self): expected.index = s.index._shallow_copy(freq=freq) assert_index_equal(result.index, expected.index) self.assertEqual(result.index.freq, expected.index.freq) - - if (method == 'size' and - isinstance(result.index, PeriodIndex) and - freq in ['M', 'D']): - # GH12871 - TODO: name should propagate, but currently - # doesn't on lower / same frequency with PeriodIndex - assert_series_equal(result, expected, check_dtype=False) - - else: - assert_series_equal(result, expected, check_dtype=False) + assert_series_equal(result, expected, check_dtype=False) def test_resample_empty_dataframe(self): # GH13212 @@ -1846,10 +1857,12 @@ def test_how_lambda_functions(self): tm.assert_series_equal(result['foo'], foo_exp) tm.assert_series_equal(result['bar'], bar_exp) + # this is a MI Series, so comparing the names of the results + # doesn't make sense result = ts.resample('M').aggregate({'foo': lambda x: x.mean(), 'bar': lambda x: x.std(ddof=1)}) - tm.assert_series_equal(result['foo'], foo_exp) - tm.assert_series_equal(result['bar'], bar_exp) + tm.assert_series_equal(result['foo'], foo_exp, check_names=False) + tm.assert_series_equal(result['bar'], bar_exp, check_names=False) def test_resample_unequal_times(self): # #1772 diff --git a/pandas/types/cast.py b/pandas/types/cast.py index 580ce12de3333..85053dba0c18b 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -45,6 +45,23 @@ def maybe_convert_platform(values): return values +def is_nested_object(obj): + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. 
+ + """ + + if isinstance(obj, ABCSeries) and is_object_dtype(obj): + + if any(isinstance(v, ABCSeries) for v in obj.values): + return True + + return False + + def maybe_downcast_to_dtype(result, dtype): """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 From 73222392f389f918272a9d96c5f623f0b13966eb Mon Sep 17 00:00:00 2001 From: carlosdanielcsantos Date: Thu, 13 Apr 2017 07:28:30 -0400 Subject: [PATCH 15/56] ENH: Rolling window endpoints inclusion closes #13965 Author: carlosdanielcsantos Author: carlosdanielcsantos Author: carlosdanielcsantos Closes #15795 from carlosdanielcsantos/rwindow-endpoints-inclusion and squashes the following commits: aad97dc [carlosdanielcsantos] Updating docs 568c12f [carlosdanielcsantos] Innocuous change to rerun tests 037b84e [carlosdanielcsantos] Fixing style c18a31b [carlosdanielcsantos] Fixing test of assertion of closed parameter in fixed windows Style corrections 90dfb0c [carlosdanielcsantos] Correcting bug in window validation 8bd336a [carlosdanielcsantos] Almost there 306b9f7 [carlosdanielcsantos] Commiting progress on default=None. 
Still not tested Adding computation.rst section (still not written) ec4bbc7 [carlosdanielcsantos] Changing l_closed and r_closed variable names 0e8e65c [carlosdanielcsantos] Adding doc-strings and PEP8 corrections 5eaf3b4 [carlosdanielcsantos] str closed -> object closed Adding test of assert for closed parameter Adding assert for closed parameter in get_window_indexer 2cf6804 [carlosdanielcsantos] Time-based windows working da034bf [carlosdanielcsantos] Commiting progress 34f1309 [carlosdanielcsantos] Adding window slicing endpoint inclusion selection to VariableWindowIndexer --- doc/source/computation.rst | 42 ++++++++++ doc/source/whatsnew/v0.20.0.txt | 1 + pandas/core/generic.py | 4 +- pandas/core/window.py | 46 +++++++---- pandas/core/window.pyx | 135 +++++++++++++++++++++++--------- pandas/tests/test_window.py | 45 +++++++++++ 6 files changed, 222 insertions(+), 51 deletions(-) diff --git a/doc/source/computation.rst b/doc/source/computation.rst index f46a00826a8d9..f6c912bf59b34 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -459,6 +459,48 @@ default of the index) in a DataFrame. dft dft.rolling('2s', on='foo').sum() +.. _stats.rolling_window.endpoints: + +Rolling Window Endpoints +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. versionadded:: 0.20.0 + +The inclusion of the interval endpoints in rolling window calculations can be specified with the ``closed`` +parameter: + +.. csv-table:: + :header: "``closed``", "Description", "Default for" + :widths: 20, 30, 30 + + ``right``, close right endpoint, time-based windows + ``left``, close left endpoint, + ``both``, close both endpoints, fixed windows + ``neither``, open endpoints, + +For example, having the right endpoint open is useful in many problems that require that there is no contamination +from present information back to past information. This allows the rolling window to compute statistics +"up to that point in time", but not including that point in time. + +.. 
ipython:: python + + df = pd.DataFrame({'x': 1}, + index = [pd.Timestamp('20130101 09:00:01'), + pd.Timestamp('20130101 09:00:02'), + pd.Timestamp('20130101 09:00:03'), + pd.Timestamp('20130101 09:00:04'), + pd.Timestamp('20130101 09:00:06')]) + + df["right"] = df.rolling('2s', closed='right').x.sum() # default + df["both"] = df.rolling('2s', closed='both').x.sum() + df["left"] = df.rolling('2s', closed='left').x.sum() + df["neither"] = df.rolling('2s', closed='neither').x.sum() + + df + +Currently, this feature is only implemented for time-based windows. +For fixed windows, the closed parameter cannot be set and the rolling window will always have both endpoints closed. + .. _stats.moments.ts-versus-resampling: Time-aware Rolling vs. Resampling diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index c243e4ef81b38..07f393a814f8b 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -319,6 +319,7 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you Other Enhancements ^^^^^^^^^^^^^^^^^^ +- ``DataFrame.rolling()`` now accepts the parameter ``closed='right'|'left'|'both'|'neither'`` to choose the rolling window endpoint closedness. See the :ref:`documentation ` (:issue:`13965`) - Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here `. 
- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`) - ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ad56ea44a0dc6..86978a9739ca4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5962,12 +5962,12 @@ def _add_series_or_dataframe_operations(cls): @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, freq=None, center=False, - win_type=None, on=None, axis=0): + win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, freq=freq, center=center, win_type=win_type, - on=on, axis=axis) + on=on, axis=axis, closed=closed) cls.rolling = rolling diff --git a/pandas/core/window.py b/pandas/core/window.py index 89d2f5b24d77e..5b84b075ce81a 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -56,11 +56,12 @@ class _Window(PandasObject, SelectionMixin): _attributes = ['window', 'min_periods', 'freq', 'center', 'win_type', - 'axis', 'on'] + 'axis', 'on', 'closed'] exclusions = set() def __init__(self, obj, window=None, min_periods=None, freq=None, - center=False, win_type=None, axis=0, on=None, **kwargs): + center=False, win_type=None, axis=0, on=None, closed=None, + **kwargs): if freq is not None: warnings.warn("The freq kw is deprecated and will be removed in a " @@ -71,6 +72,7 @@ def __init__(self, obj, window=None, min_periods=None, freq=None, self.blocks = [] self.obj = obj self.on = on + self.closed = closed self.window = window self.min_periods = min_periods self.freq = freq @@ -101,6 +103,10 @@ def validate(self): if self.min_periods is not None and not \ is_integer(self.min_periods): raise ValueError("min_periods must be an integer") + if self.closed is not None and self.closed not in \ + ['right', 'both', 'left', 'neither']: + raise 
ValueError("closed must be 'right', 'left', 'both' or " + "'neither'") def _convert_freq(self, how=None): """ resample according to the how, return a new object """ @@ -374,8 +380,14 @@ class Window(_Window): on : string, optional For a DataFrame, column on which to calculate the rolling window, rather than the index + closed : string, default None + Make the interval closed on the 'right', 'left', 'both' or + 'neither' endpoints. + For offset-based windows, it defaults to 'right'. + For fixed windows, defaults to 'both'. Remaining cases not implemented + for fixed windows. - .. versionadded:: 0.19.0 + .. versionadded:: 0.20.0 axis : int or string, default 0 @@ -717,12 +729,12 @@ def _apply(self, func, name=None, window=None, center=None, raise ValueError("we do not support this function " "in _window.{0}".format(func)) - def func(arg, window, min_periods=None): + def func(arg, window, min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats arg = _ensure_float64(arg) return cfunc(arg, - window, minp, indexi, **kwargs) + window, minp, indexi, closed, **kwargs) # calculation function if center: @@ -731,11 +743,13 @@ def func(arg, window, min_periods=None): def calc(x): return func(np.concatenate((x, additional_nans)), - window, min_periods=self.min_periods) + window, min_periods=self.min_periods, + closed=self.closed) else: def calc(x): - return func(x, window, min_periods=self.min_periods) + return func(x, window, min_periods=self.min_periods, + closed=self.closed) with np.errstate(all='ignore'): if values.ndim > 1: @@ -768,7 +782,8 @@ def count(self): for b in blocks: result = b.notnull().astype(int) result = self._constructor(result, window=window, min_periods=0, - center=self.center).sum() + center=self.center, + closed=self.closed).sum() results.append(result) return self._wrap_results(results, blocks, obj) @@ -789,11 +804,10 @@ def apply(self, func, args=(), kwargs={}): offset = _offset(window, self.center) 
index, indexi = self._get_index() - def f(arg, window, min_periods): + def f(arg, window, min_periods, closed): minp = _use_window(min_periods, window) - return _window.roll_generic(arg, window, minp, indexi, - offset, func, args, - kwargs) + return _window.roll_generic(arg, window, minp, indexi, closed, + offset, func, args, kwargs) return self._apply(f, func, args=args, kwargs=kwargs, center=False) @@ -864,7 +878,7 @@ def std(self, ddof=1, *args, **kwargs): def f(arg, *args, **kwargs): minp = _require_min_periods(1)(self.min_periods, window) return _zsqrt(_window.roll_var(arg, window, minp, indexi, - ddof)) + self.closed, ddof)) return self._apply(f, 'std', check_minp=_require_min_periods(1), ddof=ddof, **kwargs) @@ -911,7 +925,7 @@ def quantile(self, quantile, **kwargs): def f(arg, *args, **kwargs): minp = _use_window(self.min_periods, window) return _window.roll_quantile(arg, window, minp, indexi, - quantile) + self.closed, quantile) return self._apply(f, 'quantile', quantile=quantile, **kwargs) @@ -1044,6 +1058,10 @@ def validate(self): elif self.window < 0: raise ValueError("window must be non-negative") + if not self.is_datetimelike and self.closed is not None: + raise ValueError("closed only implemented for datetimelike " + "and offset based windows") + def _validate_monotonic(self): """ validate on is monotonic """ if not self._on.is_monotonic: diff --git a/pandas/core/window.pyx b/pandas/core/window.pyx index a06e616002ee2..3bb8abe26c781 100644 --- a/pandas/core/window.pyx +++ b/pandas/core/window.pyx @@ -158,9 +158,14 @@ cdef class MockFixedWindowIndexer(WindowIndexer): index of the input floor: optional unit for flooring + left_closed: bint + left endpoint closedness + right_closed: bint + right endpoint closedness """ def __init__(self, ndarray input, int64_t win, int64_t minp, + bint left_closed, bint right_closed, object index=None, object floor=None): assert index is None @@ -191,9 +196,14 @@ cdef class FixedWindowIndexer(WindowIndexer): index of 
the input floor: optional unit for flooring the unit + left_closed: bint + left endpoint closedness + right_closed: bint + right endpoint closedness """ def __init__(self, ndarray input, int64_t win, int64_t minp, + bint left_closed, bint right_closed, object index=None, object floor=None): cdef ndarray start_s, start_e, end_s, end_e @@ -229,10 +239,16 @@ cdef class VariableWindowIndexer(WindowIndexer): min number of obs in a window to consider non-NaN index: ndarray index of the input + left_closed: bint + left endpoint closedness + True if the left endpoint is closed, False if open + right_closed: bint + right endpoint closedness + True if the right endpoint is closed, False if open """ def __init__(self, ndarray input, int64_t win, int64_t minp, - ndarray index): + bint left_closed, bint right_closed, ndarray index): self.is_variable = 1 self.N = len(index) @@ -244,12 +260,13 @@ cdef class VariableWindowIndexer(WindowIndexer): self.end = np.empty(self.N, dtype='int64') self.end.fill(-1) - self.build(index, win) + self.build(index, win, left_closed, right_closed) # max window size self.win = (self.end - self.start).max() - def build(self, ndarray[int64_t] index, int64_t win): + def build(self, ndarray[int64_t] index, int64_t win, bint left_closed, + bint right_closed): cdef: ndarray[int64_t] start, end @@ -261,7 +278,13 @@ cdef class VariableWindowIndexer(WindowIndexer): N = self.N start[0] = 0 - end[0] = 1 + + # right endpoint is closed + if right_closed: + end[0] = 1 + # right endpoint is open + else: + end[0] = 0 with nogil: @@ -271,6 +294,10 @@ cdef class VariableWindowIndexer(WindowIndexer): end_bound = index[i] start_bound = index[i] - win + # left endpoint is closed + if left_closed: + start_bound -= 1 + # advance the start bound until we are # within the constraint start[i] = i @@ -286,9 +313,13 @@ cdef class VariableWindowIndexer(WindowIndexer): else: end[i] = end[i - 1] + # right endpoint is open + if not right_closed: + end[i] -= 1 + -def 
get_window_indexer(input, win, minp, index, floor=None, - use_mock=True): +def get_window_indexer(input, win, minp, index, closed, + floor=None, use_mock=True): """ return the correct window indexer for the computation @@ -299,6 +330,10 @@ def get_window_indexer(input, win, minp, index, floor=None, minp: integer, minimum periods index: 1d ndarray, optional index to the input array + closed: string, default None + {'right', 'left', 'both', 'neither'} + window endpoint closedness. Defaults to 'right' in + VariableWindowIndexer and to 'both' in FixedWindowIndexer floor: optional unit for flooring the unit use_mock: boolean, default True @@ -307,18 +342,38 @@ def get_window_indexer(input, win, minp, index, floor=None, compat Indexer that allows us to use a standard code path with all of the indexers. + Returns ------- tuple of 1d int64 ndarrays of the offsets & data about the window """ + cdef: + bint left_closed = False + bint right_closed = False + + assert closed is None or closed in ['right', 'left', 'both', 'neither'] + + # if windows is variable, default is 'right', otherwise default is 'both' + if closed is None: + closed = 'right' if index is not None else 'both' + + if closed in ['right', 'both']: + right_closed = True + + if closed in ['left', 'both']: + left_closed = True + if index is not None: - indexer = VariableWindowIndexer(input, win, minp, index) + indexer = VariableWindowIndexer(input, win, minp, left_closed, + right_closed, index) elif use_mock: - indexer = MockFixedWindowIndexer(input, win, minp, index, floor) + indexer = MockFixedWindowIndexer(input, win, minp, left_closed, + right_closed, index, floor) else: - indexer = FixedWindowIndexer(input, win, minp, index, floor) + indexer = FixedWindowIndexer(input, win, minp, left_closed, + right_closed, index, floor) return indexer.get_data() # ---------------------------------------------------------------------- @@ -327,7 +382,7 @@ def get_window_indexer(input, win, minp, index, floor=None, def 
roll_count(ndarray[double_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, count_x = 0.0 int64_t s, e, nobs, N @@ -336,7 +391,7 @@ def roll_count(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, _ = get_window_indexer(input, win, - minp, index) + minp, index, closed) output = np.empty(N, dtype=float) with nogil: @@ -408,7 +463,7 @@ cdef inline void remove_sum(double val, int64_t *nobs, double *sum_x) nogil: def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, prev_x, sum_x = 0 int64_t s, e @@ -418,7 +473,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, is_variable = get_window_indexer(input, win, - minp, index) + minp, index, + closed) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -523,7 +579,7 @@ cdef inline void remove_mean(double val, Py_ssize_t *nobs, double *sum_x, def roll_mean(ndarray[double_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, prev_x, result, sum_x = 0 int64_t s, e @@ -533,7 +589,8 @@ def roll_mean(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, is_variable = get_window_indexer(input, win, - minp, index) + minp, index, + closed) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -647,7 +704,7 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x, def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, - object index, int ddof=1): + object index, object closed, int ddof=1): """ Numerically stable implementation using Welford's method. 
""" @@ -660,7 +717,8 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, is_variable = get_window_indexer(input, win, - minp, index) + minp, index, + closed) output = np.empty(N, dtype=float) # Check for windows larger than array, addresses #7297 @@ -789,7 +847,7 @@ cdef inline void remove_skew(double val, int64_t *nobs, double *x, double *xx, def roll_skew(ndarray[double_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, prev double x = 0, xx = 0, xxx = 0 @@ -800,7 +858,8 @@ def roll_skew(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, is_variable = get_window_indexer(input, win, - minp, index) + minp, index, + closed) output = np.empty(N, dtype=float) if is_variable: @@ -916,7 +975,7 @@ cdef inline void remove_kurt(double val, int64_t *nobs, double *x, double *xx, def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, prev double x = 0, xx = 0, xxx = 0, xxxx = 0 @@ -927,7 +986,8 @@ def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp, ndarray[double_t] output start, end, N, win, minp, is_variable = get_window_indexer(input, win, - minp, index) + minp, index, + closed) output = np.empty(N, dtype=float) if is_variable: @@ -985,11 +1045,11 @@ def roll_kurt(ndarray[double_t] input, int64_t win, int64_t minp, def roll_median_c(ndarray[float64_t] input, int64_t win, int64_t minp, - object index): + object index, object closed): cdef: double val, res, prev - bint err=0, is_variable - int ret=0 + bint err = 0, is_variable + int ret = 0 skiplist_t *sl Py_ssize_t i, j int64_t nobs = 0, N, s, e @@ -1001,7 +1061,7 @@ def roll_median_c(ndarray[float64_t] input, int64_t win, int64_t minp, # actual skiplist ops outweigh any window computation costs start, end, N, win, minp, is_variable = get_window_indexer( input, 
win, - minp, index, + minp, index, closed, use_mock=False) output = np.empty(N, dtype=float) @@ -1111,7 +1171,7 @@ cdef inline numeric calc_mm(int64_t minp, Py_ssize_t nobs, def roll_max(ndarray[numeric] input, int64_t win, int64_t minp, - object index): + object index, object closed): """ Moving max of 1d array of any numeric type along axis=0 ignoring NaNs. @@ -1123,12 +1183,15 @@ def roll_max(ndarray[numeric] input, int64_t win, int64_t minp, is below this, output a NaN index: ndarray, optional index for window computation + closed: 'right', 'left', 'both', 'neither' + make the interval closed on the right, left, + both or neither endpoints """ - return _roll_min_max(input, win, minp, index, is_max=1) + return _roll_min_max(input, win, minp, index, closed=closed, is_max=1) def roll_min(ndarray[numeric] input, int64_t win, int64_t minp, - object index): + object index, object closed): """ Moving max of 1d array of any numeric type along axis=0 ignoring NaNs. @@ -1141,11 +1204,11 @@ def roll_min(ndarray[numeric] input, int64_t win, int64_t minp, index: ndarray, optional index for window computation """ - return _roll_min_max(input, win, minp, index, is_max=0) + return _roll_min_max(input, win, minp, index, is_max=0, closed=closed) cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, - object index, bint is_max): + object index, object closed, bint is_max): """ Moving min/max of 1d array of any numeric type along axis=0 ignoring NaNs. 
@@ -1170,7 +1233,7 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, starti, endi, N, win, minp, is_variable = get_window_indexer( input, win, - minp, index) + minp, index, closed) output = np.empty(N, dtype=input.dtype) @@ -1272,7 +1335,8 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, - int64_t minp, object index, double quantile): + int64_t minp, object index, object closed, + double quantile): """ O(N log(window)) implementation using skip list """ @@ -1292,7 +1356,7 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, # actual skiplist ops outweigh any window computation costs start, end, N, win, minp, is_variable = get_window_indexer( input, win, - minp, index, + minp, index, closed, use_mock=False) output = np.empty(N, dtype=float) skiplist = IndexableSkiplist(win) @@ -1335,7 +1399,7 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, def roll_generic(ndarray[float64_t, cast=True] input, - int64_t win, int64_t minp, object index, + int64_t win, int64_t minp, object index, object closed, int offset, object func, object args, object kwargs): cdef: @@ -1355,12 +1419,13 @@ def roll_generic(ndarray[float64_t, cast=True] input, start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, + closed, floor=0) output = np.empty(N, dtype=float) counts = roll_sum(np.concatenate([np.isfinite(input).astype(float), np.array([0.] 
* offset)]), - win, minp, index)[offset:] + win, minp, index, closed)[offset:] if is_variable: diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 9cd3b8b839a9b..29b91fb115a33 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -435,6 +435,12 @@ def test_numpy_compat(self): tm.assertRaisesRegexp(UnsupportedFunctionCall, msg, getattr(r, func), dtype=np.float64) + def test_closed(self): + df = DataFrame({'A': [0, 1, 2, 3, 4]}) + # closed only allowed for datetimelike + with pytest.raises(ValueError): + df.rolling(window=3, closed='neither') + class TestExpanding(Base): @@ -3389,6 +3395,45 @@ def test_min_periods(self): result = df.rolling('2s', min_periods=1).sum() tm.assert_frame_equal(result, expected) + def test_closed(self): + + # xref GH13965 + + df = DataFrame({'A': [1] * 5}, + index=[pd.Timestamp('20130101 09:00:01'), + pd.Timestamp('20130101 09:00:02'), + pd.Timestamp('20130101 09:00:03'), + pd.Timestamp('20130101 09:00:04'), + pd.Timestamp('20130101 09:00:06')]) + + # closed must be 'right', 'left', 'both', 'neither' + with pytest.raises(ValueError): + self.regular.rolling(window='2s', closed="blabla") + + expected = df.copy() + expected["A"] = [1.0, 2, 2, 2, 1] + result = df.rolling('2s', closed='right').sum() + tm.assert_frame_equal(result, expected) + + # default should be 'right' + result = df.rolling('2s').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [1.0, 2, 3, 3, 2] + result = df.rolling('2s', closed='both').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 2, 2, 1] + result = df.rolling('2s', closed='left').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 1, 1, np.nan] + result = df.rolling('2s', closed='neither').sum() + tm.assert_frame_equal(result, expected) + def test_ragged_sum(self): df = self.ragged From 
f2ed595d0b77c2e4c68edf1eae9ddca8fba42651 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Thu, 13 Apr 2017 11:41:03 +0000 Subject: [PATCH 16/56] TST: use checkstack level as per comments in groupby.agg with dicts depr testing (#15992) --- pandas/core/groupby.py | 2 +- pandas/tests/groupby/test_aggregate.py | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5e55196803c22..5591ce4b0d4aa 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2843,7 +2843,7 @@ def _aggregate_multiple_funcs(self, arg, _level): ("using a dict on a Series for aggregation\n" "is deprecated and will be removed in a future " "version"), - FutureWarning, stacklevel=4) + FutureWarning, stacklevel=3) columns = list(arg.keys()) arg = list(arg.items()) diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index c2d6422c50d02..22d1de99c48be 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -310,18 +310,15 @@ def test_agg_dict_renaming_deprecation(self): 'B': range(5), 'C': range(5)}) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False) as w: + with tm.assert_produces_warning(FutureWarning) as w: df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, 'C': {'bar': ['count', 'min']}}) assert "using a dict with renaming" in str(w[0].message) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): + with tm.assert_produces_warning(FutureWarning): df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False) as w: + with tm.assert_produces_warning(FutureWarning) as w: df.groupby('A').B.agg({'foo': 'count'}) assert "using a dict on a Series for aggregation" in str( w[0].message) From 7ee73ffcfd1cdf896a53589eebf74557210ab26c Mon Sep 17 00:00:00 2001 From: gfyoung Date: Thu, 13 Apr 2017 18:11:33 -0400 Subject: [PATCH 17/56] BUG: 
Don't overflow PeriodIndex in to_csv (#15984) * BUG: Don't overflow PeriodIndex in to_csv Closes gh-15982. * TST: Test to_native_types for Period/DatetimeIndex --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/formats/format.py | 5 +- pandas/indexes/base.py | 21 +++++++- pandas/tests/frame/test_to_csv.py | 28 +++++++++++ .../tests/indexes/datetimes/test_formats.py | 47 ++++++++++++++++++ pandas/tests/indexes/period/test_formats.py | 48 +++++++++++++++++++ 6 files changed, 145 insertions(+), 5 deletions(-) create mode 100644 pandas/tests/indexes/datetimes/test_formats.py create mode 100644 pandas/tests/indexes/period/test_formats.py diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 07f393a814f8b..a105a6801fb61 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1344,6 +1344,7 @@ I/O - Bug in ``pd.read_csv()`` in which invalid values for ``nrows`` and ``chunksize`` were allowed (:issue:`15767`) - Bug in ``pd.read_csv()`` for the Python engine in which unhelpful error messages were being raised when parsing errors occurred (:issue:`15910`) - Bug in ``pd.read_csv()`` in which the ``skipfooter`` parameter was not being properly validated (:issue:`15925`) +- Bug in ``pd.to_csv()`` in which there was numeric overflow when a timestamp index was being written (:issue:`15982`) - Bug in ``pd.tools.hashing.hash_pandas_object()`` in which hashing of categoricals depended on the ordering of categories, instead of just their values. 
(:issue:`15143`) - Bug in ``.to_json()`` where ``lines=True`` and contents (keys or values) contain escaped characters (:issue:`15096`) - Bug in ``.to_json()`` causing single byte ascii characters to be expanded to four byte unicode (:issue:`15344`) diff --git a/pandas/formats/format.py b/pandas/formats/format.py index 2665f5aea145d..66a81aadc4213 100644 --- a/pandas/formats/format.py +++ b/pandas/formats/format.py @@ -1564,10 +1564,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.chunksize = int(chunksize) self.data_index = obj.index - if isinstance(obj.index, PeriodIndex): - self.data_index = obj.index.to_timestamp() - - if (isinstance(self.data_index, DatetimeIndex) and + if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and date_format is not None): self.data_index = Index([x.strftime(date_format) if notnull(x) else '' for x in self.data_index]) diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index bf7975bcdb964..ab5c01388e652 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -1820,7 +1820,26 @@ def _format_with_header(self, header, na_rep='NaN', **kwargs): return header + result def to_native_types(self, slicer=None, **kwargs): - """ slice and dice then format """ + """ + Format specified values of `self` and return them. + + Parameters + ---------- + slicer : int, array-like + An indexer into `self` that specifies which values + are used in the formatting process. + kwargs : dict + Options for specifying how the values should be formatted. 
+ These options include the following: + + 1) na_rep : str + The value that serves as a placeholder for NULL values + 2) quoting : bool or None + Whether or not there are quoted values in `self` + 3) date_format : str + The format used to represent date-like values + """ + values = self if slicer is not None: values = values[slicer] diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 927b9f6a48718..2d2dfa9a3d849 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1143,3 +1143,31 @@ def test_to_csv_quoting(self): df = df.set_index(['a', 'b']) expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n' self.assertEqual(df.to_csv(quoting=csv.QUOTE_ALL), expected) + + def test_period_index_date_overflow(self): + # see gh-15982 + + dates = ["1990-01-01", "2000-01-01", "3005-01-01"] + index = pd.PeriodIndex(dates, freq="D") + + df = pd.DataFrame([4, 5, 6], index=index) + result = df.to_csv() + + expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n' + assert result == expected + + date_format = "%m-%d-%Y" + result = df.to_csv(date_format=date_format) + + expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n' + assert result == expected + + # Overflow with pd.NaT + dates = ["1990-01-01", pd.NaT, "3005-01-01"] + index = pd.PeriodIndex(dates, freq="D") + + df = pd.DataFrame([4, 5, 6], index=index) + result = df.to_csv() + + expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n' + assert result == expected diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py new file mode 100644 index 0000000000000..ea2731f66f0ef --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -0,0 +1,47 @@ +from pandas import DatetimeIndex + +import numpy as np + +import pandas.util.testing as tm +import pandas as pd + + +def test_to_native_types(): + index = DatetimeIndex(freq='1D', periods=3, start='2017-01-01') + + # First, with no arguments. 
+ expected = np.array(['2017-01-01', '2017-01-02', + '2017-01-03'], dtype=object) + + result = index.to_native_types() + tm.assert_numpy_array_equal(result, expected) + + # No NaN values, so na_rep has no effect + result = index.to_native_types(na_rep='pandas') + tm.assert_numpy_array_equal(result, expected) + + # Make sure slicing works + expected = np.array(['2017-01-01', '2017-01-03'], dtype=object) + + result = index.to_native_types([0, 2]) + tm.assert_numpy_array_equal(result, expected) + + # Make sure date formatting works + expected = np.array(['01-2017-01', '01-2017-02', + '01-2017-03'], dtype=object) + + result = index.to_native_types(date_format='%m-%Y-%d') + tm.assert_numpy_array_equal(result, expected) + + # NULL object handling should work + index = DatetimeIndex(['2017-01-01', pd.NaT, '2017-01-03']) + expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object) + + result = index.to_native_types() + tm.assert_numpy_array_equal(result, expected) + + expected = np.array(['2017-01-01', 'pandas', + '2017-01-03'], dtype=object) + + result = index.to_native_types(na_rep='pandas') + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py new file mode 100644 index 0000000000000..533481ce051f7 --- /dev/null +++ b/pandas/tests/indexes/period/test_formats.py @@ -0,0 +1,48 @@ +from pandas import PeriodIndex + +import numpy as np + +import pandas.util.testing as tm +import pandas as pd + + +def test_to_native_types(): + index = PeriodIndex(['2017-01-01', '2017-01-02', + '2017-01-03'], freq='D') + + # First, with no arguments. 
+ expected = np.array(['2017-01-01', '2017-01-02', + '2017-01-03'], dtype=' Date: Thu, 13 Apr 2017 18:47:39 -0400 Subject: [PATCH 18/56] DOC: Demo incorporating pytest into test classes (#15989) * MAINT: Change CI picture to reflect non-owner * DOC: Demo using pytest features in test classes --- doc/source/_static/ci.png | Bin 224055 -> 374599 bytes doc/source/contributing.rst | 33 ++++++++++++++++++++++----------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/doc/source/_static/ci.png b/doc/source/_static/ci.png index 82985ff8c204abcae6eead66d676cb880f92d3af..4570ed21555860ad5c8ab41f159e78e492334e82 100644 GIT binary patch literal 374599 zcmeFXV{m10^Da7*2`08RadvD?>||owwzK1jZSB~$ZS2^b*tRFS^S<|+O`@2clmoGoQNQnxm05i`wmJv=b+dAA_aR`RrLXCVJxZ3apz$IbFp z{cN>kkEN2Mi;L{5-87kY93xNA9()X=L?@4~;2lR%m6uI*bDK7^MeEj3dnJKnSRkNl z&)5zr=H!4~e3&M~eQ9OGcD~Ys>x~tNY_Q1HjdOfm`;N&4 z5%IaXrjdr_RxH-WY2tx;XVNuo!K&7)X7W)D5vJDf%JJ@?P2Q8P8maXJk(;2mzgoG& zIkoA6p2Is4^GKWD@djJH5Owt}-D2(_pLKuNv)?2h^&qa|;FDRJwzaCGva0IZV4&nc z+~nPdk`w#f-Izff*RuKx;XMlud$Ze8+vg5Pl@+W!%vr}fH|qZzv0?U-+Z6u|MSHRM z+yK_yOY?Jqj>D^Ln2EKVnK#F1Vn9w)z0k{33CR{*vINGRbHL?Mr?1=C=gY~L%MbRd zoQ3By)sl;2w)Lys5h#@S5TEKT-_gy_J9s@R?Mxox+fjYm&SW^f$$`HOu>}e=RB6aK zT^~^ck+{=M#LU*K23qTS4>K=z3=K3+x3v;;UgAf*?-Kq{sc2}{D{EUb;6u+x{ZJ$%2+b; z;L}l!NU_cQc0XLAz)Eo$TXdQ6q~{|_pD*X;MM@Mz-3#fAS!wJ0NX*~AVY8R?OCudp z@Bgz)4*_iF;8PBbpecsfm=T9Le>yrAWw>U)qb?qMXo9&9tK&VmTe*T)xQI2M+MH`y zDp{=RP?_c@4jxYLXKU~K8Ri<47(rYxYrl$dX6h_`sQ6I7y9=9aqF&x^Y`)4@kEk(- z048JJK%Iut1@AEm_<}G+4`~#^BrC4$K=pcN*>`!OtDaYK76sVIA4V)%ahN=6T`}6X zj;^(?Gp6~c_+r`dVAcqWVY((9b5rl>Vnq%|n>}}LD!36qeH4&|1j(3mfWox`7$deg zTkC>&@w2-0B`^shSr`=^%-!&X#dJHFkaw#e__X#|l~v^Fu6rBB6#;98I>uB-V@kI+ zIEUehhAuxms3?5AtSA?rV60TfCW8z5G#aNZp}q~E8y~sN>{Ph=7<)G@B`uJoD!ikG z&LZ2qbnojNaG}~)(?G%8T1=w@>t@q~kgHD!FPyQ=&pYVVh|ihiS(Q@iR&rdwCkqiJ zK$3Z4Q)LoPaWyW%-p0iSL_q}qeYEaK`BahSAZG31DR$>QIA)@%VC25`8@qt zVoalu#Sq@x{)bQi$fx=oAhX@nRVo&UW$Ns+}_!?oG5{gA<5u%3TChEp6|^o<&> zNI}}?uFE<-ruyVKUDy;gatGu;t}m8 
zn%uCw?`cuy>pHQpO7I8lj$^7zr&#P;qMtD|J;5l#VSd*CO6-+CL_{2EHWQb?-t`M= zR^i0LT7FyIuspxUPKvqVZ3&`9vK+t+jPMGdP51}0d<-U*v+d$%E8g*60RhLVh$xpi z(^>9()?oD;-E*zTSod+8<}BjJ$hv2@GKgfk(W8IIPkMM%qMoT<@@<+Oxj_jKDo&W| z=+Jm7CmxDtMXw=_6;HLOJ?ElP7bDoWcfOoPXxhzr-v%ww=eTCLC+hB6(?m*1V2gHr ziz#;1qra?e(>vPhp!m~#^AjS^y-$wE+|8|b`!wC!KI`Q4+mgDKw*d(B__TANCPXB} zV!;2MyZyJ=VQt--$wtMV)i{U6*6B?|5HBL=9!7^iFR_DS?CDa`I;jj7cgbk*coP0^ z7xQw4{D``}bH#~skCD;X%3JCz9h)Cv#{+5$vE7|B6?yfgLYt~_iJekC+ecHwGBIRw zegv)~ZyqO78&SzbInA^;yw`63?BYpuuG{9cZx%(-7yoD-rdgpCgI`VVQN#Uy^)BgE(M8h8H<1ohq zvVcK@U46O#OCZtN_;ijq+0;`_YiGK#{bqi%SLkBMPzbI6vXT%gQnO?T(^ zw$oJp%*Z?C1M+A3g;(tJOBQA9G9ql}hXpqZTJ&_=^INJ;mX70}zp#PYpYWeMa^X34 zZ5jeeukr_R@+6lte8BOo&kdrIJGjwdsz!I>o1E+gV5jYNC;Wc_+|5yZ?=SBwKypES z9NjtVX|{9gQ^=lJv=p~FAM@%;`Ham=(*41&Rbp+5ym~0H1ri)NE}mOlY}~3UENiP( z&ja@<__8Q(X3A4S)?>>~&4p|AU+a2L8mX$&yYl%d74LJ*J1+eXP2#11KkjN$w-}tb ziI6^aJQ0cA5=K84IILUgO3J&F-362V;XtwWTztG{IBPa`=e;}&lkQ?%w}%gi$>t_P zIj!!3EiJ?2mVpZnZ9NC?{$4#Aa>DYVkes^E6rp)JyJ{q_*)mAwHQ#Dlr;S+lZgq?^ z)#|h<(lg-TqwU?fsDBhm9!*I2FvQA9jm!Qy)5c*Z)}Nlu$Gsaj_B3D4w6HFGl@an>9omHr+8DFZ_XuX}(aCLNDW-UhKD+CpHn*5_QcrSee*A$zc|DUC)& zE+w3BP?(kvY{^#Vw3S)wYo_B}?qy3+9ngx;yA9)w)(ip|mMf$L8fqSk#iWUuXbLF_ z`hQK2Hq}lVLyECAh*K^ED59V-Lds(^1 z^tNadRynD}#}~mW;-G>7nt3CtF=oS%1-~^x2|l|1w*u?EryI-J2~;GO2`CiYis?NS zd2=%^=o^i-gfL*&T7+-EzD3(;QncYP6YI}j=r}IrII~NoS)f;H=FIl)p0e~Y+sX{| z-O+h(4YMG!6>a)#0i@DV#9*b;@1f!STzsx&S;FehOpmhTxm zGIL^2+=K&GG0Mrr-|^d1k>Fiu+Qap4>vUn(*qGRZ)yNeF>9DSbSLuGx@uKy}HdXDd zxoVllmX{e?>+6>UkWNeXhmj*lVVbmukHL?y&LGqhu*nY4peg(VMT*B*DQ8Ar3^94e z*5goOV%c%z0LRRPV(!)D5@yk3-bx zNYo@-a?bUvl~KHu(!sqw8uIVU1d&{I(t#eh2W5k0gM|Bf=U2$Z36@ z;ZG^s4UM@UZ#U;KDFK=ZwHoAw!H<`CJmYo7*LKrF50@xVVt5HdsE%4FCLG1KdmdOZ z#=go`ohrX0qzYiWDt;k7RbR72HmyAYQuAJpo;cpKX$8ghPl7%cp}?l)6osYluT3T=MDo8^C|Df*a zgL>!3ZCr~x{(WYSgZ{W@mQxmONQ8Tuy1CkduYcA{1oRJRDjO`VHBu-qmIu8=VH%c?)wN*Mp`{rr_XeqFbC&FV#(8Ms 
zV~V9Q$^*Z8&O~XEgjv8|#xFU!j!bv(y%}>NB;1BjgLk=gi-rBx&f^l?)AIPjFG@h>F+3z?n&K@OswI)S6I?kah1}j1+QJ9SZo=>s#_aF}}j92L*+TnxqSO=U_7m8XGx;d{ELsyf_Py4*98 z%O%#nc7PN`mvsn4b?oF9?@5+E!6jTCA09aH%TaWJ}beb!Tq)z5DRa#U4{Uia#i zm+J=uA;Zn+icyT92L`T=IwVz{_qBdt zvOoXr8C-dBS+DK9a_jfH+9P;?s_33GI;0&_gPx73US?}|vtJabQ=c)SofS+^mq zD|A^ARv$LMtoH1?T!dh=-4tCNc8PH;jM*e6r!*@5*^VEAov|`(^I-jr;p%Dyc0#3P zaC1t8|Y#Lv(JK`|8g zobb-5f9>~9Y`@(%&15G~nB`&-t;c$a4BQwWE&sW2N%6+zWLcf0sY`GCrF>+z9(Qy< z;cTD=_;QBb$&Fv@&TOdHYND3>cZtkSpzc}mvcCRQv1;|$0>?v=U%+VdJ@Y6h>+ZAr zJK)k+W^WdzlCE0g=WA5tIRA7zfYr`$iRfm#AnGuSqhbvyCA|)m?fiDG?)|qV$B`+L zYZ@*IVzNp}cVVT{Cu~dSfXdQesb^zeUC{*fWIy2M#rZL(duMeMN}}s`MKCGnn1x-| zgM@(TlX2+Cy(K5_7zH|^7?$k!_E()NzJmt&>6e2u=Dfw>O0|_u8V!5KrC!ZL#4Bu+#2v~dm?Cmc8OV(~v zjQUTYBa|~FwLPTC3yC)_=b1HpP_@Jc^iO1kSxio`~CK4XvDXsgnbqx}J zMk_^5yIwAs>~<7CM&D=D@6#UK$%ThPBUUa0r<(7|NxAMTd1Bb-_VM5HlWJ>us!a;z zx!=9Wm#14j9|C$lUr87T5IGuLFFn>4YywsJ$cc5CxL+d%YLK3Xj=(yyMs~tb2Z-xS zgqOTAVdb__(#iEjiuG^^EcFh92A=>Jzt3O%gtksbHqKYsQnx?M=DH3~YUEl^8F8@j zrY>Vw=$4*yf2Z8Q9sQ-v{-_`SeD}z?At#k*bxKZ5WIC3?eUh7fFr`THV=Oci5VW?v zOH`rfd~DP~m@nM-kDy^2Lt@tkznic5e>VT4Qzq{c|I z=xNSkpRY^cn(AXl1C)IGw$CYgK8AiFi8I)CO$PqKGF>*>V11S)N%>bJZ?=PGGyDwv z1onF}H_Yz}0_Mb%mpW)XNItl($~7W`$U)3q=Pd1bEbwC%7qUzI&!A8NkBP2DSvNdP zwXu4?<($HiA6W?@tRaR|t~Pt^M6|R-zLsq{$^W5P2YZ4b^>)2vzT$dd)qy5)-HIXO z!l_m=@oEUekJ5Co2jRPv8v1^kqQYu#ExFOujh<5vIQ^!y>!MF+F&1T7< z$BDv*RFuolRlu{L4B6Y|)Jmc?wW^j{rJI@Kw&XHqLBxrI=$DD7v0ZJ4C!D9R6aQu>Y#E?{Va;)Vep+3|+?6KLt$ zrd_(lstrtc05LXWHmO#keG`uuTl%#-6AvLZ!I)^f^rSJ$n4)+IfM!yHDZnE>QCpR* zM7&EcU$p^%iz>cu&J)GeF)LZ8u~n|%22q^qQ!VecuCpd{2%DOU}IWf4Y$xH9K5EOcV&@Gjsknq#d^j#o2>Y~<`wnEaM^Z9D+3Yy-p9 zY}CoL3*$(b@0>faV9Y_Ru&OVGlLiy23@RcShQ2G1#gnS-$-eZtUvKzK^hK?!o;HY}JR8>0377_ehQe~F8wl57xved^NBEjUeg@lqigo6Q4G zRAFR2a1J1P7KW$bm$20*8mt4xQ!^I{^)A#LGceuW1GG3uxqK&AH=Q>-xlFHIro!No zU)LH{f<$DhkwLorM>8>&)mlLzqjE!LV*1T>^R5}BlVM=vLvScvLQq)*jvNVVdSeTL z)+3ghW%9)$oa*x60)opV_))v^e0}dDRyb2J^lzs`3-LTWdOkUc9DMy-PEf`RL_PP; 
zaa0TAqsRL$NFDq&RS33g)5Lu(m`jlUaDPw4ZWDB#1~|Btpu0u;`3Nd?AM0B~*XulX zm8=iUcGsyXbXoUpJ1=k{cg3InvnvLCi>J zr^07E9*(H%UY820xe9fG9z{$%^U>?(*+2OBz1Jq8)G=vBG2Flk-N5a56*V8|+BwW2 zmaoyK-QP^W)*;l?)jzet9+C(u;q{CI*# z&dAn;*JldX7^qrR+AdhI(`slXYZ(V_OhVIkON~N`EYD1!wfBsks`VC75rz0nxd4X@ z&}Oqf)$rw-I>&l^?DsWgI=_Qt**t?o3M_3nNS4iX9%91}RZm@Wl&rt8XXH-9M8OzO zRb#I|Ii25x*~)Y(Th4x|PK>MNc@ zgjh3JarIf-c+A64QfiyFYJa_Q4Q~WdOf^m zSERerYlAwAK$WFYG%-Bn5r(bn!$@c`kgy%s86C}-D%1B2f5YQ(bNdT-KI77Vv_v1Mv_s-)8`G<1^ZMRJCKm2rf6LC= zH>;GuWx6R)bXPoB)5US4`_1d8d7cm+qa-$LgV-ROOqg*roeCrrosok#p-7SFPJjao z1;}(lIZ~247eU5%n!B7JnJTCNU%aG->%0S@3F6m$imf&wamie+7GU<8UfL}tB}NKM znI|xFmMxHUXa=6e6a^b8Sv>90^IJ)nLIblS3QLR=mQ?pK^{XRO@7%T#NUCAvJ+_!NXu3Y0^Y#gWHG zpd^WW-tehln{kn83EHwyV$fc=fVeyB5^n%mESLgM6%cfXX~fjW)!N#%`>Qz0)My)N zI83MC4%=>X{0^r92~e7BrW{$UI8cy|auEAtUt|!*hblO!RmT>5A|rGS4p}8Bbz)iC z2M8h{#&z_{DuGXxS!X!avDy{N<*Jq*(CkM~RYXeE$??$W`J?QKvvG2a23)_w;R#Jw zR!oUo_7!EQ_9Npu8za=L=9tVXMJ4W$^{>xcr^kH3+6NUPC#ChgjS3u`tmBw%WAL6l)Uf5gLkN1n{k^C!*tT$ z(V9Bd(!;fFm8xrIES5CvUl+U)m4ihSQ8)IGx-z)YtE9B_jvm#*vnJ8Hlk;|e-#j2L zQ>7vZN2WE&Rq5_cxrG-KqUsYcw_Q5pdQHH=FDnA&dMBS9)@2Cc0R90!khlb*p< z?Xs)vGfe@c>FCpcC!m*gHYQHwjdtB_t!1~s^j;{z7zaeJVBc*SgKWU{9@bB9{pd>%rLP{QlLZlnZ{xLT_2sjMd_*7QNy_uNt!^ zlj+I@OY6xzGE1@IaJ^6ouJ>-ZiqB2OzOQH5?3dl^9p<#g~BmM zbJk7=iJ=v%P7?L1zJLqE>uSkP+|B{&5e!u(GATm|cbQopE+q%64m-sHy|A|8r$`RD zyd;(S1Y@@IocxQ`F={ZBFKWR7vtvq}1!Y;2^y<@UsY&AoXqNZT3=LI`TguRYy`|Jf zVE>YyaErY*il87Y%$(0tqJ~@G%86g@7#cTtZbk-qzroOQ*iS<8o9v06)PlO%Je|dMAQ5lI*8G(Mt744Q(SMCxE ziJ78YHm3;@Tb=Q{{b5kzZzd!um(YccUYqru5@JiPA{O6uD#C1M58i{5j36c{fEOSR zt?1v_0=)vQ_^P4qM2X&#d=F~JKn_Dp)KXHbJy5Y{plgGjzqSH&N$|7PU?uI=N}g=N z+o?jTBgMgkW80<&{Gec*J|*QE{X=!oF{@skwEbkp!=v}XV2UY3vwS2HVxY`$vBk2= z_U`xt?jR%-{>o@<>*e6zcs3BQ*RU)jVV=o=7U6=seJq!DdZ>%nVoOxHsu2xg>o98I z7dzhuj(-pPQyyZfeywY~sz)6u1vZyySMThucE+u#ZRiM4cVwd5YbobW@!~G?9&=gQ zxkwFlX*XtB7Hq@vsHT779)n{W=+kYJHND{YnZZLb-8)juB#`Ugy&d^2BY0^a9lYzd zT43}0qnd|`Ak!IGdUro^w6kZM4TP3;_s4a?(Q^a7l>c(asij_EF#*uIln6g8eN*FT 
z@u*cDD5u-ggDPeFfy{8s==A6bFN0PbPud6N9y~mZtCJ@8s#lAG6#vv#hHqhk;);sv`E{Ur#8V5#j3N7HHQmH$4`Nt z{~8j8WN9ei=NJ5}>8Ppy*|Fb+ZTq@* zy3a9ZXThjExYNfP?T}}6PnCCC0^b#+PvGpvy?xAMKwxI_n?>3}F7WU#)=%WidSf6? zdx-7v=Y)xdoT4b?JAYRJs~tl-0=Bc6;?(^CzhlDhKa{j}2&)Z2F6)M94Bs1qG92X6 z4}(1X6*q>hFC3mOv52{`LX7jz#$kJgwv#SjC_=H(W7@yM#u@!SF@AAfvb%CP>z=fo z?BzhH+-ge)uBM<>9_Xr2C(MS?^^kVAgf~0Kig)x!aO|5g?JQnnW}$~-o4eO7FO^jl zwT}MIc*iDqKV3d2ggQqgC`Wc{eZP}m;i$F<0lP4U@bY;NOR-37{4v^Dc0kNh8xm}x z!IAq0zOPLPy5CKDu^1ewaM)W~S+}^^R?Dz5MO@Z1we<;JtdEQg&r`ptXlcER57*Z{ z(EFsVuF=1K8Apvw%sh90XV`~VS*JKAu6;VN_I)^0j@iY7_shuIpwIT689u#8%=R4; zx=Jc&QQpj*QR4`I$kaN>Y;_+zTR!ssaGDV7meSiYISN#D<7A7l>p%V>8jTg_r=8(S z7Z_#2W6=7U5Q)vm)t%{-oC3JuH!?QQfVqtOvm4RU74|O4-100No=Wp0tN)FDV2U=o zoA>ZVQ+m25*4aEU_EKgA5dFLx$zU&E)5^n3c7M5x!$EA@rlLzcuWN4VH5Tj3A;fA# zsW?_?j@o>w-2LzyjGXcIW(T#b-^a+Vsa-Muh>+2e6LX5R*P9YVpjE0o-@-vCN1rTZHlEb#yqugWugOR57pAf5^3Du5&tCfxbsCTvY9Oi zP=MId$SWX&164&u@1WizVc*Ats4MsWR@f}l@>&S-b^qw}o!YEWe|uuAZk<>LEfC)U zTl#x??9ucIoErRk7bmqJk1!9Nvy$6Ix=xI=p^MG-cqmPg32X(ML5_}QtoS@h>DEch zS9O{-wR-HOHMWWyRt-xwGmXT`<&7_O^Cdb0rl0Ea+?n!R(N_D1SlUmQH}g4HwX@aR z#@niEt=8I&+WfLl(*P+3$s;mKnCaH%TX?<82cT!NV*4jah5KcLcG@f*^nGm2ikI)G z!`80nCMCSK4F{sj@&Fq~jP&SS6qEGGZWLQYIGLCqL{oF@Rq>#dG{zu#xYW)9(>hIz z;a={0%u%kpzd9BwQnlok_&r=O9BYIy%)oG2jpCMXZE_5cp08 z&cp5o;-m|v-jwywI^VMyFicl6F(tZ-|^)T@$;eQ{9i!CZGGUo z5q*YzZfF&q2=bgTbpglEj_iRShf#Niy{>VI7+A;r(b~lIzE`lZL>mW3G5$wq7DHjH z+i{$q*8qLKGY1=9qq-Z`kGm}!CEHaEamnuEIgeLN`OmYG&P%V%7lEV8#Ev)nPe;TG z9+TViD=y#106(0jR>@xrXEW8}Q4-EP%gX)hVr{Z`9r7!GXQzQcd9X7AuY|~agz4GC zC6T&)_x8NGdM7U)tDFp4MNrRNaa~gY-u-CJRQT8v!0nSPmIyz|J5ZXPBI7BzN{FH!jogcARGyLW?~ZeqUAB+X#GI-eB) zPSxn^-`yMZP7254FchUrSTB+tBeub!8SS05g&r7X@n$FJT{%17r!(pi-o<+9iYp4m z3HwLer2wh~avhJ1iF|6uKTow*QbH7E!iJAB1wPrmruD9GJ^NOV(Eyk{&1xPKLu_TDEx=+HAMr4U(pxAsbIwg85cMFzAXJw4qQ;zsw_*2whUh- z7n&4@eE-_eYnt7z5fW3)(&!?cEpz+6h6@jF0j?XNLM|>p8qg zO+}4>03I}1buE@OFWd|0i=K4s5>|_~(1s5%=+9j%`uSafmfb;@%^0_^O*|lN5)|_q zR6rEyD4L4^_Y2(A=njAE9APnbUpyY2H%n7v%teVQGVor4NvN-WP3~+~@HOy`w%U$z 
zT@(tQ4nZ3%m>2PV7j|#(j5gDi;B>Wl-8enzA7r5?SnH(|Ua%Rq;o7bz zW`_OILQjA^q6|k*raz$(B{31m(tvUQnbXDjT176^@aAi0*8Td>p?`_MvJ^E@o({c$ z$M-$p!;TN(hXthJ56j}kG}X4D{;bZ<*WFh$&(?{t%JPZx>dWEWnJ{!@a+KVTw2Th= zt(R`+L1pMS#-TgcZ?a;#>Pczh8qhI~wW19bHY6mvlOtB43iq(K$SviiXJKqppotQb z6`L5_IWq|^g`>f7?(v(T7qvPRpO9#wz?%lhFgvF)n*qrupl78jckR9rQUYXs?Kz+Zj?{K)17HiRyL6;Nm>C9EzgF|iW)D%LYzlg zhKEpPg(ME8G=g+9j=`*QoFh_BXid6o1*HZ%>I$8sEgugE=^tE+(Od_go2=RANtDnb z@@Nwn*jqHpiBSQAl?Yg#53*EI{*FG6iqkL1!F^&i+PoAM&F{v%|5kcvPTv-#yVODg zRSaO34UdB}1Zfoaa|o$vms<>Y@)+LMn$!a%o5BqNHg#U1=~lAc9JpJrIFjp$DDy70 zLv{lDehmy` z=07>$i*E`4xS!(5YRaqUAP}x`glF3ZlE1KU30AZXS9iX&=cUs{*B}wr)acdMa=p?F57S2u3ODU_gT|aZ|Xbz>^XY$jM)kYt|=|}``79OZ3s(waXt-Em@4l!3HO`V zaC`WU*k>ajLlMfL&e(+NJyU>!XTBTR|LhF3eqBikJX(d6CW}NIdJH)@4~ca*u3kz20(<*L@#XY#Zr9KQhtnJ- z25p)Sd_5> z0-O6&el+&R5rvQ)U%Y7O#m0p_{zWc~1Z*z+_@r7ag8buPp+YPm!_BRORs&b*&fQ+k z^ph_@G3@1r&#KKd`=Bxvc9w?n$u5m|^Loc_%7^0zA(8X*IXmNKrn8M)U)HrjiZ@W_ z*1@lDW6%C%nw6*6zyh~bh0Rqe#cVJGRB7X8-K*RYWN8RP5@Wqb<4=YcMOiFYB0~h$ z(UEV{lw5k`hbc(yd^@Kv`3UhPZR2pNu3^xiBLZ#lI?Vqxa^%?7Y?0`|cB;3(y&>5h zcucXqkJ3zN7;)n5qS&;q<$;O%Lj)ri%$`%**0a5H8J}S($BO_v^>1BT zHnhbaGCAWmZd0-3*XF<0r^n8@g=?4TV!mFMN7x_@Z5b6*{zW%_#~1|D2y@i=e9kH- zK?37XdhX^JV%tAMqClpzbua>pYH>RrTWQ_~|I zq;Hj8Y(2v*R&V6%1%5vOy-H11o-z|;-tOK_d(6k+sKiE%M?G(_6LZAs=;$)>TY$`9 zffT;3q4qJj7q9z{qa2phR^bR*EB>*@X&St$DW9g_ z+`WQ}^XVu&=6I`oAe1>Rq+PnrUY>sg5dKVkmk^2rCyfRi(CzH-OK2HUiCOlPZ>{YOu=c6tg?2|RXT)qRjbW*AFWmo3K5fETo_FU@|e|?wYpL?9n7i1 zftRQ?l~dT)7Bf{TfCP}wU05Nr2H{Z_?B>2c;0tH=M4AE-P=Sf@ifv66Y~cXRU_s z-6r0FqjZafA<;}w48w9$)qiT!VrI3_OZjvxXRFaVjfULXc6Jyl?juye7__yKUCD zn&X4gYJ;(PD*x2SwiC^ zXKDHstn_?3v(bo}ahpd-wE55o3U!+&sPD&E~XKa_dALVGtRx{(tS2Fg-V8JqjXNUDB4 z;rRC$;f}{wvI;YF*|d%Bqn++upY=dBNj*J*;bP}r zONC~z)T(h^iqO6oUM(X)5>c^u-08Z~lNDF#1KteaAeX}!@q5PWe!fvt|tK&(t-`6DTJ)TrD=9AjAq9o|qnJEzHl zThLR)8q4yxDu}v$4h<Wsbq;f?Ub6bZRu<}^u?me8Lpe+2eb>5Ka1UI8J3Z(-Fy2KGrHnKORS>k zNV8)!<%=hs?WjdRKQQ(P&l`k(_Fsi;@ zaC#AVVSSS@;DBvkVIiu)mA?$PL#u}2PrhMs{&rK+%?WGDZ_bl;eYugOxu< 
zh0`zok&Ma{W@kN|A*X4e{_&rKN_X zm&!s|f&jzN+_N3Y8`{X}#F|95GOf^7i6h>#Kb@BVm%1O3YU`Wes3Y}CmB~rpoUxGi zo_4OnRVaxT>-sa8l!|mNUMjB3+fIAOV|AR>1ahf1J;4;wBq>2HZ7#gVrDLu6BxpPP z!A*0R^9Mp<&DlyUx-CBg#cB=uCGXDYRcE?KuO43c<6?1aiaEHOSp8aeE`xnM;6u96 zNA{xCBp5h836@CFO*A1_tN@HILd7D(E{hRw*gE?D$s?vAAS26&^#Bqxr`g1-1s|r4 zczi>EH*B_ahwcP23(}6>&Pg!sFOH-0du+ z59bed{mpe|DR_=V(91UB16f9!N6_U*$j;5LZm!kIDmg1fTdV9A;*ld< zcTQ)s3#bf{)ZJcDY50N0c%=`;;u$x2F8+j+_NgAoajdgnu+B~x7_VDSCL`UxFB7Xv} z+tYLtO^4+tz4#0c(yT7ICsiWiTa}r@Ld~&({B2)=R0fIOb(DE1 z)!XADE@LN$l@Uvimb0JRB66WHVc^O^axD&K7e23ZIprb`Z9TZ{YA;FQ}za*{Z1xJZAwiu*C(iXg{EW>+JO zTqn)$dPTD9Z;>4Ix{U5$jxsY{JbX0@s+KkOb!0akS+~&a)x5pXZzf=YC$veX#;^Q# z+q*nJg+&*L#xA#8UhiRighgi1(AL-#`=8^7*~!0S&!a+y2)2l@O!u{56r%`alwX%9 zhx^z1sIB+t1FPLax-Gmcfp+? zAJ1uh=0EuLR~Z^^!4u1Dv!Ef4T}uBp?tocy1o^G&p;cI_s-Rpp=4QZjcV>NBu?&`S zJs9A)kVo(r_>Kj0M07zO{0g%)oA7EowyDO9;kKtet@~x%XsQw;_cxFQG$8Z0*LK;jPg$A=&xH zIMlz4cqo;|2n_{z3m$F-oE}%XKkv2(9sb;1%laQ^NP@xjkkt+fzf16VHaPOGwx#{c zWP&bsAZ>Xzhyo2$IiOP%zANXEzl}dLAhriklE{-0mB2nJuj~`bd~$;&%-;?lSf{io zE-FQsa+J>Bv-8)TTDnMLAyQsQ>K(6>3M=4t^T{rO^5yd1|HhJ2;9tZ zfpwC}XR#&e~ z5Wj##JN*O>N3LoSZA}U+@nknv_9uVm^6kD@MiwBI(ok;Mv_22eM;`k>cn%(T{>DJe zYck>S-Omt+(p=-?0!|s0OiKNL4J*xOj&z+muN?3 z-XD|DuUG~*tU>B^xmtdc&xcQMJ^Fgr2mdezpC~~L@Wp7vSKYwwYLp6la55z<1n-;j z>dFAeg-BrT)(&UnXGFZ+GY4Fr`&dPf*LdXgqemTfK?3Z9u9P<)Rkm&Gt7pHFMf_!p zLI(R;hL_qCJV#o%g)0}=iKCm%dbbN^XaKm|5wUz*mdo|T>(ms1}XM3 zuMOq;rnq;81V*vE3ZZd!D%anwe)FaPGZAA@;U@0T4!+(!!qi*TT21%iZUy}CxHi^S z$hu|#c-jkL?Z>S<-DS`I(^mJMfFJi!HId@~1J*z(zYK?*pY=HzIe6uaYI@1XddXOq zq=54i>ESxhiKlDgSw?9bVuXiyZb(-`VSu24CPh~pDmfqy-*U3&^LO0}vAO5=58v=G zy=MN6!TW7WzwO|(4$=0EPJF?9Hs-^$B=(MIA0E>UEuQ%q)qKpwKfYs;&e&dDa_P4j zA1jVM$Tetoi8Bo*xiezZ|ALo8#fw(P^8I@*zxv4C{E_AN-*W4?j89#T_B3XFL;d&_ zUw`|K|NIYQ9`t~;eazlrj~Aytt!9CkU2rpBbK6uD?9Qq?@^?!X&$%DZxOhmIRRhG{ zXFNalI2lRw^oq^Jd+t|{T;Kkl$I#?%eoAOI7>jR-KK=vmuf}}6RJgBy!`aC*PQ!)+ z8L@s`aCI?3GzsZ^#cI(dJ+P>-{wR6>37_x@pYREv@b3+u2(M508^mtguJtavFbf4qhqxgbv*Whkgne&k5k-wL$Xz@x6hcz6>Uqjp`OT1?nWB;@xAm54e9tyF 
zeuY5z`L1a#sT&)oHQ?avoEJwfAsdogljPkc@jPduZ8GZ+Z@KN0KgNu+YgY3)H}eTS-(#I)6 z8aF&n*Ie9XIL|{y$FDd!X!E9H&}x=6vB%}RD`xL+n5G%y`3X(iqm8EjL`)cl#16Z3 zPR=Mrf!38&{C9kph2dqX|lut+9kNz5#1=qddp*`dMm zTZD(t*gxv>+^caWCXe$En5&$-Ob{0>oc007FZ%S|6rUyO{g&0cJBql+Dk*3M4F^9Z zYn}p*mg^G?L;R-A$BoB?0i)57=g<0d^9|x=O`X_ms~*bg(;oCWK6bH57#PYyg(LjYIj1i}UiHB#CnQrzG5g5PmHPx%j4C3Q>w@_T=sC29EyA{gEsbAZe6$9wF_eW0hxq$v92|IjerQ5I zAzIv1EkCf1CZwjt{kFi=n*Lu+cwVf)2U`d{wR_8Wa&Hp?+*XLzL)z1f zmKD(iZ4OTlIXWFNGE1Ct&Q`Gd8|hY%S<=qz3_ZX#z1T}a9>|dGJ4W0-ue}Ei5 zr?-E`*6U!6A~)B}wiypGtZf(VhDd4RmMJs_1s;vz z;Yb6s$ce`*R=pK*)F;Qpk_tmwNTE=MiBKIZ|CrI?OMd&iFZklP%c%w4&qgi`kOCrhOK6}RKtG7ClMIwL%JT617Gb-EBgG%drl!a!vxsW*NF2>r7CBob=(b$Uz(VVa zIx9fg;Q2@kq>fo{BjSgI<(@;eZz4Pk({->N3rh)1DG}I6)5G;!wAyXjK){~`xs08U zCV(XcN*UCFhv(Qhrb7O6tUe5ZFdfXGhto0%S}sj&p_IVHrR{t4TP}vDh!luMQ)f9v zv?RCh*d{9$O-<^Uc%g&om^4LBEo-oS@B);oG3tora*KRSSnau#HE5C0p>fQdgk;m93Q_&q#J(X%w7h^Uh(t0ZQc8EnlCvj2?E-ZP%<4LRxLSdAjS zx#PfhxM#z1yP{YwNYk21xrF^Tqh}r3w=?9`oG4oJut+)T!kIZ?yIu0IDVWwCb$1{C z;E*tI35=ZOdQJQ|C)uWyjg4nY!oa|k4S8B&XanV0ST2ZWL7q;TMlp|3P1Mypf>BAN zr4ULXEEl_fNc-jQIDY$#-@I;f+*XLc@Fm=hys}GzmWN|oSkgc$g`pfQ$HxytTJ1I= z{6v2EUje-%qKhJCzRH-cAu2;u=ZL{@kMoleN4|j<+gQT^N#Eplw#A%JDAGe#xg-z{ zfu+dJhPx~ynJ-ywu6SI3#fR?`{@ect^~nKxuS>Wp(6J`R#0h=e(8V&R4QdS|aVJLvGr z1*;m<#7kz4rWQVUA-#iRj!*l9G??j^7=>ME~T7&tA6)n+Mn`eB+Ms<&t$F*~S^$3PJ_?&lV7^ zHIxxmI%Bn5ay?E-+>rj>IR|HZyy_|r%$RE9GQGQIbN7L0Ry5rYoC*RT)JXmkUX z*CISPW&dQziiX~TznAKuQ91Y0z?#i0a=T)zUo0LWT9k;5YGN8r)X<>Fow4S%IoQCsnzhg7axXA(t4>^7FmM{MP zobzA_vkTV!J7(6D>+OcScXuq_`fM{vz1lLloiZOs#B~eR>a)M!;bbHUnlZ`vJ1*Y8 z=O4elW9pw{ojl|8?3gb;@6#{u5Dx|a`1K_Z;|Y&ZP3k>Ix&d$c4a1s$X5!_qeV+~J z97K%~^so;<^G}7E8qS2jZ(KlkFw5Zihwu8#ZZ;ySZXDe#gb-Eni*5=+i!( z?>Wck9f$0Hs$DZ`*4T#bol*No>8w_BTM zZja#Ll%t{G%v`fFCFL?AF%9avq%KREr-a>4_=HdRgirW{f3NsNczwd(Aa)^PT~}o5 zhxCX#GpizlNSZ$ zwjwDdRyV{Qb@2oHse)!_g6)L?y-|l&VjyP=nyjIyTnepdq>UvUyp~NjFtJU!lPb(N zRPl^BTeFBGn)@44O16Q`%zdixs76e?oo&eLhD^2) zcAG)VrRSNL(g4Fjd3&@*KCd+FCk6f13RSPrjUcrKwEU1!dncozBpAX$xLv$p!JcnW 
z=Oyd3WV0?Q+gd9O(ir9KV0*# zfMH-*HmcRdG9#?yhUj6Xc`U1PQKE#O{5eUhJjQPWmzaCf4=QM1jVw5 z&FqoM?KOAHl$)HLG^*)ivCqa$+3Qtg6*M#$RM1pZwWewW+Hw$EV?W8mfsjy5z z%Qmq2bE2(;Dk5Fp^YHB@_meI2++b5_EF+@MR$P5|!);WsLZkIATm2WGd zd+Iz#mlaxTs_mTUyZ^%F=92&OTa&Axf?~mHKIY@qlxbd4q&4O;r$~4G{gc4wud?j_ z_AmfLfG{OO{!kINBe@Mjpp0EL$PXFi22TkjRYj8*yMC7i#pIgJ*Z&tE=HK(5zEgCq z6j3f%EoOW-Ub3krBB{tX1!Y=M*OEqNG{uT&J>mBDj*G_)YYq96%TQ1W^eQ3DG=~-F zrlF||>MGk+yQ9%kA}teTDx|TiR5!%a+OEKs4MLab>dAjen5b5lV01*QmE*Cbj53y! zn7N-3d-p`~jAd1kIySBSHp8PfL11I*9IBeCC~4}Ps@Sl+{+18_he_J9xwmUna!0Yc z;r4#PGD@kug1D&I<~2pt{DiSI023ixe9vRhcWH+f>fd@o_*Y?9%Hw@FG)$;N71Nv`22C<0R;Bp613aF6JXw!Ex!(%QhOHf$df;-aLES7g%}i%G(4Ytu9p zO;bM=o9)gG(zdWW0d9L&rDlp<)vlB#t`c~vVPk0OrlKxmYP}+_8)Dfev_txVL(fx~ za+jnd{XN>FfR|{J3igHuzQ{0ziBb-J;1TqE+JXJFe>K4lP_~OBSETx>5=`?8d)OcM z{6$%l$u@@9Wze?i`vON9AWei5V*4Eifg+7EmSst_t;mc9?Eu2UPy*X_X*r6(Hc$c# zLm*5WWtu2MBb$n*X{ehgwZWem<0>ym7i-d0L{@^~wdg)O;rPvSPELB9h6TbrrQ1C6 zZVs*c1t}mgkjlbzO)S^LwIq%bC~1M^;)X4PZWrH?s667R@3ZJ@(rC@y{E}$ZViGCL z)s}}z!FX$vyGI0_BL;1szFEM0!e)BQCP|2tjeB&$=!?%ed%4exvw)uV5Cg>~Gnh=j zqh4iXvnkQI$2Jb|4Na;4wbQ%=JWSQ4-|h44pwHgGrRA6Of`I*&;KnXcbwSgVRJA70 zO48+qbh#x>OB&0ib9}_nXD>N9>vKBP_+dhhRyuZ(h`C;0sp~)!fnE57SmTH>7 z!#@adL+2;bqXKY|MvpKY^5Sg7`Lhv&K6q+Lan@(On(%#GvCvxN2wrRI|oE;4~9JL6;5`q?c zp~={5u=4_4m(+DlQBS^6OKk4ynfEfi$hKhB;CBGG-j;glJ<4UvIfCZ zvWaqW+ltdy8OrX_S;1pk;Z;jk4+-&j#^X9;kv9}|Lrp{0XmUe>8(_6UY%4_-5u3|z zxUM3azRTD*Aa6*W5&m$W_R)~TQ49ac{@`zEmH32D_=HdRgirXl#3#b*6aGe_(N#&d z*svY1h~kjk-9!03y0#*8eM~vR^*8jak1RJE=Cg<<$T$g2Dx)IXq(qNfs^%G@J;V#! 
zv>i!cN+c5D`E>Ss^h!zV!J^h0K}p@zL~+432Ho-q_WA_v08bcL!lp4pf-vM@-zO6A zaVzw+Y=v3gyCqaLl+j?`0zz%X58r;Vz6v>!K^(Uc|@q?Ji&NVIftOqYJgrPp?F zY;z|elCG(?k8INw(ERHZO6nn#f}G)63el%0|$#Xb`aoLCblq; z2!tU}VwY)Mps5<34nksg2)87*<niUWwy;|772&m z1d&adW`c?C@Y?Y?2rYW1)rVXrpS@4siB+pO+8+zbco?d>xh4%iLI#E_+g)sqv)uAds!M?)zoE4kyq3; z2t!~hg`-SNDKQKU!UEwVgpEQV>V_(>C^CbhD4?oo8e=yhq~(CC`N=xI%e1E4kj$>Q`Rc#$`){uKewJ{xE_R6uO+!^xRNAHX z8bbYJnCvf}AwLpc{}kz^tC}V+Xo>>eG!&~x@~{4PwqGkQ1t?>8wWhAA%9>jGsHDW1 zWfXBqT{l18%{{fFE081e#r(Xo@TtDQWW!u zPNI2U{qVh&)Okc%m6T|-Qka&FX{smkWoJ$#mB2C$EYTpV63U#qs?oy23I{j`=d{Ba zE&WKcNtirDJX$52_7&UBm|0m6dkzODJr17t30n%Kcm1yN9qCr(Iopq4vbz12?+gQF zXpARMsj99iDvjUI*=7~nv>~rF`cDWi3n5&Bmd|M9)9u=rruZ8PX8!OpRy4XID;v^U zP~w2;V=5Cz7${*pkr@hQn}I}IY+lBR-1vO;cSqA15uf_1#% zX1b(3j|h^Q#*tVZ8#lC&=071;Ygh>7<9Avd9fyp%F0KhmIy6QP&!4mJLsl1DWjXV0 zK`Lr66oyhL%OiAr2CgEs1yX{cLJTutC^m$xHPLp%Y_()DjoEHv;_ZfbIbpeunPe_a zlkdpXM$_o0dDC_91|eQIz_m?GG=^azq={>SZ#GDyrl>3GJfbe=3W)QAGFSq#)@p4V+m_l2^zK-3|sU%F19oyi z5MH`L=Mi~$T6;qKgHU?JQtZYyQ4Wm2sxwpGD)Qxcms+SX5%wjPe- z)A1CcBT(`wVa;|>mW3%m>XN2uC>yB76Y2G5sM3sNGACMWNJ}t+HiK8^oc;DK=O-S| zy##xZux@Sm>ive!?^nAze#1bRHs=4&-g`C4k!9DK-#N#%hlkG0NC}{{=x#Q7dDYmR zhk2R*JFoLP#%ps)BU_e2L8l5G;o(}(g9lKku0p9UHoM1i!O{$t8R70n`aXN@y|!&& zIUQWHB^i}65SEM6>(Lzz@iYg!NO^GBr96@>lMP%x#9#Kg-PzQufa{x-o2^0STreJ8 za5(62XhaDA2LJXMn=m2NTn_F(;QSx{o_8M{@ZphUq#Z(WMB*o`A6^qJBc9A|@UK1m z@C?^($p7p;UI85peaO-1fOqa4aeidevkJ0qhoG-`>Qop>wzokm(ljGpZ3&lK;<%tR zdJOJgaPQY2@!+h(TiuLaS(EevE~f#bPZq5ADjC!z)Dc-4GT)>uqK4+gp>saK>Dw5j zdoo4>E=nCT9GvjsyC=N=))7ZGxaN+|yGMk;sjJXX2;mHa5@R<9?gyC?) 
z^>oVZY|dh_U_PC4b2VieWNdPaysn@u+8_vC#)wE-0&vKvcffo154m^JW#HuKU60eA z#;j9g`8iFMQC1aMUJ&~M*G6=&DCytuq!x>z!Mv0#2ZW42CNCKY*IQ8hJ5 z1(`CCoi0YVk7=w?*__Rnzhiy%i0j^f?r_X#e9GbYg5${<_uo3=V$y3Rt^eFs`q0B4XuNq|GJs%a}i!Q?Be9RV4(|DVLj&RiyA2 z3xv~UcP!XB6;0L9)HRLRM{`Sw6aw)wyi-V|rdk;oC6QGFqLovtn})jPm0XOJZB(cb zvb~O!=t`oi=FQQfZ-9_(W{FU$JxB@hDhgU6P(q-jpjNFDwDlLYpK1R!t;7QjP1AgJ zC{}XsL~NsyUpZ=&M7?Z7UC}i4{@a2Eg_NqzPLTU&X=p#w)HK3IvrW7~pM#@5j-_Ku zkfM!fuZxPNtP!0d)_I?UzDC!O-y{GVpsw5TXj&;M_Joq!kBN}9x+ryn5cR%&HFeX* z>(@ddq(Z5-e+2Bua_>0Z7tW?`sOq+FKp9RSVusu2Az*cmPmau%&m0KpTbq4F2YUiJ*_x;n(S6#I0r1hg}2xVh;91e~w zq5y8U0o$@*<1T6HEvXnm{}8+5(K8f|e3P8e>;2O@yt{5<$V0Efql7##j9({*ZM>Vv0t%mjge+Q&x^Ws{7cpD zXJ4CxLbkPmbg|#VzPAF;clKe^?DtdzNUd$RXxA_RQtp4JP%j!L2GL_;GbN>^xxMAI}-?K`2NqAV-2qN1v0Tbrh=$g`57sHnb`l~Eua?A|eF zzxYQoV?;kJSeAS8NSUaEkLVm9^3jm$SR{LQmV3~svGLMqNxh9 zvLeiBGsCoYT${S22tsx*K4<#n=RCfga~VnU(E}#;hfJK34l&bb*UV=L8^sR{dieoR z3aRTzO+!h6;dar-=k)r0j$FmiR$qUMXg1pDLyY4tZy#B>ruyn{f5FVIuVep&-}1T> z-}-CO+AfBUR1&3Wn7uyc@R&ir!%0Ws=(4pHUVg*uqaD7*@T|+dv4NxONUewJ++*^V zPOqB|GsGV zYbo07WkWV-bpd5cRh1NVL)Cz`CpN31q$(TgMr#8&l#Xkgbh;*`10voMhXuDuMP0gR z9g`0yidf5tZ)entfII?gXyDj7t|igGuK?NJZkGFZr&6*dTleo%Db+snudRq#zg8Es zwFeYJ$=B=_X!3|UU9+06xt;IWhB;{?C~FCti|Gn9k)jzj;aX=I?AOOjn?V3lAoSLz z5PKWwWlLIn|D~*%U|*HcyPI%#TZu_yUt1LE~4hu4Ybmn7;NCqJMNqU{UQ(9kpu zDQlEeX#08e`U-kwYgKhS--JLY4W+gFnXc6sLZdSEeHOPsHc-<*ok13mCqAo4uqzxK z$E7=R=sG%lt+Hr)?hr~K4294&q+)+oY1g$B?L2OphPuosgBwz@W_o$U=MOJgggLvc zqO3F;cZAz77YVM?CwlKb_4_9rk3C!o#+!7l z?{J4Z+~E#)_#4I@;dO_yN1r zs*BaJIeY6qAN=xyv1ifUN13;Wijq1nX*3)&VPLL2blj>KxTQ zYBl+l#F9U?zxE5d*^{Qt_mNzG`t{ZkR5!0leo2Mq4zW#})6NOK!yW!EU>8QL{T=J= zlI=R-GBm*)a{lrAy!G}WqY^p|-@TS!ZZ9wA{rB@-uN+rzYQLmGQX^lrrOgn0b8gB1 zzaaL}|KbOFafR+8yXW*f=Y077fWLdUhX!cu$+)_vC|XCh;kr0I3&#ZW2TstkD9GcK zA}=WGimEQ!x+O(SQCCz_A`A^_83@Bf=q9>Un3|1d-a|Y5nESst;{W_F2Mlcuo!8ce zut0U`cpk@-E~H#MXNO;b_i1zA>5lod3pb<|Wb zRh>~ZHMNo`Q^&9*hG|f18bY<-&5M#W%_&Po)3lO@X_ArW1$F&RA_Sp>?9dtC=dJ%b 
zWb%vu!ar0E*(-;rL>L~L)uH1U^c}UAq&1MT%^o&Yi7I01G^8wYin4B9*5fY<2ru)%<%g=uq+y^m6Ki-*@5BGfJNHld?AVi_e|brx)clC(I)whuV%yIc$;o&gkmCGn|QXog4c z`~%+m-~R{ij9uRAs&CZeQ%)Cie1g|=ICKOe{#s8WL|e1X*~_kdr+n~V6fdnz9Vv7y zO=7DWt;%SMh@vb=t2UEfuPf@Zq{!NAR~ijn8n~K4*RJSzCaD8#*Mxq|O;XXAUB(mm z`}Z_HpJhC|os!Od@>DPo1_N8svD<*3pQT4$KQp}ik#cVjeEkk}zN6ee;^x_m-#m<2 zCOVyiG2Woh(6#CHa?GkGi)w<#iX>v;JbZ&D}_uk51cM?Y25v`W)e{d3q)Ejgl0o9^);2jMBh*)tq@$$I*j zPd^DMmOfEXAbKvnw@)~F>xh2W#unn|d4Iebo|`U)OxcJzhhVLB63#d^V9FC|m}%%h|xBXTR>AdX2QBq@Z~9fnfBiqth>Qb7f5P~n&&A=8fs|;A7s#^2&)(EB2|~RhfxlW4 zZ|;vD+EZUY>o-uRkZsuA-tfu)`#XO3`xiV7O%}$O)AIvPj}Pgon0`H{$ZD>GuPi=k zbkTYrF!T=jN8_Bcix)h9cE#n*jOl8_%nw*^HY|cCOs{5G{cF~{6m#TZ^gT2k^dIod zceukH?r?`Y{H@`R@Vdia9qOi{PJA+d&L+&+RyM+wOu8<**&dXo5N-67W#hRPNm8>7 zV??xM7fe{h1|y~6dKTKQq)B$!Za0@1(Z3d zACh%7^3uYT1)|Exvy>pr2_r#K))+s!4ii7fvV8ML1fZdmhHWd%s-lb%(p^O6TI8OA z5EY^-D3gT9k4W-{(r^*Fg)S8JtH%&uJF~Y@s#aSYMP-;$pqUPuH>5LkIk6Qw8FiTw zY(sWgPL`K#bfyyD?u-@!q0Cm!Q56_ikSX-SyZcv7M_1x+51#xb|Ej3^9oje=a5 zG}P#2K$XSpk`m9Vv9t!QDQK#gJWcS^hN#dGhQhVm$bOoa5#{1*?fkPL_HFvPulwyc zdUcR%d5(rE&nOe0EQeThkR5}8W#hOewk^?`l484J9jEwyKob-^@HDEf}++nkvh5IchA(4s%? z((x=TO`?BiM}2j$t;&Wl%J9RSfr8qUP-c`_NR$=$xgxi8TuZ~X6uPFhV^J40O+i`J zp*L*ldqk zr$g)#oIe?&ZyUC6H0}FF`g+<G>WegS}&u_+o*Gi{EmI7 z*=IA=Rn^wC%A$5Xq^-1ASvTz($r7?8B90m&-9TwtE1Tu?vASbiJ;W+Y%JrJv%`NK# z#iDBwh6}RWDVxoXxNH!%Nq6ipI5KbtLyD}2qir$Dl47-FcX`RC*I`yhG}8??*8!__ zN>SD5{K@wOMe6gYw2$odI2aMW<58#zYxidc!`G z6BE6NsI!DHu1E@j)D@Peqgh&;wFLVNS%s+thA2^WNSW;j!+^~;K#KO|s5GOMVpFYz zUYj*gQdT8pnGr=PNtTnBC3PcOdA&R(i$mhLBvc@^oUE!_dETGmITjLWdFb6kT-&Fs zMWkp*OmwTq;AqH^2UZnPWf^`{lhy*mP#7Hz-DtD!rLsZUbS>zPe!zVz1-gr75Akf9 zkkjkbpN7(%y$HzK$olqqa zX<8EG3NWxW4cCxu;MX@h6D8XHR*8QPIm0&lzpiVlqN1!a>LwzJQsT5ED_e&;bxB#| zq;X6XM>t9#RgIE5%CPBoO@`eDXB#q$XROvsUd+}gucVjESghAvZVFbJMQM#OoB@5? 
zV(6B1`WB(5koAr{$tX%2+naDQQoQ@NVzmjl-Y$uD2@#5}wCLJ8JyT(*?|OeU&{Qqq zU9>h(QdHDBKnZ0+k!J+CAgMJpT}QVy3{8H;Fu!t!@2^Kgl}2RiIsS6XbffSa2ha2v z3Mg&R4E|*jt z4bxKShIn;<@x5(-X%~Grl%`{N4u)%?s~oa~V6k9zbL_W+BB|MEgCo4t`;5k0PVL96{RQ(U1zA*6W+p>>$m#Kbi-||y(J?EHy3@hxcCkz! zqb|rcE4J5H%*~vsG%4f;;#+2mC5t#?Cv=3@!|L_0Eel&qurx4Kg(@SOcugFPS#JZZ zY(=%4vGKQTvVy3F@;jR%-Z1vx90;jt=&p^?vC)kLS!Tqm6|2i@7J8R?QIMo_qMIqp zl}}id#@9jRKJ|Q#zu6L(B?YwE?Nv^bEJ>mlTwgus z%jdUT%TtocfQ#NCN5@@y{)T*~F?&_7bx&TGIe8qA zB%3)w){vNEyzz+Bca9l)7M*=zYnqy-N?NIq*_y|X)`WFH5X8)OeOzTQ9(ibC0oxT> z&|#PL>6SXWUQwnA;c`c?42Tn*+?Ldhj#3(~s}nmw78A|4L|udQ$iS*gR27lLF{^dL zYNL^A^;gdCzX;lc6&M)0j^-)CyrkIrl-8E8J0KltC|M!OjKmM|=UbxKCbvfjtAn9y zAX|g^8$qB_Lzo`Q>SL)Jj4~h!LuQ){$26Ik60J_C(oS%0@PvZ{&)|HEUVR10F@EmO~LHB*w7qZR; z>s^GlNf;S|+S4JADB>-w<{}>vcVG-Rhbj-V*Eu& z6t;)Dnwq*yNj5(IN_q}wZ$U!j_ktgOhZ=BGJ!e=Z0O zAq^Z$r|&BKxFlKa@XZeKfg(dg)+u=wvsuN=X9-bRQ0adv+43grg+H4bQP(szG(w>} z7FN$j+hx?@nk4e@^FF<@!O$C;JSO)S#Qqk)5G2y!#L(#*66;%?v+!;4uyxZ$2>%o* zM1e#Z60`3>k)m#W%3zKk^jO4)C>u;!P=`yx)s(C2l*e<8-pMf!Y?}k!Ko{_R%2zZs zbvUP*eaZ4x@Z_eT)NZk!T`|2}u!}MiUc*dMg9+KYPu(3bmwlm~;oRK}7arJo1?L4I{o1dqigwQ}bn5M(H zXE5&8nAw_5e9iUjnlGN{)Y>i9@)1|FB{x}(ukk8syyj(%k*^%jO;b}>IeFrDBHm?B^|AzDKoM-Ce3oPJSW(N1W87cmz1@n zstU3=B-(B9O^bCcF`SaTq^;-kQ-(__l-+qu#|GWHz>cp- z{S6NET*kj|LC>oBol7OY8DPgviYJi2zt);osbLvCI? 
z<2H!dNQ-*V$2l0&aU5KsVW5!GrQ7dteD2W85|n?7AE!KtQtHkTnK!{09?`WOx)KZ_ zk(!P@8sMFbaUVtK(~NZIbNhVB#AxulHd#0&TU@hTMm+Uxw9zSVD~Aim#nB}{sNCSs ziUz1?s+=TFNRo)MjtRE`+bAQ*YtphM*z+_d@O}J^g|2Hfmc+CTgy}FGS{x3*i+#%Y zlH1uepFbVbbR)*al<9QJ7ps!1yhqkK!{{B-b4&(KjNLOZItocdT~-uwMD9*7hdPsq zU}!$V&3xjlqR@LZ%EYrYI;Qv@x!MMrf~HQ1vy{ay=eS={nzH?U5t5}5t5gz{CZ=K1 z>u6Zon{3U!Gdj=7{eUcmLLXtcha8_wcKlEMwCy)xE z+eovIsi!#gnkoxf$2r}!VxT0ANT}i&!FG%BUj@pNoU{p`+mWf@q*K2EQLsRcBf9oy&6 zxN>YR)ey6nQ_mi=diaRz*_xoL$@p>micmVz9b@#*81^rDt0O4#7=HnA?jjjsIYZ6| zHs@W9jv>)%1A8#QJ2=F1a;!2Sxw+)#vrie88zyCsZ8b-(9`p3+GoJY|3pGHD5AY^O zcpaCH2yhLBB`dVTrwVS^ZF=0!92)JKYW9NL`J7pnv;988>&Nt|p=0&BIDHS>3Q&21 ze|5=q=QA!DGKj$1J;8tci07Ac)=5s<0h5L*Psn#m>TpYfL1p%EZHtjDa5P>^Q*MBa zs@k$%P5I)pr^qy-b1doXu9$tf~1QfcU&4%%>lWv$Tbh=ku+Pq~3Yug~QLYH{m_R7%!6DuI?{jv)&-)h&`NCpaHIP-bQ8fTHWwIsSe9rCc zG5_{x%fnfLc5;C;ykImw;v_rd(3E)FTfW>@+>|9vSyD9(Ra4V6ZB5&<6x5AG*V{4( zceukH?r?`Y{H^1T@Vdia5B3_`bybl0Yr@5xAhXFj#~fWe;N$=FF6W~TeF?e%K@DX< zmE3am=n?g1#na`E&=0xY794jitb-xeO@WrI$O4abFv77KG^3>0g{W6|?j$vTprD6w7{Qw1k&#T37>{Fb-$} zpF}L!jU}6*pfO5B7_eLWET#cL<|4WW=&p&a)vqMf{uDxLNVA9T#B_`)E)j9)GhO(& zy5z#Nsj4kyc*E`GC66AiSv5Vzhxcd(V@3uvf8w=lC5s$1XMk<3>8gaV*s=CQJkMpK zbQC-C)eWoLTb@2CnMXQ}H{hh-#n1z~tf|v2vsFMxR}5=`sCLxRoQ>}@jTOFT({oHl zeI2V(fBS@&XzL~@YGjpD=DL--XHrE~rKH;h{_`)HCI(Lr z2OOMwbUPNu1Dj4qVQPZKvo&d!fNUu1_UAtiq9wePiR0R+zR4o5DOOuT&B8y?*;yKu zh=_Mv7K@PCEFp3#svh53%>A3!S8aw!Qz5FHA}z==&}<8H*uk6@sF!={Za~KZ!>nPq zC0Z=-HvwU7P?}x3roq6JSW13-=9FmfCxt}GhO(~6vYa%_$yI~V{1_*C8@lZ;QU9gb^@{>22ZyO*mv5}9}a03ZNKL_t*i zJ~TB|v?5Q!W;kEG;PQHfp9xgoWjOITI_cu}ZJaD&GVF4HsByFNx!ye| zTy}VVJHZGOs%FXR`4yl1_L2o7M0rZ&9CC47?Q7fo7<|=;y38rV4N2g$4huG6K(=1t zuVS`QK~|I$X-KwNu$!6OIxe#4WA_Dp+hpKOa7Onz@SbxZBjVMR0F5gz;h{T5-99Dx z@{+4*O4M|*hkZ`(^*FoNrRSWI`U{5cB}1{}W_raoQ(X2Au|tTTO}Tv*Fk9u6RrB?y z|4G?G0z@m$V>%uOV~@#LqvQJ&@g+CcM@XZh5;u6;XI#(LEXsyhg4#4bh=iB6|M<`Yv$7lPoG*OswT2@w4uZrn2ZJ3 zvY<=?!sU`}loF;n{%nQs$83^4k-!By_f4u$#~L!)F;U(jjNvGxFp6Ov*O9>_T zr}w)I=TlU!(D|zu@w6$8R@7taE{R@qqqu%|ZQ_6;lRYy`Re>Vz*hbUT?{&gkZg3wh37! 
zB~hVh%8VrNSx%>PwT8lU$a*fl!w#C|F*@pVd~7k?Zc+Xfv)dPZ`ndKzcnzPJI)L3F4^x7=VA9KGqJ;t8!((FFAwZ)U{y=7mV%hER* zLV`n;xArVo#yPzGIZ;{gf#!8`|be?qRBWzUg^F2GoIzi{m zn`e!DO8dN&^u$``(&P8LyR|LC#78HYpj*@}mR;PBMAT|{^fAPh!Zii7j~9qmr8zU2 z?{Vii7Fu8oz2@N)+}(Ke_rh#i49F;*uI@ysn|Vg%y1*FEBll{= zCsXg5`n$8_&96>$_P4u^$s)`1bS&+grka!pdv{$Npr>-s9Qdrw%(#APN+!!jXT_m+ z4}vZc^SoBIdG>q!h`t5gNBIhhU zEzVF8r3@|=Uzy=AM@W282g#Mw|8DGR_Wo;Qm%Ik!hCXtUkvy*+kj&S^n=(O!z6T(T z0Jq8ozcTx9DI{kyaJP^<@F_z~sMAq;uW38mg;yZ?T&IaFT-WxhnaM$^fA+W3-1b3; zQ(5dh3eJzuADmDjr(s?cycCp~>oK}kx3Z^bqU8#*;&nRjjByL|>VHPwNfY^7src_+ z+k)1NOPCtL(N$rY6&^f0y4GkirK6`qeo>4hGtZj5oU4IM>%uzF* z_JxTV+w_OqMDLB>t`A-Yzh+sLffb=|q)y^GH=-U8fhG|>({16BIKQ+w90oe|I}lF- zWU_r1uky8+GTtqtr2FzMNALI=gGbKpd{_QVUDQ#EMJ~1#58ywHLX9`+6geuU|m@I zy25x%Ue|E{srwz3avFB|o8*)Y3<()IVsR3ZBYukO>trrhgkorqcwhHV12c{RwmeB3 zE?LBr?nqt%{@oG_TRk7Lw|cLD3A091+(_bh#fL!+4qxVvZ1s>=Y|`;>Ve*gi!RED(ZzLpw=SwIi_(!x zo@fkx@k9wC1JmO$twlXZARxlNlu2k&Ivj|>UwO3a9-6VO$&0rLrP!M4<`vwwb?o}S zpkE+mc^6=gIQ8)lUPP(g3{!HhL)r7G#MUa^{#_Kjdh8>?Nm!KRsE8Z%(jxulWOajC zacb5uao~oiYhW>moHs&ijkH<|?HKIhAAiVrpcb^K9(oFi_pm+zcYT1%LCrpZwuUj`7_P~8qH^3+Yc*>cYKweUAE^w z7m>odq;l>p-1dMylO9ANQZHL+y#B{!*#UeCdcam0q0WojB1amGmkc-(SJgdaIN$ZSi~+iE25zJ8*8xk4Hek=V!R) z>f%e<`>mL1dKN11d=%z5*&ioJ?B3HFxegI5_9J#;2{#naKRGRasrz}1t0ivv1Oh)l z01oo`+?sOf&0hx)#43%+$=@{x_u5HxE54qHp2}qx*xv50SDR%ibZH6Fv3=?f(R(J4 zr*}(-^V8)ZZa+B9FrH8%d%j8(c+!38gX~0@DORpAN5%bcF;pV8M`=+t@*AjQ z%jy;C;$_!azIMSwv~T(_`Uxp`kz^}lrT-w&vG#px%%>zZlEgFDNJuqCdc?D`C$-TB zK72=N0I8RnQUbN^7%I@lyMGHO)LSEIfH4P&0%ftg37k3YvbJ;itN(YY?F)sRtuhKM zuefWs$;&x!rl+&iMBE#@j@>^YSJXxs2{Tu5y~=|&&t}6H$8drcJ^N1%Lbu~5_I$*8 z?N1hR4B?TkLuF0+&CTEMntg-`k>$lc5zpkTy35F|LH80*&Wa-=k}nx8bCooKDG+^g z24W?N@TN;S%zfaZBR_7SP9VFyVWRrCgh2f!a7tJ7FHevJ?eBzlR{HmErw>!sbx=mA z^T#N*a`Fh$SqZ{;0WQ3ilseaW3q^BzQzVhF5W{{mCIo90spWU)H-dBX)J?QlVls8; zNMAx<{Zu#kn)uzx?>amE1Uv30$Hv|O6cSlCavnE3OQSVLij#ukm&UgqjfWl3Pwe6* zgCQgC)?EwkZrqR%^*x_Bgzu9;K536-LH~j`pK0F+?wV6A3?r8tj0 
zo*s$4#{1%WDIs|5&9Z?h=9ln|wmQJC{0?%=D~?6cEFWRS4%x<^JnAN%UI@oyKP>JZyGpM=@AZRA}yHntkyHEh=R z#{8bw%f|Z7>?s?~UGbQFsQTNT_K7K=ADOgW7t5MWNE>m4E3)y+&LyI{)9`d{cy%i z&~kw#VdFi3cq0!sv1C<*PI!z^dW`k%;KIf}ftP&$h7(&eT%?x-v1BC;PT6FI9aaUM zict||Vc85-T2!NS2r%#QJUfbw8H`Q2VlAwH{lM@Ho#|CV7Pn?AzNbaey6y@N2tQoU zrjR}+?JrSb&v+ZT@SL}+Yi&6v8jPof^%EEMceGMgJY93cheE_82^&aonv1GD($?F} zD;g~X?#~e$4)L+&c$U7*XPx{q;-sfz{U>%d8!Sc614l}8UOo5@fuGSE8icpH00zj< z@Kr(oK8SW_D*;~JEJu_@cal^7T%yW4y`1*=%g+HU;LHwVT8_xIcIL+q~skt$vbaaohb{Z&7FmlUmT(CspAEz?93)q z)dg92=&<`nbFZfPw@X8j+wV_D0FEbxQ45K=eATsW!y~uPlQglW}cz~@7_%}!!WR=%K4~P5e2cASvWDlhn z_2R6n9UERpD87(ZuXpR7PX7d9b&_z@I&wBZREp);zYmOP{SGIn3Umiw|yut(>tA;H9Od+1}7V z1^9Ulj}O@oKKrjb-}NbP%pwV1=Ow<`5%h9?0l2I;f8YGLsb-D?`N@Q^Y{hlWrA|`6 zpAVOOF{)z?FtoiOm>w*Ef_NVGgyiMr8oa-KH726etS7Ku+nv*h_n@g21*!=yWn|qJ_kC(CeEAEPR9NYvr*c=J-Ewh`<#TJHZ zgl{+mrEO4bkK63b=A@z@S9FY~iR`PZ#j6!6H#6-X zH<(Eu+h6d7?+>Gz@i`GMVwxBWibAqDW_EoR$8~LQC*3{1dpv&7AHQ1kIG`Tf9!@#& z#9`)UZkEBvQ*zH~wc=~8S6lb?BVE&huC}IGA8vSN zL}-T2o5n`e52W;Oud$Qw#!-J5!VZ8Xv561kIJ?aXkGrbe=i8^=rj?oC)I^~MxyXv;N6sD@|GT;ItTzAt~A)otxZ z-6$zjd;eBhT~Q?<1T{lYU}JAXwB!goz~Zotzqbr?u%DsEx@DM0CK;rKH=cQsJ$`(# z;g@+<;Jbu=dd|9N8(7-v`FtpUv!Dq(6ohWK$%=}z!a`%~nbpyMu)WP7c&;Op5|38p zU%Rro&#}=nZ0i@-<0N@#PdL895&Zqa;d12 z1*p4i>KNY01swmOAI+val0R{{K1{>Z@n>5l$lOj6}stACOL zAjwiQx*TG{FT94$u428M<~^^yp5^~Z4>zPxMe>qckQ5&~eB%{8OJ zmdj568g4FtwPz$OL-k#C{d{BXB)Ln6e%DYReP}h!il6{R(~VEJJcjG#p3?aleW^~W zCE=t!F+F^Kd;WteWM{!P@EYm(Pz?);w4sC9#pj3{ox7yy~SaUP@I3c~fCKtZ3 zuVdCL1weBC;YtRetx;}_{UnO#;Yd=4=QTm#s-L{oml?AObu+^u;(Q@;IJ{TZ{#GTi zeHf|P`Dwx)LxVJ==llJnr`%k-{rWP^aHB2TT}>mSR8G8*Lxawm2;>KTH2c8S{(LO~ zuMS~>d*sX(ZV1#!)(V}6?pAiL-gyCh;C|Rr+3UuhrwOb8pOJxpEik#5JHU=AQ;{9b z;$axOhCPQyzQ@9fP89u-*upnb^(_&SU!zj5&dnRXOYJ0VvVsyM_&~*eg061K)*`L-#Z&a2))v&r(__*tKyS=a*)w%l` z$7O&SMLgD=mtn@qt_uxh%}VG3_zr9&-Bd-_XvKB5K^#f*H|Ea8)%Y}QUsqd6K+|D! 
zIl!P+&Ti>7v|qBF1-s_mL1uTw?ze-0xT z!h-n6@uR3WxZSk?8}-DT>EAf+pPd(JGiWIY*MH%oH>yV_i_<~!0!^6uZuQ7DAWn55 zK4xoy*5V9)LCQ-IE*nNP_SC8=xnL5+Na9p!{RgMZ)2dN+nT2;5{lnxt7-bPJn4Co?YD)&i%XE8M#p6s9Cq9rl4qi7!*Db z$omu^znyH9nVqr|n56U`bu2Hy>?qbTmFI{{9S>XWBECI*ojCF>Wa}rLYrFh}4c5%N z1YBHT1b*yw00#n7Uun=nwQ6;psBeq3i(w#}sh<39i3|fhWa}1ahxblgC{(?}61%c; zE(dVu?-jC4Fa>Nr9K%k@SE9xpvGD8Ma>EjFkXbvfoPy2fLsKye#p~6F2|Zq_Mk`2} zq))Z1e6MgG$GAxNp~lr><<@MLVhzmSvt_@;8gvUiS&M^^gIb*UAiOTS2;3HvHlIJM z3SNgn46}q3h7CQNbAkl9#L?~Z`_jj0nW^8`Mo#PG?_i#x(y|a(=zGaIJw`3o=m8&Y z3nQWYY8N}d6C2IJk?Z7UxFsj*X^+0~Y<@YT;_b2&W46l_TiB9v$oEQP(4DILF!T=j zK@Gus1}lV7AC3xLLD40Gj@~9@Gxr|b7YwHx-;J*Bn&!)9tEOc!U9^}b2OExsMJ?dtld&yAAsK2 zm>h+0&ETbrM`Ql3bXEVGc*}I^J5psvui<6l`eu;^&hOp?ld|x{>oJ#g02K@yKGCUJ zQ4Bfbw6Wh+SbgKBrIzd~S|;wfQry=;#G$?Y(oG!m#})e6&$1qp?k~PS=X*w#?%@;V zns+Ua!b&%>j&HdJtrMThq>rDbOOWWGKs?RNKC?O+$9YS z`+9VLJV)^tBi|`w5?Xlr=7_o<$T|j{+_TqI^8(^;Y>vYgOXH}szQwXfa-e zd}@t<^ScgvVc9Wn3|FhBMad<(cy!=NE{C|L<;y9kz-3H5y?b#UA(D9x9k8~N%MHX@ zW)OZrQczw*T>}6Tg=Q6EcjznsC#eFoX zwrFSpRO~VA8y^Qd!(up+u)yg|W!2V=n;Q10aBC+ZI<{@KL5N5g5DW!Ya->&^)fy!@(TnLT8sJ(O+(@O;{5hVtGaP!oN6)?y*BFx4{|-3)nnJ2{)OR^ z+|GbQjDfWjPN4CqPO7+(~8F6XxUwM(K2Taja0PAY{hH;bf z0-vBo^5#+4<*RwB4Ibs1Q)#;#HY<9?B&6yU>nCowO|yXv_N?m4P7Mn3r!3<8a!nmf--2P&3k0!QJ!=a|0s6zLcwM7kbzrkt z%Z1!!sRXL@Rl+QwLbLHffanS)K?#Rx!0~MBu6Lp7+Rnkq%mf)F>lUE+E@M~y!I^E!OOC=tsCzj;qHaGRft2-ymC% zQ!HLyeP_i`(!HP?)WP^_cAJgngSF#cnd6=lPD*89q)|)bPjp0ZH<>GN;!;TW>`*_= zzeMo+LBelUx180m0L;O}_~6IKjCFOP<(yVd1g8<%<$UipJw;IkkbCsK!WSXp$wQ8P z6Szv<%~Y-#O|*IGwOWi({Xn@btfd`=P#nW7)^M-B@TZhHu>o^#DpuVq68PRY^LyXn zPKk~?r)qH(c(XIc8ooNM7%Yt`ueeNe5l%r5`1d*}y>KPMCI zX5jvkEwl{wLK0&xN^=(}LS8#N5s#{F*VXO1_HnSofCddk@DEMzk%Vc;xSV~2AKU@A zs(QGf{rm<&;P;yD4fmIbdW6JV6Tq`+`jd!XSvmw1k*ve~P$8I3ndA|^CO*m%6jT*e z11H4?Ig=V}R1)35=3*nvCewZyH**X4>5emfP`QSVyt;GI1!5Ld{gD_O?X0TST>XVu z&6+raU03bTFOelpeIt|D+FAs^6`FIVj3beJgR6w@_GFGlo=gWGZbnNVNIs>PEW1EH z{pOxP*J%HtHHP|l>YL`13R}ZGcZ_0ZNgJ2ctC{>wH7yAC_1u2joyj4zSw@%g+bX(s 
znF~OSxe~gyl;+WZ$kPHK!H$UFZ7E2efzwHdx+3JMrt2LwVO+rwdqG&e;wl!DCgdqO z^d@~2+8hHM^uMJN>O+=f59Huv+(ng(gz}W)@-@SAJX*|#v@%}M?N4cY)?PCJ&@R{o(Zj&ezP*DgUtUF16g5vM2vcZcw7`nuQm4P z-{{PB58qQ-_bhdhF&K2wQbrOdi1nd)t^8_jm8dFEe?#YV7;~?>yq5jtNgkeH@LF(JPx2&#yf<7 zjl^|J0z{(v;ItsSnS9m^_VO|}f34OkyKW>MD3Dj^!LlFw{^mNW)*QgM~Qy(wLFH%nh9&tg=BT2%pA(^Zacyr_g6l z^3>|1t_Itnn5fIp#CLcgi|DfT@42?7QF>24##3w3A1 z1l)|lO9L4&@n#tsQ7#=pjBfXg6_ zqQokMKUQsh!vc(&+AFDeFpga;c{Q%c@4Ps*bwQ0i*NIN?NK1K5BJgn6S!H=CC#bJ8 z!qZm53-ZiT`NB6QrARvOk%}LqO6I)sfzW1haq$;a6JhOqJj}_in?N{3^3zRGPTX68 z#|E*st<8E>(@_RvBiZm?wa^3phO%6GlA$iLQSz#R{%VZ5Yg06K=U6g}iyiaasy2Fd z18rjAKFm)s3p5yLa~Oqw{nG&x3d_kmZql3AKArl zvPd;vH({^~_Nd6yv%mH4zLDn<;MpX%C%ZcRgb!sfVMsMH-Qfz-6tZwX zVAGS+7HEY*>lath{EM3*k-@)g3;>&j1qH<>(K;ffm4mX7Fy?}TG(_FpSqE$M8d#D( zP>x`KTJN;8Q5=WXY*9PCi+O`L@A2G9ly!_ml+)8^La+<&JYU(L1=LyXi;-mMcY-y( zCKuyZ_w}kpeuxN;iQ=hf-`P1WuioIat>#3~WLjIY2N)-~!u6rsWh0m+Oj`T&#V!6AgR{r!a*k)_>d6(sp*9BLl zO3V!QyxNotO}0v5g3B{$Xx!Kh`NYM{p1N**`f<%#Ay9#R` z{9J!2arS0D-BamJ|H_4mAIodM4=};a7YO|L?8cmNSLY2C)*ly+V>k@X2e6wkN@qQ< zU#wEk96OEPhL#J;CCnc-ERzl4q&WwuFYAeP7%fmBqq>R2W(h+={WuA6eQQx9c#WrjnlbmXd7tv^O_Nlt|s?}BJjPeExv ztQl|7S4Q&d<018R3uz;bTHn5@J`9~1JaW*;VW+4|9x~q4-|cvJTWlHwiEJ(h#p@f> z@Z$P@I zt!w+~6p?Z@j$in&x~iTr^65(1L@*3Fs5)%-m(+2i|J+v-_@z`VFNdGcIUA#kvpvX+ zeW<6t{v6|`93JGEJM2DEcIQ?aa+xmKnujB3PBY{qL^TG878Xs*HdtKqfKd`^E^0}o z`|zl3`qO*aVPyp`OMs2|b~U9aFfgo8Vx8e#3t5FueAL zc{CwRj>D=NGsAb)B#{K5FDPiUv-e=bPCh(CMkDJ3>9tOuj)gK%_|ACL%`i z&7ELFAQ3Y>$h+Ye?slC1$1VN_j0KkNTISYY(i6$7Ao`)DuewjxB*GtLl(Hr$?6n0v z#8a_Gy?z86JTiWG%z9soNKIwdXt`!yQNW}_(S39GXYkvrxlUxZ*9TKq&2L5l(^+}n z7a1lALY)L#XsM9kr#gBaI+yTx;2>8p4Ipg=aa<#h&I*PI8saT*sXEVOy2FRM;fQx#SlpBAq# z^ZwD;=-mOr?OjqpqFuI~U*~byV2hp(vnsv;qjeHCHLqF9gp9yrv2NuVMawl`>95}O zIjU^#ytvw24m_H$Qvu9+W?Lve;~$@3D{Lt1lk>-|fZO`#BwNSOz7>;W@th;P(FLk1 zJPWO@GcLaZ5!-4#&&|!xbVnj`=Abj`ab7;xK1ergS5o5rV31Dr;IGU{8~&bMz-%wg zh+oV&+DwN0#K^$>{C@bo<#!D2A+-`c(N_tpE)Js!7D_pPcM58fL$(w~u!)NIS?ZOCA5=tHWdw@b 
zCy%rYKSh~^n{XR1@ggp>vaWBW!J^K1!gNp!r8o5KVx$?fuj-*eW=tTi_w-X~*^ryh zWg&x&Yh3Nl;W$`WjR7*+FU-mzVxPC2ROWHx9Ey71CMM>m>871-KPw$SUwFy|pRM1K zg@-d)Lj%GWygXkjntboW46efhGd=p&AJ<83ZTBZ1pE1=M9YU{WjQF4Dm+MqD(c``u zb#Cu_j%9fC#iGJ;Up44=HSEX!wJ|aF}UQ@q# z3Fc&ah-shNBgE3AkqYiiyJOy6LJ&R9r!(zuNMADKO$0OUidSx*NiU!;Pu?3xPfphk zre26C6!Fvf-|}4=U(U3jpHGA#C(n1h_#&jSj%p+(Xc`;iKsL{3A>qacU++h-Z}k<8 z<@3%<2kGkKPWvj-qM2*^bvZMG_{ccs-Jb9h=<)F7QTR6X0Yf;&s`?u0J>-2O{32m( zg`wu~OTT?H!~+Wp%azIOXJSy0hr6v4{&4IbKy zHv=WT#}Dmy!cMj%n2FmrE+>`7j5#p{T^ikHMwJ6Zm6ugjH;yjD)=Kz*oAdDt zrT8qudiIWqk?RjWq)$(RLhc?t5D1cr^PW}9;O;Y!zt!Ikt#kf*x>cwI{llMkeStMy z5{|rExBFO#INhE3LLx|cEjei!b_(2;M^;>1H?A((y+ltw0*?`gNPo4LitZE}D|%f- zOi(zR$FpNId;IopJOW&0x{0nLr9p8Y?te9ceJZQkl4Jy6?(aqS@SW;*NHcoL7d{No zVprV>3xoadk*7mn-0s#VyIVQ}rMrYp4b3_UQpaUh(|72!Z0QWIp-P1=2D2d@#9_s5 zlC2sk&;|Hdw$a;eWN`B>l#`KV2FznEZsG8Aa!&Km%IbX1d`x^8YEhlsIF0HVOFcYY;Bd}mhQP@l+;y!{YMLmZuiQM%Q%e2UKA$W279J`YVnq;jL;@8i$)(l@n2y@h zUxTo)?ykf9ke9*fo_+43&pes7D&jo*KE};r!kX9kx4W3vLUU$OwP@}(5?YLmn+2kH z8ur?SUa^$~!rjCDj@;yHNa7_rj7chAwXPN&R$dX+2gRh>ag$vfs&YHx!(@KP%*Ncn zYi5&L>%!Q%hxY`b3&eNWwVf3n>s;_D;G{kXQ`bCVk@UzaUenVZynk038zHdve4$)b zYbXZ|po6>n!!>#q6z}zg-BBpB>T*?&*w&zHm)diKSuNsY#c6D{2)=-x!NR zQpRmk(e&)JNc%64Uq^P_c)91o9f)#J0~{CSZx$PrzRrEsw|jiYjfCuxI8eQwJgeQo z_L(E$1JT5yb#%Ckx-R35Suf~B$5zLs#g^M1a_e}5p{vkVU8c5!>O zrO}@`tt^;{fPMBC4Gd>>NdJX|2j5U>nbl-KuFqR>`H0XNDg4wLGh?{b5vucc!<`V= zEB~T0z*`-dP5qnfOAF8c=JhXzS{5iRf4}_Kb6Z1cgX-U}!+x1h`O6&tLydFq!NLFf z^nZQ~)aL!GM*n6~#`%91`B(S;4@FzZ{(!;Tf9U_O!e6rZ|A-VCLGG_~^U5qjnsN%h zj18kQhe+=_i72ar+)dZ%`Un^kI_|$)e>(K%>FZ?0GaMNP0+qE{OO0iUd8^GkTQZbp zsHku`uZDpc|FNHec9hUgPK=B*q;Pq?#Fx7{_eD{6qn4N-s=aJ`JnCr|&vz0YwqB9= z7dX4K1^~C#N5FINhVe z+w}}nAWgtXag^5EHGY10{Cin`aj!z#ndE8^(LP^>lhg_ljqPb^pl9$yT@gc3lYycv zFRaAueG?Bp&z`46LkxVHG-c>}jibMY?*FB7xKOG(vc}Ykts7oCRl0+e9`PPIe@?vq z0>4=X0C=?MZKf>nVqW59L?RLP#+dpHl~XC3bGtkh)wKE8#UiV)-~-aQ+rml=4;J&- zFcGZEYH6pIXTB2ECTaMI+j3p$w|&a2qPQHeo#g+e==nf>^|=YXBxMH-yui!WI{@qU4(_7L=o2}G14G(`M 
zQ*aj=D-Y%ohA}gTZ@ZFI*tq(Fprfp9ucCDdzE5-IG||Byu8_X#JLv}<^@pTLab~Av z+VTkRR;4MnZpS-{Y1>zTJ58!-W@av1@EAIMaJ@hFTSP5L{u7E?@P^zP!Bj5H1qD0| zARfALR~=%U5sv_!8zaoK_|q?VP9=qOf8NI04`nfb>o~?N$udwGO-^tKY4T5OX%B7M zxZ8Rg2^~kW>TqrtGG@twa0XchDKjblEk~HEBTInRb~ca}7rhQqX^|bbvQx>pu=VN? z<8@sW^`L5rVQBf&lvh$ze7X~Yck!sNWu%FVYox6>KPw%HFHIjY%$|YYf0&Fe?v>cK zPF;yr=1GOA{vYjh3XaGeKpF=n{9rTsiX#Q2!(fg8w66 znB#<7ZnUvO7&w7|=8NGzNNsTj%T-EVMFum~a#WoWc`%oICuE-;IEn#g72a)W$jNbA z?EcH+`kSJ4&o$oaAlTYUrYBiX7}Kcll=ca6$Dq53gf0ZUcN#27vEvcm9K7UZ0`=De z5xPrKlvFMfTLJ&*806>DI>jBstQdu{6@ckIVj7<82`xMZU00z3f*>NqDUSW>>8 zLrM$BN9yhh2{&v99j&j7a~4KUD<#6Bip!VOK)^uB`30AJUIGN8)=_gdiC_f5bJac7->krPdwJ9r$N8Q67Y@57Bl zCDNG3l2t=NLYhkKbSA{OCd6ZryjsNUrN;hqG!;T)2i5=0$F*G99-^MOfNW>Sqp5Wc zV>7swCdHYXb00rGcJLOzmnuKhK@K0v_~&gKWRr;0LZ^6@`XA~)|KeV1exVG@RFe2U zZl)d2)>F{WLd)X-i$@3#ipeGZPf^}E>lEjHane@!_SQ&QQOO(9Zo&fYBXj=r`oJ*! z4n9KhpDk<)pQk=8`XR*$&EYXo0hu-c1lwho$EDJc<{$qQm3G|1M&@aoiA#1Zh`olE zt%8=~%+By)&`E<7FJ$r%aFZ8_TrzIVlXt{vTbm{K56*3U7C2xynPR&BPm+4_BH%Wb^>zloE^@+ds#jmoPHkSQF$J^F}gonRMx#N^EoN!w$E{qc{j*b z9pR?w5zAei<;pv<|7hb~f=)5zW=q5N*(JcUeYE2wsuB5MG74rgjYJMAI*Lk$m6wlr zL}w`Q?b3Rla-j}Dnap#KXJ7SN@M7wDdQ|}m^=uq|FLGe0;n(Vc2^z?WEbRVEs=mY$vvI{lM=C z?M3dr^X<4Vk)R;79A*m?% zHg&37RAxbMKGd{b)3Q;0hXWkXH1wH~B5?Jr^Hmmj&>+eBrycQwOQ-ys%96f00A(?5 z%O!-36~}HNbE#onKv@th6M^1hH-j_9D_B9p_CrRTMk?cUe?sn!tb1HOi;bGujZOEG zAzt98#V3B|`gOnwxYu6NiQwEd!u*G?^ttTEJpE}=L9(&~P!5xaek@59wJug!C5ucJ zluiAxRgKr(LZ3AM*O;1<#>M~(>T-u&ztNnXw*@;lvABM!@a-qN1fA8$v!raF0$JXM56vBdnq<9DF9sg>BE-}|Z zSiQ!PMhw{?MC|9eor8gNG)@#BYcNi916kV@E2C;3pfn1$ z&i20)J>O%25AL+jRfToe_cOXmzb04{29V8M(Gk2-6&4^ZX%z){C@o$RxSaVryqLT4 z;3jZqO-eK$E96&y$U2R~R-#~!a8gQ2QCS0HJ`7r11>DD`_Z=camBvDm@pt^yc@0&@ z%3oLX;(K#f2(?`ymq_fr$+4FC(^K<=(EUpqWC~FWwGd?Vz;gS?ysZU8XSgHuCA0Ox zEoNKb;^}#m+p4jw=-Rre|1|;HD_^aq)#{inOMgeZOPfE6O}sRpLbBC*kL@uBf0Dw7 zS=kg74>wW{0S%%Y6NUCHDuW>h6m{*zh3VC zsxPRv89&5a`31@7bIJ6HU5zltMeBqCdn-@Q-)8}u?xN&`^zsb8beRkmR_`)b9&)wS zOdSUr>x>!+p*CR0kF7Uk;$f#kP#b+R3Df0>i?)Ctj3RE 
z+~SROA1h>)7BRdP!d1(T*hR7@%g+S{_7u98r#j|+1n&ur-os`g3QGogpFB1vZOspF zi5Qz@Eu~xbem^&RXwco7DXW%IMMnU><6v~LABSh2!X_@&i>#G$?M`2d!i=9|%F7tv zBPF!(%@3%Iu3{`Dk_+MnD8FH zIe2Wk!+E`VHhAeHH|IUs?OwPeK9y*=q#ik3B4JI5Zt}y)CD_R(xeCdPqcXHJsvyow zXqWuh9$VKoW2CrgELNJ&EOTYsySC_~AGIQDX^vkP%Lt7CYUCn)X(et+)gMshACSRC zw+)f{|6P|ZDDIDWZc4@~RWc+es#atkGXyiLUAxEbAK1 zYIjwykc#C->jS~LeEG2$U_zjs&7=R*XS@CQORM? zMf!c1vSki=^xr?wI93TM0TgSc85cO`T8&c1UK{a)C%YfVa`=qWg+ z_WV_r$dmywCnnaWXvu?vxs)o%g=28xGuSL|oIg%$|1wTIvpjJJDE+r_|LZSrE3u6$ zCf6>*PAn}3t5^;tk7elEEi29a>rzFIdoj$j+^Cc^=Q$`(t6ol?DH_8uwrUG!!YVb@ z83eic6?wcZVZAFM(81<02{G10#z?Y=Z3Z<72f>dcq!5ia0z?31=SYb`YS3x)CE^j= zQ2xJ_LK+TJK!Vohh8kC~7mlI!gXKDP#h#JqCtT0sDA5n6{?dyWNdA!^En?Po#wF7n z1xbqFZ=YxlbE5a!r+{DAsmv&GaHmECDXHK7GEY=YB8!M3WJ+XjS84cmb&%Up9@B*7{d%w@sl_aw#UB9pq zVh57_=IBAGsjT!DEmX-~D!>6V|9B3n|~7c#waQO2)hjZ9@%FOXw|0-YH)D^Oyj3$2jJR^SUNYMeRe zP=@G-k7}o$7)Q+)fd}byh;EK)i0eY zB+H6;av8L2q#7Ud**xNfTRa|Jm-SLk4SMQeeQZBGyqzN1L7ZE>2GH_-NOE~9tx-}y zh+RRY0Fgo!9dAI@f**B^MVumw_~3+}H`;BOh950*X2J}+Io=Le7ARZ2R^5QR z9(F#4#q}M`)qe*Nul%i8SNQD>mBfa|1uMQ+3k*O&+o+LZ^_>jy+5*Ey6l$B3j+xm7 zV!}jhN|Tx4xt5l%H!HU}VIWySQd0c~r08PtDu>UITr|&q$jh;>^AOf$qyMxNP`V=% zpc(S+>>no4REr8_2ta80@-xGAFDIV;#T}Bb8~D8>OAPYZwmF@`i+8L9PK;njEjg0n z9POnZve)gJ>igH!?68p)N%WdgwOlVS%Afy0wbUIKFWdP}+3wId!5sxxQ7?AhL)q+7 zC5dPFb&XGboL*=3^`;5$l{xdgVO9`dfvdx`nk!)BP4naObkMLFa*HgNOWAp7d-LGzk=UOM zS*fpwDXO=2>?t1Lk;_A%JjS>y^g|vF)5kmN<$343JNbI>)h;$}DETS<--F?^AIq26 za3zG*q-OX~Z^8!4@}@_qs&*L&<@H9SJ7%(0v@8y_Uy`@kq6bHv>qXJHyt7uFbckY# zJ($`RDjDmJQQoGqwbV3PgpR7~QcXtX8dvF^;&3Aq+r%}m%T(RGRe3z3`(VFXOfV6` z5r0SmyBtMqIQQiZzmaoMfCaq~OVs1@SDa>ydR+!Pt5w6^AH64TaOA_1#sEzD_n6FR za-*AL=_t!ioiRQ`!*v__Xx{;wln_79UjF`oRBm{kR&@h+I6N^d@Y4befpt5o7zLKI z?^@U_1}&-%xd$D`zKQ$O8{`?>vPUOcse|i=V|1a$Mv3)!I-ABjCVz#9`8&#hv%X@+{r`um zuZoH*P?imZ;7*W1gWKTl!JXjlFu1!D+y-}dcXzko8r+@W?(leby>s5WfAg__X7-xu z>gwuh*^R0*c?8}Kv+K7aOp2g}iGnvclnwXj&DZ-FIZdGo-Cfyou{Q4j#m3R1k77MU zUvW-<+i*={!VgcUZrnYbkdezLBGG6wS1-$=BN4r@oP1QJB)nQ9-BTeenk#jHcWLU$)|JioP&0wu+X6E^DA_Tr 
z^pb!Evbs7KOfd&&OFf(IXFaQkX8)vVQK0(c>T8!b=Q6p(kU8(H zNo2sDkprFHiWI}q!qCOY!dhD7$lrE9_7Z& zvqxrwF)Bdh&}D|(|0CV*dzO+T&3p!pA}v-YH;Ct_Izx$l#EN#{XlT7op#penJsi(qj2KD#bn_tDUgR9)@f{ z6w6b2C4VBdrCGkDvJ{n(*J?k;MlzJHf85zNbOf*kQ&&MPb&vQHeLwjyYSyfr=WRW4 zzgM3~Fjc-CT}Y*Vq8`WQ&S4~vat3-5no~VQr&A~^{FH;nHExB6H0wI|;%GWlpHWJ5 zm@=3d*IIYBN7}8zQ(E~9y$}6%Kh%cbxQxUhpDJt%2G7krAF!{4ocBMf8#B)i&^*56 zmOQ%sAW>JCiz1Rmdll4MioQhXcL^65?G~$t>J%=?O%{(Z8H@%)+W!L@gcy@EpU`X9 z_dXM7Il+BgHZi7Zr~AK|%VU(rZ);M>S|zVa;?Qt9Oq9$zGt|IZxKY(=0_`G1hwBu5(tBk#$ZZ8@DJzgA_y!IGfL4ZM z{XJ^GMrmWq#5uW6XNQ?!R?<_OOvhsx7WBQ#PU9+h=~hksF4?RNsVb?tMP*qOZ^C}f zkY*o&Us_L2-M{C)wnp7KO{NnG+CiuS!{aG5oGLfcu(xUl0p+A{W&LstU?ib3Br?ev z#o)%tw{?6PYQs*ZdMv#>dO0SM533@>bA0XQ@$&~SjSHL>Y?%w-SIg$h9pi5)jM?6A zpVv*^j={P%ut-yuI7mZ#1Np=OyGqdM9Sb=zjOK|m#-~=uTmJp;gjm5 zIuX}(xh9CX0gAz9G^v5%J_SdVF=l^$_r!5Z)p9U{nFC0#Vfk|`=kb+z4uw6FT;HPf=2I61e703XhX*MOP2dI{hPAZE{bpCO| zl~(m#Kh%MKQ?6^`qbW~lk;LJc`C-PKbBId)GD65dfZ9_{>r~1I4!&M-!uk|`x!bEZ=&sVdASsByt z8F}V$1O4TMQTA2m^dcA`sXBFgE(Jux{?n(%+23eb`*jmSgC@0B^_op3Ej#t@^1xqR z`2Y7WH9!Nhb?LhZan-OZBjer|a}Sx&|2E~)eaqHYcNr97w9HxP9;8~v!b}${M$$N- zUjq~zr2iyDfPIYfC_)F@>p&ZJV_>mazCB$?k(%kU<@L$I>TB5(#Yr!7^`cEqWKnix z%IKvhKka4|sa4XtC?wC8hSz|`iUAp=8EffC3!!!@3{W732rf7L*w`DnPl-R3*GPby zJYfcf;`Djm>mBTFg=|0ImG#uue{aouweR>+J(e?@a(V4P^TZTjH?)L<{wO>rSNUh? 
zt4T48j17=6JQ>dUn}QYl|8CPRcR^nY6(om19EbgUlDI7?py<)O+`rpEiuwB|6ts10iiLlQh>B+Ugl|Sx~){4eKyk`;;z@Biw2t#HExe^^ac zPFTzyQikAazpXsjS>Z`b$=FQK1Xj3C8n?&_$N>(6<6z#U(NZR}6PSkd8^rdhnWrS` zGk|JTZ|0suPb@@2ElW`mC!UX8Aty^q3$MvfY6P1{`0?Te^~CwCz}gnfcjm^H1{&;r_V(J*KQ8TbV6@aZvAlew zaEyAWz;S!-Vo61Ntt9cdpe{GwF&8^tqQ#p_9lXt2)XmF?^cncf!95M36sa9$KHs`O zebjF$jq>D?UOOlM7fel=Jxyv16ysl#m!zDBVk zv(%VkxO(OIz25{stY9`(TarX1Mbl{Ki;D*wYVcte?=GeJvre|uv6{Xun6mWbGzyte zyC-?GizhJQ^dBBn_Mo#jolI0Gey+1%Mb#V#;6*~v&1jO`WB?~5SR{*BSG?ZQV6R%I z5+W`<-LeAc228W5jS$85aWdmgxg_IarA4a{4{oBB)Y+5BYjjW+@2+(<9liweV=BY*QK+M7SDWpsA7iV@f+u75O2ZJnYCn#tARCqpK^l7;sBxIEF$kcn zBs9%r+Er(kMuNtZc1y4rQ^w{{-`v>y*C@My^@)pH>7S8(C5|#{qV{b3gK3@#>0m^gNW7F|@7rZ%!L_Z*nB`3oLL ziGs$Op}lqd2}jeCy5%gaRoOWaU!@@{MRsh6x5pV)u_<5jVrWQ7GmZ$ z@^bH6b`ObP_ zJw>uA(aG5({*NFm)NI{3ue0U;9( zrbN`Fyq)~T)p_K&kj*Ulz4ZAuczEcZw&gGM-9B5DcKhhU0Sz7kzwD-uu7br+xtW>Y z@1p59yN;9e!5K(GecHKSBEM|!KqDmJ;jt#Gwt9HSpG}abfv$q466Bx@;@LAJr0-EQ zwQnhI`!hHfZ<^p>?Z%dTBA>6Z%JEiDwKa@g>*Xq0eXq|@l++Algt*V<1o*dh8U`gj zWnA$wq=dsG_f_m^WRuvdYr5+D&n*JZihj|d>LF4@LT1LvhB%=-m}NI|9!Hr)W8}$@ z+F#+FB>XF{_=ELVvXAWRlY&$j!bphFAu@%6Z`VHA^jB&;~x z(IXB zuP_?WABYm|AZLYb$+LxxnGV|Qn;@rAxiuUdk^JMQ%vz>W1Z$J1|5k3}g7kx&S%%BZ z^mR4$K$|;*QwsnC*%9S^t zG%Mr6IT#?|Jmq4=(y@|OsKb1VNZ5S?Jb2ofJw2xTf3$8--}WqqQz_qy$L%ri?0$== zwZ3VNz~bQC!`GpQ#xKGZLEqcgN&EY5mpJNxpe8RufGI0 zmHIg|<6LyyK|*-4t^6x=;$EFTIX&e#~B0Ya2e+b znb4T@R>`}T`3hP+zA=iCDm26)yADTlf#oEf?(;(KE{d2CubHlZg6jT@Ws5B;>w`^Q z!?Xqt@)&t`A}6PFJqQ(wsiOFG2{fELLV+JZnoncHTx56N@j*cIaRP!ZEHO4kTxDu1sAcdXjHsjEH}dAQrv#> zFCFe&2X4Py*iGYJ6=bvIhpXrs9RIV{eJPw;trJ1z1pd{|n4$~7BUKprUGo5*3wdz= zczAGkTHCd!G)YkZEiSKR^t7_7?R_TuzEMt6hQQe*yZp4f5Q|GHe6102KI2W;<(Ui< zE%)?jEa4d!gCpP9Uu7+?)!iG}>@#>`n>ZacEY;tCnC^`>?JmNSqM zcNz(EsJhneP@p#6_C_F#xg9CFNP_qn%{j~dc7})7t!B709>Q}SkUPJp{=*B{EaiB^ zuC=(iH@OP0VZN0_n_zwUuA}5j=uv7cGfOzvb!z|k7(*E?$CYLF+!~^_Sy<~6!Qo^5 z7}OoAL^A`6@5Qf{YxJEa6;+qF=L6MpN}|RcWs|v$8r%)e5Ynt=)1KEu2lDb z2Lvdc^I6Yj(HLrH9AN%SueLwC<9K`tA{VI?Tn@S>GQl|d4tM}$^6zY&!+2ee@DmX_ 
zwcwZr0wj$5_IFg1sp#|!ZVoJ8BX6I~<%>^Sfo}IV?v_8YbNjFY`lH>iC;9ka_d-#k zpKo_F5apA;x}@qGWEP(BXt!R`bYqd920>Sy_?)wVGd?r#Ye}j2T zQ2@sNuGPN8b3?GST6arVJ2&&8#4=LMJ}eg|KMHkn5j4H{O6Y5s?JB^3IPw(g zQ05xDtSF|Mr2s|5_eJ=_0TF8C5B_;HBOLom}baUd*!=_fG z&1rh#o}hJga|HdNEetf*h0^`6*thKn?%hMjG>;0lb9zV8JH(@D6xb9L+7EZa!O$V| zvO^{cVs~X9eZ7uEx8R8c|9cWEgG4b3oRZ-`w-{Y&y*hN&4E&iF8WHlsqq4KPt;+ZJi?!JTk(4k z+z_c`U1azx8wUOef(NW?vlmigXA2J5NQfdHf;oifbS=E zehU(aBK=Hq7_E z-2U0)_G6CKJUulfuH-mKR%cIp;$GQVT|t1VtP>s?IHX_Cl5%p%>!WsVfx9!UdG#to zo2A$FwDcb@!J^dRVb3h@F9G0>7nxahEb&EY<&zq+oU4A#q7MbyPi!{N_{9}IIfE0t z9Jqc>VcE+}s~azB>sP1qTn?kaAWb)>*C#YxW2ueML%-QBQ;+s08^-Nk#g+_g zmq^>ZgbuB;J&6Q%d-E}n<;0ze;uxD5lLA0MU}oF|JwSJf_hlf4cI0T1&F6X)o(D;I zA3;g_E1dc4;j2Ch9So{mD0H~Jg@J*cZ1`yKz`D^SQO7TpVb-_BD_R~a(e8Dwf8uYLDoaUb=#v^LI_{ich&YEFH99GfT0}s>6=yYoN!=N`w?^^Vi$gp}Dw(WGuEce$KV} z)O>@%;WOKSxd>@Zl!vEB*Td6HxlnLKeTT658~373_aK8Uy@TgO?hd)ki>VWlchCDU z(alUe2BF?$%Q(KWeDTij6ZJnbvBhv64sstdmjepMvxZ)t$$Fa_hV>eK^%YYGbIqw= zOHk`r#5&PjiGD6e0q%^TS4b)Qnc*`L!-9)5_|3Og2yI*3MlRM7* ztaIxjypMYc!^IeFETV?mfefh3;y6JHXch|wPS**ft3%Plto|G$SW@%Y&84>IiVOrw zY%H@4(TEI3vXuD0zWbpd@Gp@J9>LWw2j0{Juqy+}W|qja^)17O&S?AR4OpHvv0>?+V zY_HdZ_FtO3oheMAKQ#$$^o?i(V&Uszyw*(Y{#`!?aFaAreca;ubxW*sB*=eh^N{fa5y^}6PQ~cWjFu&aG=6BstnI_m~>zJB8=P~2-gpRCmsjc1=@u3=dU1yl||Cz_a3ut0- zvO^)`IrmzYBaiL7r#}eTEaf`}fyqZ?uBn=iyv`9sOin$0tOw=!Hy4Nk8=XTD5@;7vltD<9l*b@(}YMF`$=v_8UE%Dv;mqmMWypIp=FY{)|d=BwPA=ohVX3+zpmr zZCwr1BNmmlL;>;QLA!>#SC?yIQn*2h+X4q-g0RuLsr8&`u|Yi{>}XSDsk?T&=1>A= zdEaiCtz$Vo6#{}^PZQJ_TSy~8G*3ExI-4LLBj-?I?N+T$tPFa<0OJI!eO@ecF}8V@ z7}i2~|Jg4+Ewp*w08>vOJ6SV33rYU*0avGsl<(i%LtL@k9XAUJ@8r8o^!9W@9=6^( z6bSkvbGQ8>F`#)8FHt*Li>gu!7Oc^9Yr(3d+oMcuP`Hm-{wRI-5~AB#_}JgH9GU}^ zV|a+JFXEOvrN(QrTz70QRbyqAN@Yo{KI@0Rj#k!6RyiHlt#`iwiwyAx3ynfIpoNquf#D^SRQO`Uk zj7k1+KSlk=jp>@%SXkcC)-FnJ6PvX{CgnIil@eLh5dG}5NGY-?C2X6Gv>Zf7 zXO52*p_txkQvQHifep+Efyt7SRFYg%Xc%H@8ZWJ6P+DIZay3|}YY9DEn%f1GGv z^K}`z0ve@QD8i{js!;)@<4rMU1%*r(NKZ2{j8arNYP3VnMi;%D zpC0HY(IpMl?9{3hyPu@1HhluRJ6i`P^F#+Dj~*tu-j9R{JA9+tvEe*fBGxo|kVoe& 
zke&77Whny9C0O}PV`Ok9942At6-?^eb#iB5QPnr!lk=H!NF}ClT&>R|d+iTB161kM_UG(CYTfiP*aSlw>SjlXy-LG$a;H+c(jU7fEkZ}do2`kq}p-MRLpOc!NlZF zBrH3PKAB6rkPu5E%FPwGb&43d26=P}Nm7(-5bkzv-fVqN?gSj0&k_ z`xEN*eJ4}xDd2=C`DRO5cAzWO_cTaK3e^pxc1tM8X^Db<@{yCm8dLhCac%#KW@h3UcNHbveBgPnQr7Fe>zcqn`)bP8KVHnHehwq~3K(wO#Q#v> z@`>$zhk*DqC_2zA*-|4Q({_JTilr+!Q_pDk!{!HfFRu~v43>JnHIGP#Ekz7;EegbE z;(Cqf38$0L)ZWMq=a(yA)1i7#tzcP1Uj~FR_bIWf4_($WHL`mLc{gD1h&=S zjK8n-B54$bwT1TIwUWa*~ogi`Gw_pb{kF0y))bbV2aqFjm`W^oT0 zi%6}=3SeFSiW3XCsI9ws*HXV-r_iK+*~9E_(!ZZb7osfbXk$M8^za#iO7(IpEGU??BCVEbL ziOp3II!yili&=^mC!8cH< zrJY}=Q7H!7x=WBM_)@hD^bVmRW44P7QX3(pYfCF;=1!ngo5p}N+>#<{J`Dop=OjrW zrbJ>4wtPyM1eWYFogy;Gfrn+yi)gQB{2(Fb@ge+Sx=)$-R(yK&G8%wbD`iHhGr1$( zBz@xb&J}mzBfxO8`~kv8bEZ$#bx9Fn%C(Oz1g$+|MLy%=O;8}5IPX;A!qoNwEat7G zW?PLH^i03ArG9px_Xul06c2NRcg(XR<`AOpwHoM)5J@6wD&?yfW=oRP%M$IbnJ5TZ z2<48V(M{s{F}&r9>jgbMGAotkU*bO(WPmYwV0%0uJGQz`l|m!>tJi=rW8P{eb4p-A z>!9&kAz#{Tnz42%JUAQeVHGU*V4j_6zdGEBo9L0bzAuU;mG&FJoHpHk%9Mtj zf+Tm;NR!dD$c0~^j2}{1_5&qEB$M>H5<_m4Z!!cROz;!r7r8I`PbE98*v`4nYTAGq zmnF1lz$jz`lzsh_vSE>GJSKkKithM@RB%F~SckjVTEwAe#yq56a&o@fXt7y+y=y2E zk7uo;Zro$tv^S#m4$O&fzYK8L$ePaPE?9s>u;c;SR@ATGY9Np~%u=2i)kMsAE;t#4b7k|bEeeGg zi?!D2L(L4?>DgL|{-{fd6uqO|C*{ry$z_}h*UaDU-4$H=4FIn;@& z6>7h+In0ugr;(G@Qg9IJ8pTR^hZTAx?gxF*3;H7&V&E%L&SCUDWT0)hWz&>3GQ~K& zkhiylDSsyBCW83vGl}cI)*O_3WfY_nMD|7m2%jr-JK8 zTtg8!M|h?h(!Wc8<}=CZn$srKx>B%QarIIEp!+Kh-!0gXg`SsQ)iS|q<~DLNlw{7#&#`~AiJBtDX0F{R8c3{?G5Eh_A$i89fR$ryc|xt? 
zVu3^ghl`d_ocrjp6B^)R@GwH3_Q&yfESu#*g+{%}MB3+vS6SkMpXzZ^`n7Y;Esgu~ zn6&oW&f$eW*uM3vPvhQD&Z7t2E!LMT(3SZW{Hq8r zmi}BE`}C;b9ka6f<}sDYmvEd)Q<34H!?x=fW$0uX*!C9h`aZUf3cnQe zU_nb$CmD>eemArJX@0GZR!pvjF-XR?2OS6jQoXU*954TS*AmBMTxbv{^bZblGfyE_ zS;z0g;^zc(yTx`3R+IlvI8FFb!#hi2s4mhYRr0f+=;~XV&;bMXWPaqA)$4MF>{Nvc zWn|QBN}wOwvK7b)c{SbP5aan7R%M);I78B+Wn_|7p^5qmI0vmg5|W)33MgclFuccN1B( zp0(F($mHg$o+w%@w0L@)Ew+FuU(s5;S>-#f+iqOCYg)47Va>!<0K^|52J{N;Xz{c0 zKR1fPVXuFx!wP^o2(|HAawwha!-GLV6Ay3 z>w?Rs$wRQ?snsAH>CGEr=Krtz2mUQi;ER>%)=N2L?BVLZ^=jyCJ_GZx5s0@sW;BQYu1m=#sR@(fg38d`Dg%1Iwqn za(rES8(7JDvGU%qULI_(H+>!A`;kgg;Ks&`_&=JnZ*H%sr|J=Arw@(o(nCz8SF(mP z%Y@khpW)%(`frTrlK~k2_Rg_ZVO$?)HoJ${`{~;gAqCQzU-vdSpLU3evWI!Kl7OaY z@pEYP0l2jes&`>syi)b(Y}XLmjN1{PM-^_8d3my#FxiXTCC;|Ko^Sg0{MuaK@5E!n ztqQ7S5n&>g!@9-(hK&%>Qi9sh+Aj z1)!I9QCxEFS@7qLr`nels|`kbS%VJP`Kq+qZ2sZvqE{(DC|_y8H17 zAu9tI_WTRZ68W$nDXO237d!2~nX41h&W_f)?LlB`L0n)bAb{T1)thFSeDE}Ixcfar z@AcF%s)|JHiEAcrxc)ssRUr3d`BW8?i?G#d_c6{!Tye8Q_@wfowR>@2a4NqrI3d26 z_vw}TWA~83%>h6D0F5TKUWVC3QgUa=jL!sXFk=F7w=m#yxwzfc{qomGU*;R(gO)!b zOZYN%Ksf@xJNv#NeQxLAfkA-oSU)>-d~iZNOVSwJT%9qo<w#sncB zOxZGcf7w)~TF#yKcYRB6d;Q=>(v*wvj`~kRA-8cer^);XKy(%SL0IJGvcUt4?^LCy ztGj9W=!BHq%m-WiuHJRTu%pitFC`%&4zJIU3aP$Rw_I^i?d@;7g~KC@-rWd)4XYU4 zNc|5!c3ZwJ7}w^fD}%^z+`eo7;ckxs%5Jyuh%rF_H8+ay{~-2%pj%zGpeb;`8CZrH zAvTdm-}8~V4{sE!`b|_+-u`iJpB7xi8m!*07~t|v?nf1dny!`{yd7B+=~Y1<6cAAp zP&Xe#&nB9eBJ=&@U1RbF17Qp2C%oH=3=uI*TlVz>}`wvu;@;_=~ z7X3W_O|=o5a2v(Qa!S99kHu(XtAGBCE8U^J(g7n9Sj7FGu8%pm8AXLQx{gTeGEbO> z6+REPbasLrx99~)sX^T>5ro@Yu$@Cdght!Tl_AqiKv(+#rRIb42e-HymGCvi~Zo=dJM5QxZE=N zc~8!C+gj%fYyRhB?~P@Qc^oEBg?@f-F?;uMJ67`W>K5Fnk_-m-b}i8h*xBXK`h9!Gv{@!?PKRInE`2iw3am$c^yn`b!7E;${g%V%asZbRuux}AL_q<$!{7QRuWw= zh+kaA2XGUyI}R){dI0R*F&&*g^Ykf>4CT`3Q-{Jui>S*XNwpi+drEC;8fhidF+a@& zW@p_KC>KtTwyrcSWxtv@hi_jH1)Pr&2166H3&-QY?i3>+yzT{~YBlmTC%4?sn3$9& zH@Oe1QKx3PGRO`xGQ4aw*=NKguH9M6W)G-DS(g=Aj<%YR zkE~r={yL$CT+F=n{&I8e9t$CmWDy`GC^*^=4asih4BWE_nTt}P8zdEG!><$+j5!dpa70TyUyFyrYsu!7x~ 
zSb#yl^1BA_*B@Fx;)$;FJ8@BH;0E0%bKD254BW0A{ur_w_&AKaRS#3O^{YxNQ3s1e z5FhT6i(|-!Mpm$ypRR*G)`@PmQEV=K*Q=H0ykYfb$BpXZ9h2q&5m2)NyHc9}k3s(r z9{Z9*3BdxX?SYb88uO45+v@entwe-(=OcMg@R!fSYuyPXtRIN8kWayiC`zuzj^wm~9Jl(x{-_^J9A6DPLjO{|4X@>UXC0YJ!?HiGvEDE1!JMg2V z)(!*p*weM)(W?j7e96QkJR}d0U>ZTng{Z15v;as|Xn4k6L!wM$pK!hTVH<035Wy{}UFOO6~% zcHAwSep`i+CZ*^_gZ?$zai0d?|B*`4vuN;>QhpM)A0VC#$M8aKKx5|>GN~J1sYRf<{$(@1#VW_c)r?)JDbrCg z7Bvm7em?>GE%e@GGWT%$EefbxBr|QF&H9gJOsQyvg2kD)dDy0UZ2gUU)fz<{9@rhQ z**d)a;w*B9Y|G%lOxd%!lIz$qXoA2k#rWiKhHh7r`P-#=LuGWtnADbjCF{O5`nszYl^#q_OJ3ijt=uUu`d97B(QXDrxvO`|6>Z77N^8NNt|@ zZodv@4h%k~E2p*N6V97+VG-+Td_*!GIZ5(!64#N_q#70agN_ZjWSd-@T0ZUxU)rs4 zyKWM#wbk{%sYi+btuJjQmqWt{ImCba=Nls%&2{d)Wb@Ztg)Yrc)`r{3=3Jqi~tW zP^nAFMdz@tGj=V3BSU;iP6!64QwCsrA9LmwynG4H-=%#vkSfVZ_|cdPL_vyfG@cf8lv_T*()(0Idl)noBkj6J`B2Dom0_=$OpN$=Y%(Sk^>F8MZ!| z@Gu?pa?T;T`C|ri#cH*x)*zMGcdQAM!U4t=`gak6=lI?Wc)9+QE zWRrv&ReDDEUyJqs@dDgY=!z>bzyPb!i{g!fiy7Qs9eSu_B$*Nt%%tEe)f&seUtPXU zIhVSlws5hq8F}3UvR}KusipH}Ii*>#(=?Q0RqYyTaxATuOrN|S+eqlojm=K zg4rxrO!Ut|luQ-}N1^-Ne;{(c-$8e(2G~sGpYOX(LR&R)cFg3iThz~3&zV0zHVup? 
z8_zlJ*hy#`lg$cq>8#}pi~Tj4n{%U}WPk@N_1h50mjlhmnCh8}#)}57GJ5NbX~)eD zzt-)CmdL2<$Is>LSl4tf@4ruA@vMJYjFTu2hHE-<@>pB1WX@{I&`BI$#H#*Z!h|T= z52z$;0nfs#k`O}OOSMa0`a1}9bJ_=1T4nA&kKc3N)v^Y~r>Z#wKyQL&nU1U7nbh_pc zF^ohewdGLpZO1ISU!kLq)lN>n8oFGaG)ccIl*IO($h{JYtL)5Hk+d_qpM6R#doAOp zQnAs(q^P@1iDj#tj$4o#{O0$J_nWah5QF{W3d2*e5GV%mwU(e}b=)g9si2+UpHdhz z?3@xW>#9R*?9=EoJK43G+Zwo4Al0^8)6JZdO>W}oh7Bo54OtAjGA{GFjg5FnHyTt~ zth>I-q~;`f&+cR1@ETa)JK)Z*6Sm`JCT{(`vL|2i!5%xatM^F{XuhQ(S+0-0zvG$| zPZ>P%PyTZH`G{qW?~;LIy_mkF)tG225@fen@y_&B??lzF>e-UKpUL0KU{iwu$x4G> zt!c|U>}9O?p2{5mT7(3VTkWTz8dg-A*lD5E;N#79%==!wCLMJ1S+Nun7zu~Q54j4F z?f%Kqyo5B|KP=m*EM3y!oBLVKwB4ZHX%W>#MD+E>p--_(DCj%M4A~0cr19qBWGQ|r zUui(z+I^C3y8pyX{ph!KELmT+6Z0UXI{#fLWj@KjbLuq2k?6Yb2mW(YUpp2kA|vrm z&k5~zwjVdIq*=X zTo5EJXTusq9hOM)7dNuE(%Ifp=if^WOpe1Jl54j~CsqSD_sQieH0dG7PmGolJ$g*bCC2>;m4u-Pt-TZ6d2AC4nA~lI$+p(|M7C+LsZw@ z7~TBdr1BC9$2d2UltDxUbqgY5%lUH5rgp+J+!W?~R7lb^PMh?JvSAyQn>-oyl2FO- zbJ?FM@^@{r-Ndud)d5`_q0NN5si6ZFlVgb_`9bANJ}MOneADClwI zu+^e3HKG@n5UvpU(#p)m>T)TFFyLp>Nx5jZr&sb{QPALIY5;VQP`V*zf6lkUOH>!6 zP3O`VRL*ZCnJoP2Ec`F{#j|8b=QFFVBas2Z$S5dW9o{_~=g`d@7FKM$+FDgB3*|W$ z1lrY7qOe|=u|sJ2$);dkpI|-fnT<8W6aDPB)#hcMj^4^MZ=3ZcgW~f!IN;g|PPC{|x01@(dD)Nco zf{~aSiIm+Ovl$o9Q`@^1-~0)iW`9cnP(vIcUAK9-vlBPjJ0zMC{@rqX2t!XFKyro4$M2!@tlVRT0c`sha<&}r0;Rj2vma|LMFLctR zPQ4=<;Vpk8Nkki%6*~=jrGKDu{*=WbK`hl26-+v4Y(pN+srNTiy>y^?)Wblit>nB^ zgX4RSw%WFgHB^aM)rHf2K%sdZ`*qUMzVEOad|z(BTbLtXP-u`Ljs8{VmjXQt{k~KI z0um6D%;IlGHKSC1T*gzwUVW};!B1zl^!vuG{iU^)yQh$cGJGJ0Th>j4^k6qpF)V2d zr(H`Yu{~4EO3a?T{@kj|QnjWsJ}hHF1bCTq|1f*-^@MYG8Hb+`imYqG)48{DRP*e) zs|Ks1>uUn_r`13J@9Ja_n<{_#w(V16{UX~gmn)4Jth|3)kM_B&t1)|6Kj=vD1lB&> zq%EFF@4=?$kxQ02LoDQ{1a83AE|w`t9wGL zQ5a}oAE#}Xg^UAIG697?CB1!aTGf*jWJi^1Br?40ryK9@g-zeEB;D}m^BNf z`5*5_yDJ29>SX)q?52Mc<=t8CPtKrsJivAM3}+eNaq6)$ia$z{ovp35-S!1l@m&^; zQ%4p-v0@3WEG@dYpE#==azCi+zSIQ0DtQ9YmN=7SwPwkUWXs7@PFjFjaf77Zym;?OlvQo=XLN9P-nm4 zvU0DkdDx}9cNn{~3PmOjz{Be(QlO1AO#RtQagyWh*0*aKnHj>;N^d{4d1$F!%~Nr- 
zFH|4I^NNomQ|Fqn!Zv7Mw9naTggsHoX>57#7I4E=_NOL^GZXx%n{s0XF{7l4OXm&x z+cANdahcO)j-s(-ZsHQE3AyzuwZ8OS*=w#ocH%YApb>BhVmit=43eBWvJ)r~0RJ$c z2hU}6Ks6sIEZfelTYl~)F74$oD+83u{QGcN0o>Wle4e8>A4r2^6yCOp0dlG6x*3eT z>2_&9)%{h2nN{{AB~wpqpQh>6{PS*Wfq1Lv-QKdb|2~o|4S3M0G0h^Wzf&-%LDtYP zUImh6FyhPMfQw`f9rO0%o(u(9^_Vjeagal_BaQN8a=Q_0Vye$Vq8oUu2=FiOLtLrL zunSC;)Qy3dCCrq1ldvIJYBYuFQ7K4KoX7WWwVhKN_sE_+)-LRjknMg-og??}-!3}& z`W&{7f0)EkIlwEolEI0Ct69Y;MdoNJil?~NR}3vpvoqKET-w?0SNO#M;T0m7@J6gh zuWf`I7+sH?lmvYLhpn%Uio031O$Y>c3oc=Bw*Ucx%i!+L;O26rFag4^Kk?(Xgo zELgBN-?{hPcivrZty#1BpPA`jPj_`y?b^F?k5^#xgh~neorY%34|NN8WjNt@gXw=J zQ8ELs^MsO+K$RTFg;CW4jH5ltizzK0^NmaewkGi04fZcI9kc%zO@dmd137XB-5;9` zhDRGPgT{&XXC`Omr z>O(|&iF}TJ!$fD7ERK5H9q1ixcO|b~MdwhVhLeX8YYrHu zJhV!5Y*7W%2Nveqbp$v4k(|I3p=6Z)T-~&cSD<|3lIo19soRY9m3b~m0syz`H*h1| zT?egntoCSpxBD8VZDkOOgD^PYM!eFo(>0G{)IA#kqk&Lr8;1Se9>r3VmE=%&0(Y`r zbdTrRf`~gJ7;)!70Y4OFV+L5=pyUV z(H3+yM{npB6ZrXSBI|FyFP!e9-=`$F^HA_6ITl=n$D=D<|b{ZfR`Z{}B3n=DoT!b71tGXg-X1A4s&BwxyODuT`@}PgF}Q z^5?#99e~MMPXFbZWW#!G|ALQf2Qj4>ilU$!T+%6M{yNe@c4=lNz*S+67u3;*2esHg zAZTiJ&%*UT{IqbJAeSMql2ZI-(Q~vt`BUGgr$5{`##!u9rK(vn4dewChjPpBUSz}l!<7x)}cJ9;<3@nN*n8Q)#_2q22K@x zLU5opuPO=AX7bSD+$TEqz?W+^ziVJC45%yd({<98nn;Pzapd#KPh+<2nhOWd%&({) zRStfc$|qBesYGEBsw5uq=QDCRU7x;p6xFVT2pVZ=RtCB-6?n)w;CzQv30MVaIEQpB zbA11(4AFLQSxR=scV%TV-&R^dMj5{Bq}c+6yn#!#Mmi9}tLgATEu`GUW+u z_{7@Ew{un!p;4l8zEM>F_8xu1H^RTfW9f&e-cNgcYg)h(>5QXef$qMN|E-J=r2%$# zhZ{C1U^qmq_jA_swFdv+9e@I}DDxqKmbzjxoOqnd9

qENI^qr?)KYdgVhV(U$97 z3Dak<-KlSmj(Yf= z3%MOn6kniyh84cGM6E6ro;ErVx-WNfZ4KVMj~>(PgL72Q8KOfLhe$~!Al_`#6<0LlkgV~&&IQ}umOKekarsU@$M!(5GeT9pQPY@G(v)mXfhk; z@c?>7VwF-Of6@NDj;Caq0);IQGVKA!Ty@V*FE}4Tb}eEEVf!mQYVfT)*HgoQbU}MS4Oy}G%&0+()mh#&~qPQ|aQn*Bo zgq{g*2JfMvtF9X>gi$Rrlr{+P2`yZ_V5y+ck&RRc}o@{=kf9n#|q* z4g%uMCZ@1p3G%W$pkuPy3o$uZxG=78MZECwr={qU6odI38kj7bVPqKcs<)1)_Kgg# zf~`#!0c1GFT;_N4mE;bL(iI0Y@gXlbzq9+=B4M+^cv1?rCImKm4laZ@%Z0`eIDPsD z@7-#hnqR8{T`8(-r`!ri*nf1fv<<*$TI6cAQ+M-d^hQ#V` zhX~IRI|BMA5R=ap34(r=E}AjG1VNpd+k{hs2%DxtDR07_S)m7}l;I^W`9 zj{f*`FaK!OwT=a&Q`yn!*BIQdk@1GqTWUqaP(R*w_oUXnG%u&6d(nvjv;817IA|$^ z+GQ#1n{X!|EWh4Q2S5 z4~&9&q^0X38KKeuKUq~}C77MG6%lQNE}R_fBY<@wDCCzNC7g}vY{3wW84-j>F6PRr zHvP^{Gt?p~z38yXY=_4)Oa;NG$WVnU4{lspq|h9!sZuUWFPO=Nf?2vNQ;|>RVpWaX zw~{NQwJkq3BbTfemdkqiW2A4`>ZSpMECxGS=t}uuKdND|B&)*VSNVjObNbuxK!C8J zI+;<&+PxZC(`y^OJcAGdQL*x1$y^DHK$q2-A~XY~lDbYAA4zhpk3!@6{|5&bvs4cPo`ZsE61er ze_lDrf2x4BarCv*7;M9GA#4{YkPbtN(Ompx8{LG$kw--{&OXBl7*8(Pj{G%6YaP() z5LPc~T_=BWfal_4^+V43G`6=<6sc@trlr7{%RI}R5xSzj^_2vcs?~}C+ZGc|Hu2g5 z+CRqIGm*}(=TW+47Rt2i6mP&eRKI*|UneEvN@+mr4oJBf~u<`Q+I3U<^ z9+z)fls69n{Ce}&V>zGIeD5^!S<9S^VOARv8GGHT_SWTu1s8qzhk08Pi5nSh&>?Wq zQ(_U36qlCjI}&)V`PH=n=Ew*n5mxhN`E-+bFQ+v=bK%f%+lvrD@d(r>n%lfu=a>y?-(!$ePJOi`4KpP@md zYN0(3-|RcUKtbOPoW|rw6osM!>8o+?`e{W*a<5^h>0Q&S_e62IAki1p-&g)<$i(>D zJWI+uhO~0gN(7!t@pvJ5d)i^M7J9KvMPm7w5e243YSr{|R#AAagNptpW<45NWcV^b zT_Q$g#$#hww_lq+q3d>;-b~W7dZuWQ*`kvgir27P2CmzZhOH&C)8%m_G)*4#4d(rtQea*A2V<}XxpLBgU^_U zbSAqv^0Rm|aU9w=a}TZk>G`j!EqbNOQ@5`x)2W2Cn$gU>Z+|rSAOBEfePnx(;QmQN3{P z(j%tDF!Q`VJD(-UxTs>`y$@bA`F!M9rm|gqK6MF5j+Gi6aN;Mn+$OJfz_d%4*g$pr zy>48lX43oq9=%Tp*csgEDrMg}pg;R)rW9AjiWH8JIWcsTDrV_~D&a(8SE&RNPR22n z4paYFAk>1EY+bFX7(yT!Lwa-S@85@?VlwerRC_(Qfd)`F%oc!Ptn}sR{$H-S8AsmM;bbTR=@7<>CI`(P0EoeOMo)?C-j3v0tHloY8#52 za)dv!MKENiq_occy|1b>rqkt0%bn zKq5Y#BSW5jpf6j0&PiwiT&vwR=V}Exf4VkuHr974+})N|ZLxUpm<`-pt7GMZ|4yKK z>7cr;r>5UkpQ)`2SaM42AMqD6;s&!bd^mR9ls#y(~>4z<>}@_tC=z&i&uNwtO_NtBgWG 
z$S}yd&4jb8&En#;zneom&4%#P^WX-a0?Xd;Xn+_HUGCqi&4>8UoC==YGM?9-6EwOC z?l|PZ1?sNLt9OqV?j#$v$nU3XTB_s{#SLUfofHeH(66V|S9 z?4G$OGa=P_y%MY2@Qr%O#o;XS91^}B4JzHsE=w!7zxZPBkV&)gpAU!y>#IgpJ_Tap zMPE2p@89&=MtJK9Pqs#L>E5(&*+ac1)EDi!awLJO-l9c7wGRpuA?@7XstFFp!9#5w zXjn8+5{tZEwrc)z^oO1{ErsHSJ?KwVx!X9A2$5W#@&QOHPvsAhE1xfijgeA4y^WXN zZ*}|Y9KTTb+pnBw^ubDE^kzyDV*ByB`2NXag^`rXh;{6gv;7&a4Fssvy*QZDJ_CpxUe+0XbVin89&-B;AHH^^q2u5`C7SqtN43Y;h-%fDD!=nb2 zDh!|60=lj-U#aUi@Ivob&+eS$XX4M6ad%yW1f2+UH+Ft?8s@JabTwWDi5!91C&Qd0 zbh%6HxDZ;r&x(*|mV=Z!C+GX&UbcA;HSiX$Qu#BmshvkW2W;9#uc*wv%&l4a;FID8 zPt#r549(Btw>>`-&}gtK>h(>aod>$}_}rs?d)=mRZ(#KD>v3f3|LWBTK_3MC(qQh@ z&~l#nsMUM-)#i?n*yf2pi$+tr$7!b6$BIJovuqx%g>f5`RTR_Rs@h!!Hy3V|uR&XQuK4`UNCw(+M<2qVn6Cxn$Z{R#^(BbGS*XvPxpZ4BThMSKWKI468D>Ip` zHZdnv(l?Ji2FW-LM+?KI=3Gy6&G>s6(r8>{I4J~r?uulY$@*n$BABhdlqMJ$_34+S zaPdYH6Dw{m4aGXR%4!53xvKXa*3%Aff>kMM4=pe6JFRx%aqZ%c0uUT z;KW~m23*17p@(xX$iMe6Q!fRcWJwWVX*pZkW2ZA)QBH05RoPf31K8XB?eY^I!IS8? zDv>giv~5P~D)Uo^8ICn|LI8f;HkHA=4|a0FBD6^TxTf{|N`LV{T@>qboW#NU)#+Id zQ9rr*m*#u#Zagy1-#31{f_ja??CIh)R11jJPHtS8AET?Am6rDhE8HCmdh;&{c75ba zIp*9|HLS)B&;xTh!`BjNZ=BI}DsN{0k(gJ-q6K~&O7eYQ5x8oBS|F9N^kY`F!( zZgplqa~?&q8?f^5ZT}fYW*%fqVE-JJ@@Mk!?VI=gBh_HihWD$t>|e%<)-sZnE=*Wn zVqh!7dRdegI?BkuYXRE7>~u7-xEY+0wdJ;JAX%DIjiZpX+1rn36mH_r?-@$$VNeOb z!`EWY2N}j7`oyLM{EJ6+1Fcy)e;u;GYB5PK- zj=V6Gm(MR%#9SJHg=MbiX}86>x6k$>CDEggl1A5T=B_^Fn@NJt$gZ}g*;t~@BUOmD zL;ftZauSlP`K488C)_plo_G>z_@nj0JzXW{4h;`WT}MKOHsv2@CoE3X)Vu0o#_CW&O z)fJmDd*Ki=5=)=e?Ujze$qyN|KxeHyjN-`a(}zmQ`2Zzi4ge)HtO`F;qeR8Iq8hRo z|K83K7%J8rEpNtHj-h8g+mN9d6|g5cserWn-NUbM`k3xM8?8hOt)d|!DMp&+31vdT z(+BUy6k!(DRC;?+wZXexkKS8d z!Fjc8dBi2e#^Dl3Cu5+arxq#j2zG2;ZAV57QL1wR=h=0vLBMt_hC;l0^#r>`M>2I1-t?3b9ZI$qDL>S~zUYH`h)V@*2azmff0KSK|l zNzVrv5gh25_~^+shr8;byK=EYr8fGP!$>TmG~XT+mVWaQ?o5wwMXLQtNy<(bz5FfB z*(n(VU z9gXM7DpIOx(pH;eo*b?Hk}S5GuH6f&iye}Xx8^WU6b=_!jaMGd)2UlRg2R+%os6Lja$6};Vj2>WaEx%`ajbKb&4cC{PMx=`B5vCuGvW=+Da%2b*cJjOc zA_a8iIr4sE(lNZz6VAVCXw&$Sf|nkT1eNlARERQg7HAX65y128v-IiqWG~1OLJr0X 
zFVZtvG{`TK(d=nqyuwSs2}5k#_y7kd9f!5gCjFWom`x@`{!2%#^1;_G$?Z{X|07B9 zkL2{(hXDKtlMXQdjzLKFjp08B+E1CFCy(~$O`I#{e|0)xsinyyc+boyfzdL63Wd<7EB=qW4yQ-8*L#ETqpr{9oQ) zyAER%ctv;D!F9?jhM1MLl};m7AXBV@KnPG(G(%rY@irNmedkk=$q;giQ<)Hsv8au?RT){Kz^?M&l{9 z7@&XD?+rEX1}mFJrZpcC_ACrZWab0je1sYM7e(*oCpm9bFQ)VifsvSjC0W`56TI1D zkO6ft5`9xUYR_-N@vdScCI$y2qaQlrkG3LTa_qX(WK*%&SPT>4ar6`18f8}r$%{GS zcQJcu?wi~%5_4Dk^@qc(trHiV^2h{)=}gVVI5J)WMLRq^x`EAocIyKXp;Uq9;7k(7 zd#asRxX2q&#UJI-;x)+C8Sj`v+Mshtvte}TjGFd2u3MFTP2;?gKy2c{pjhDh<(&Xe zJPrfF>;1pVL)VEmezN>8k=~szk#C1&WS4ug>{v{H$zIYswc1DK?K0Zy!6AeR{h{UV z^m__1bXjDW{RD@51)lO?u`%W>4#B_h9S<*`Ute!HvSjJUo{4mu9TVQ1Z8I0NBUbvhgAu+z8p*yR^&cyeBk+UNRntk0#z9PhXZxhPJM8?;&#neY zirS2$3w4a&enCXn8z0r+1;^f&@}DzyG0FF98(VH~-TWP4l5cV96U)oBA+3U16EgeT z1}jzXv>5MG47lYQsVoq4t?M@KQwmR$gu-Bgi}XY0llD6xa3@aR{2vP#M^TdLu~;Bl zliU8oZ#)W{C^|?GZPMac?qHBJ$M^P%{Z8w4{*=)!do2|v6u{R-rUs{>U?JKc$&6*6 zhC%+{=R3rU8^MA0necSgf1FzF4>Y6_nJghGxr^%W{S)4fo8y$p0ZYIfSOgp5)q};t z0QCFh_Juha6TF6CTg%EsL)Z2k446V#VYY$bIBb@7QwBuX^zD$(}t5XnHta1j);T2gh!X>Z>kC>JnYp4gH=|uO0=>mHKEK zrNxtKH%%+nO~~jE-j)Pnu%3*(H&B{*f2zB(pF@Akym@k&-ewBl2;R1w$V?wzf6H*tfO3 zq|EIY+iU$X}t|gv`Zud3Qw|F7Dp~q-em6K3RVW_}a-c zNudzKVOuIS3Q~;h;;#p}g)%{gLXdROmp3EvACMZxh^drBm}-upJlUjJ{mQ&vPG^UYz^4ZmD zbS>SVp~67|Ufr{Gsc8nq%FMJF57B+E(zkrbGZsA$TcnfzhnE>f3Cnuf8-9J-lNu0gfCSO>7Z$-uo0pS*oFarng zAD(YSqdz+1?U@$ri38&c#k7QrY!WqkunZ+Xhf9x8MR;D(Zn?kJjcq9f?uO^(n&V8f z(I}8Qy*&i=D;FdcHUOrvfEUb z+i&WZfPRABE~f)DKQdLFn>~UA{2dAOwZ1tGo;cx7%Y5y4hOh24A$^_kcvpb6RK=%| zvPuWHxuJXpm1rOJ^*gj<{s6h)bKk6)cWQ(F++VfHbEmHwKb|hQF$`!dN3VPa8~(Nf zXwn>QjlP(qVK+op{SN_&v?TSA6-olgc{YLTEy^SW=<3N>Mb-J33TTd_3 z?P%lL$HbjW#DreH!vsP6G=+UR=9xG&O;lhKb1M;7-pzM>0{*Y*Y~R{?w=dxm;PKqw zrbR%=wWi%J>?ZtjnwqG!4hcZpsCBd8GS`fB3>1UWuH?|z(lO)E~TxVh>&+Eu; zhionWz2QG-`Q}{o`ogp{dl87{gQ6k3VDas(h)?gT_6)z`i6DTzQmz+VwVyb=wh@bI z4hytKE)56FXU+L9ZTa?lr^ zHH;P2rwwvI?1|OylywJmIp4}`7R=<(FX?C0`$ZgSXPPVZWL`xMzkZ0HM(p&~J(Z|ykA_#;%VH3^mPbV&S>!F3f%XZX0j;S3{QY##c6>&)9 
z{|rWwURMLo3wpQneNh)#ktcE8=xOw!uQen;9NXo%%U(BsRY)Jk&Q|(d%(H6#_M0TD zgU8Gt1M@u`foB{>F`bfj>@mjvJ%V0#3lOm%7E>I|FP*0Fk-sEltztEZK42L!A%Y`| zERrV%mSZ~S#N}AsO4q*CrU^7shnA%=AP?uooues643&x`!+Y}4Lbdb?P@D*xgz_S? zog8WmV|feq@mgwxn>UJxtOyH{q8ml)T>CagJ`{xp)VWN|MileEoTt`#zm4qHN*+ng zToK%CBlcPj@Hh(DEA9!#AyfzV)YueQD3!Q3*Xo--+^xJ>E8@8xp`Y#tN^2=E}V%xhB^`Z2|eA~t0A>J@(dlKJ?_G9dc z9pr<@+NF3QRv-DO02XwW3a4jkZ5Bf0E< z5HyG+yE66b@?`hOtikb&OTc|pppSYud>ZTEUXjFC|H8h}{f;X$AqqWJ&xNNc_0r|68WZ2X(x_mH+;>em zVq$^0&ZAomEp^j-+=VY?mRY;Vz4;=i)TfEb@aS`#U z_c8N5^=v7hEw>?SUC4)OcI&$jdELfRrLk)gJ3IOewZqeJo_}2IVVCy}p08kH4&k7F z`aCDzXX-Mv{1ACu(;G(-?Xci*3$9>r=IDg0dbyY{#GQ`DKJZC?{U>krV((zkX~r(`A(zJyCktH(u-j*L-~{*_j~znAC}smw7x_`15+p%E}sF*Nk9iF(*zVe@-)} z%kTXq{Y`s@!#H@KXQ+%q6x2(Us?KXWoRiM|QYyk_R!eqTPa#jCXZi%OWuoO*i8l{H zH|EYEp?Kz%T}g=3783jlLz!51`7&jl<16)Y+)uVES>hHl5Ibc{`|6t3QO0oI+6k$9 znn)VS$k0$(8=_n;t;BdS#1UkNGFz*5<}zzr^;HmkWpEiI)D4?Q%dD_ zPnr`>rehpRT7gPD<;#ING7_HF7;C0OTqFUNiO9K14KUH=d;G;XgrJ}Ona}yFcLs2A%TdK*>b#s+8B}qJy&tJ%<2`P7mq;(i}x?; zFaL&7p=xfI)b-Hkr!wsIm=%MH>?o7*M z`ydTJ?~fxB^{wU!R~PEzFs=kf5 zC1FiRM^eGVE#vNCDvB1}cFOvwKZLNiT-=p@U3VL#UmBilG>8c&KqcQuastpQqDTZE zTz;VGtvyXT+)sKqCUby``-CXCdeCYZNC;2AmY<(F-uy))t4vz@WyN@lG%6Gn=%Br{ z&rr_!yJm4_3=g@e?&nqicXNYwLk`HN3NA~or*hv1%o)e6h^HWhXb-oREmL-yH1SB2 z_gZKr5+SI-D76*quQtEewg&1MZ03Bew6{8vc1EEh8`idb8dV&{TR@i-X)mB4m@ORu zCX*XB6RgDEdeBMiMdo|tEe>S6k9Iaw>Up;5IrHhEERC=BNl1~x5KM;p$qdsMfB6aJ z;(8GfFy&LGs%DY(T7LAsBxAvfd|eusqqiKHrzNMyxhEsJeCUT8Ob9Z9kjub{Ggq%+ z*|keL+lH>A3mV!VB5A0f&e`7X9;$0^H8_rPEwsQ9)f3Q2n)7?R4UkW!9TFa^URnug zTWc(}LYZ4@Z9Up$BK=MdVgv!qL|-*DBPWJ`3sQJF0r`kH2ysy;Q;bb_Jt^E=1A#nw z0$xKtP7`&~z(mPNb7W=-#5!8B6|NN>?>)74KRyw)Y7!@|e|0|lwGYvK5nX$Nrla|HEZAw%$I}L@F=DA< zaz`KP1#jm`8Y<5n&kqO}xl8AW+ig-Ab)UUr?&ep;R!nFwj7!Y<1&JNl^;s26b{IcT z;sm2zSy^)~x-T{RB@un`*4?C6x#iSGfW<1C%Yirs>N7XjKYYN6_nw?s?$l(>m`ueD zSgV50j_)?ni)VYsxGS^uLwtYs0`Hgqx>^7A{?ObmIULosNGjJVk>fjI1`qB#1P^{L zT7It8m?je!ALGen{~3xBk15b7-Cv4b`c5Ww0Oukw0Q8{M7Lw-&q#fZi9Y1%da0%`# 
z^^*ch^j7ZBW*9-U+tI@j0g6w5z|`K|AHR&Yu2t%OW^jULGPU@(w$I?8_vvNpHpxW7 zVjbF%LKp~rbg^SIh|z#Gvmcz*ZE9ummAep#-QFVXTBtCOPOGQQ->fZdRqmAJ zP@!E<{0VS~V#XGlA3%r4H9B}J+QXVV^r5G#-}z6iG3h(35?f-HE@YV? z!f}iRScm-X{G!2Lx8fBMeDDrL3bM)fC2q&6BxV17IL~90_Z_7`w?@o z0^8813`56s>D}ZORZ)glFNB%Q7*5$;%RJN{FfGC~c#wQ~V*St8za%I$c{-<*Wq!{r z4Zd+z;UdZV5PH{Aq*{vz}#A7X;BnGer(wZmZHqlq~yc}avaNL=xB zORqB+fjC+M^F|z~Dk~MrcFeeItfY8;?|5qM(u2Lgn2~Q15kYkTsW!q?kv%k(1`h0J zv?=C@Fg8lmEgRNs>~-@LvV-%)h+1pq%N9WGC$p$H6OC>CbgRPk4Zo5~bsKGfZW8_K zMj#s6uV3EHz!V4SoY!>#9!-7w zW<&A%DMgPHrOk3j^hOw3ZO-&|HT-=e(B0IR5pdOrsJMgeYdRVuiVypMtWhfQffW5V zBAg1mF+TM2F-b{fE`L%FmZ7!s)b-nav$42hb%|8dQVa!ku9@0(KuxAyx*s13j_C$> zQ>FDW@`XH?j52}uNb6CycD_y-I$zwE>>gcwgDY&pDNS8zinUySarwmZ0^5XeuqTQU zF=O$NjuWe&Ki(!;+noD$NjADaJOpy7pOA7ro$?)o=X8W?AUT%0icwsK6EndoNAVc! z$tp`5JxM_qu5)yte$~gtJ4V62a>_m^D1~bb&M+lI)95?B+VBoar&L@p6$3L*n%US4 z%xKxRySwJ9?6PIHvy(&LrAREgNbMBhk~0}^b8~9>Kd*5buNauBePAX;%0TBE_3k(B zur4e@jTKE1Ox&vb))T;a-GQt0bs~?#uT|WEC+myyij z&##~dzd@IhUQF<8k#nJpLMJFu_3SAN-LT3X=gBmnR+eKaN7-Io}^`+ zIztXV;)2%k9yQn@gC|PI#(!z2)3fn$-cUH8uhf;!U^!>LvU);NHN*8aA+^?v;WIt` zl5ptwxtdo8wJJ;meNum;+jQ@$5>oIqzc5wPA>Mj_ZJekR}yl_>X@;VDL~kcwHo!UVG~E>^UFuSL;BOX*m*s- z5DsQ)M5P8FHJp*|>r>czy!KmMKDm@48l^WoN~#{D>K@$mjPO(S(7$)FWxf5issP2D zSoxoNr7EYDGbamL6<Uxazn=RaE`BxVJP630@3Jx|T(78EuC6H*%A+ff z9Z5296i-(Ca%eM-qp}=im^mKBU6vcn09dKq;o{b6FLYSZIM$VI76?l%h8x~*!o-u; z{0vp^a9MF_rC>7H9wKwAA9X-AfAK#|=RbTq5eJVGS8GFB1S%^MOSE7lzhfY|pcc1{ zZxmw{*9?ZvCI;_gSK3c&mQ`!32Wfx5#E=iIu2_2MqtR?1|IKRlX@4&mma)wWnWcQn zp-6Q<_R3}8uhvK;Qcy;D`Q4J$<7N=vCcpDqtlin6O5{-BU*&^{pM9-#!)Uz#(R-cWqyNL^7oo3~bNRl7S$vGpnp{iCh= z_sdeUTEH#x!ep^*US!kvGpC8f{oUl#AgDCJJX(9H+&rE?mM(H*fZA$K{kik%@pL=) zPQK$9qgL|E7$!M!A_<22*ST~OF9xoa+yM>$4fshIk{xyRWBnH#HLYSl=dtsv04;5d zLztl8DM8^1xr#cGPL1Myou=!_yZ*)YKZJ9Qc6H|sjtwDasCZE10yYYJ!EqFIr5@T5(ML5Ne>tHVwC>@6X+DrnWZi^b2|&*` zoa8it*42%R6ea?((Y8M6{pq88$2(%iMrrdycGD5$8Bh<5a(?2#!p@UHVvrag)0$c4 z`H0+tz3HB(u%p&*MJwb&*!0z*XKMdCt;)?3Z%5vxKbCq-V 
zM~HJq%q&!_MOG>ilnasYpWPf2p%wSsr#JG>NaEbxg+a>20DevyfXdtdP_^h3-db3) z_&w8^9JdeWIU%#yAT+eUl&Jk0cfFr+3*zjqSo;$D8$pD5z+O0JzBjW|^`dQxx?KWGecd{azjAvYH5p#?1z+ z@uR$u(j%|1dZ+C9(|>49xGdkCSq=?=L9tT4o?zwItt|UeNcdr6b4Pm33*uh?ehn}f zWDSWQRX4Sm;yQB*F3Ya0dx4g)=}y!yNR4VwM7#@vuaq(TvRrmvoe47KP5JhAJI4@& zBZWl8>!}nAQbf2K_tDgTZFLZC20YiJLuIk2hnxf_kUDIohi_-S>vsRMZPhG@h0uW+TN7Or^#DGUtLf(J_Y4mh?q=RqQ75O@~Kh%Vor@(Zp-U?9Rq^| zmdP|#DXkn+;jF{wlSMdj3g#-NC=ioIQAbTj_&sF6%5wlvzaFGFh!q$Qhq& zt8DdUs%hOfI_o2ZjCizPE4cLPVK6+pV5cmUpErBwBtcgL2o%)?14`V*b z0ou${vjZY1a~i~8I)1TjnUYix%wYRhA6`;hc82$GICgUI{;~In6uoE? z3U<$=culn1DG!a}>N9KqT7-0*o;n3#*;+%+UmpbYHsc=cLUlKez2mXwU))i(B zk+Ve~lu`0oVQnp(sLf2J!P8#t58U*YL~YY zB_q)%>F~th)AHgZpd_|Sf$x0hx?cUeP%I`1SySWRTrsodZl_G4i2%nIk^AB4!>V3yh$4-bjePf zf>Q!Ux#+%0sP9G260jUdF3t2{5z1JIocm*UB)bYF($G>MCMmqvrTUyTasO!b{Bu&` zo+oTFtTy}pFShPS-x(3a8(*LQNJRcCr?Gwyoz%Ns(~;Mm)FQon%SDT2u1Z+nqT|TV zB&-u=|DXv>tpi1!z|C{4U*8?Mw})-AdygW6wYG2dV-bi!+jRIc%Rdvz+WX3K4C5sG zJOjYe*OW9ZW~v%R^I-9g~!Ao`2oiWs$-d zY<>2P5$kn*4gcGK|IePisJ^GZq^ISHVtH`7#!zT>S*He(Tubj$GstK3J|rr!=jI4e z9P>bhpb{!q#bkz;&jOxii_a!wwDuwzeR&3m$beVyW;O?^>UVSgV`S5D{fZIvnGqJss$ocj-@~J}>F^onJ$OqN# z`v!?f`fnWQf9Ba{c_Ft2=a3cI^vx5f1+RNJ8BTDD+)2kCGG*>Y_K)EDlS1`PBIB+=P!fGicngFT* zX}(=(@7dpf5VK=`Up-A{E?zfP0ihofpjSuKS&_o_APBiCLJU?!TS2@G_^B){dW7{C zuM(gnVu(~p_&r15((;s$SI8trz^JWn|6$H^la@rM-%}XS^IqE#M%X)Mu3%;5Q(?v2;s-k^@8Z za?3?U&P`+@4sS%W#-VuB^G}vRbdQ#$Lk~xJT`HU^j$76ZE$vd!);@$;!=Qabr!2un z0`fELI+aR+Hv--zlX}-?;6l>

bPdtoDn6;A}j!rXC8CVWxILC)p?jCs&{WD)h}5 zs0d&*Z$6QsXOzjU7YWevxB8!d9XjyYZAWS|To$DpF$oz?ed%X2i1vlX3gK#$ilkXi z(;|+=aDO*lz8TIN^$qbLZ>;=mU&1LL;;`B%~RB5Llj96C8_2 zaCj9fPU_gi1Il6=3P0l#SSf@09hOQpBI)v|1}#MexkjJt!LfuCT*{rHWUBv>@?ec5 z+i!1+NyH<#@#%&}CyW`+_J2RgSU?nMOE9GJ_>Gg9Q$fx}%V64=VYWh)O&V<};7YOO zBCQTZ14M~dS>FI5NriP3k64TbO!Jx8G7gtF!RBs2#vBQjFu08P1X@L{H>gsnwGH~u zC4i3GZZ<}{%5un=*rCtv$mhG-%eJ8;fGQ%bMH!Y)JoOU;&I925bApV(?JhSx%a++BO~n^n86TLu~)K3ed41oooer?*p0$LP6}3@@#dPO{g^_^J4>K^KQxy z5Bkxo6`Q|hi4_=hRLTfJ|7+Jl5BHTF2lS_X(3x=LT1Of>|JfE6=uv;s>+YnxK>F@!`pjn{^tSvXM8mxD=pglKnk?dmM78Rh2kuV%I-WVqSySzIUcLMPs7kD ziKxs2pw_ja*L4KynEvm{3(IbSK5dFpIu-`jQu%iF>Ng2%n-WVD%ho-vl5QwaVyLW$iy)RY<5h!VtE6cWia!5DG|>N=%fM3?tbWMVho+H2%lV~CFL2P|p7=zXvQc1Oh>WYj9s^aEIXT?(XjH1b0?&cXxMp zcXxLQZg+kAoYQvp`R~*Jd}o^vv<7P~M$H-}y=wF_PP798j7k%5*TJ9 znEzo-LCfrvy|msk>cQB>CH<9Wo@n~#AoNr_O?-wC+ zV-2YdRX6220;)QM5s~`@tBk+Sh$ZNO&QsUP8s|?^A^%hhK7;(uHZzcb?&l1O9*%I~ z+DK>$C%*1$-S^NmyfG`cf9ts4xS#r8N(;A3?m-St2S7E#opR6sbqPQoC$U9K?cFiP zKxrP!HkLA-b7a^f6(r75#quMY`#cT8fR1rf;U1`s5hPa)5$|vvSoPPBZmDjIrpms6 zdZauRn+K$;x?2`=Eq~eQDN2Bt5%QqjS_ncJ56t-Pwc<%gMx13E0`f^Jdj5QWA*Fyx zs5gVXp(c(rh$A5!wx7X)$>v*LN*HiJRhRfuxlvI~0TV`DAc`Rvs+(a3oN0>cX_!Y? zS)}6D&X6;t2h9S{j}xM-wu%HXh&<~Vd_2))N~)#A(H(%;4X8Ekjr25NL}2qYqwd$S zYX9T?y)s=IZtaVQmuF3GGV|`24ufAv;yEBii-z@;{c(?ELe^LTSsr|#3n;aW%vkL~ zzT7jqmcGp&Em`D)4n_5kyKm<8`X}mgYajs3u)6Pye@Un-IzqP44yNgxyt{&F?IxpX z4Wc<`WlOPz+=55}Z5bLpx^%+Hg0iL2%Sj!#T)X3MUAtYocUc<`n$qJI( zB4JEEuyb=A*w$?sYR4?WU#X~`DlM&2&z3crZvB?A%7~MW5|hTivedORnIb4sTwbS~ zwR+~Y)Ty+mP!nWv?|$4#5-DUspyO|$a;i9Afc6S~OToV^92FzSh`?Vra^%@+ZWPlz znYMW;o5$4z#MpR*{D;O}W4Ff-2hmu8T1Yya0igQ!L^2s;4N-p9kJv>Ef=@K48Fj~O z@H`dTyDGoGGo526c#_kFyQEZVd7jHRjwqHPE0s#B8^Xnd`cq=j*&#O3k6yXHAd9H} z*Is`wxhGuZ%j{Sc>I6S*9CDbM2HX~Q?hdb;yheL4W<+GN^$?w%dv;(0p<(|r?O!`&ZirwUP@sI;J;eZ?(B2h9?=gSXHWTFVo;Z3^S#vwO<;`$xzSaW`YH%T!h{Jtq-|XZ`V?P)Q zNn^D5`5FF(1#pwH4oZa*7_92%TaO{?roc_}@7fu#hsjct%!k;Ci5U(;7+Won)P6dl zn6gZ*7aipL%pfcM66d`bj20Be? 
zfE;m;S?2|IX%Ibahco!^`@MY5nG3Ag&6e64#J#Z90wS|0GV~ zxy=aG(k4D(<{{;I+|1ba{z;sYLp%QPn!diAJ@g*SNCEFg1TP$G)}Bw@#IWEoSDDH@ z8_MXXz@VdJJl~WN@6s*>&J8us0TWkNF*ek7wFL*YEI5QwY^@q9Dp@yvMvV`q!_ti7 zj+W(>cQ_jwi)mWg)cG@xFxVu`q>qR>1*_t*{}ZK2+;h0=@G@*Vk{8y~*DNBG%G{H{}< zC{G*e$QW4J^6@`?3oGk}QybgJ2)9HHw zn1=iwd+J@#IT>*&oRJkh)X$tY#B&iryPu_4I~L(gq(%wV88$;ypCi$|kx?wfa`>Ra z%PmIk;`CdMG^kig#4sTpzhlezW#!P?*W3kBk1ijesi|GE)S$v-gXdvPn+r3=ebS*q zg_OfcVjz`JSXfPCs-!TNu!4Y!l*QvqL+nS2oFcyeBCcK)M6?=LzGzwNUsp6t6~IT3 zCwziP2PYO6VoWRuJB687UUZt~m|CM{B^Dmm#xRFYjZDbA5)GXbBTP*4jaeK9SX^{V zqtu|6j$Sq!S5q_Riw2W7fWY4)96%w|Ep4`ZpNI|{GVq2DrXDW2Jc6dQ>CnfDx@PZhSH3&^gPP`z%P zr^cu5&59uZa`b}`jyR=S#Mf;FICrt2mL6ALAZdd-ugGUqZTwL78-IsfYKJ0gn~KP% z(Yn02xQ0e3p+?G~L5)vajS?0ra*l@&6NObjXNr6djK)Q2vY~=rNm$ELtwNz;H7%z> z5fZNTY0+CWiKH)92KhA#BdnO-EsSP?nicKVDe_9-)i|E*kC|zBnZwO^5J)%x%9;_~ zvMwUTmEK!{9yT&DB7T`&i5ReVoR0DT*kT{_Z)%X*QMZ@tFO=*$u4hkLV8;%aXV-4G zq^0J)H7h?RKnIxsqvlfTCV5T+B`Y<>jg}?AF~iLa)kRtQ&@eGaK><<{4kCF%km3>? z`FJ?1O0bGL3M!B@IYp2&`STXWQNT9zEdbP#8O3O7$5X_$});zlE0Y=XZ3Z;K^Te`p`(t9SZ`?B+1X9HYE_TH{YK03>^bmlav7VJi|*U~*`r4b!N{Cu=Q(KFMzZkh(mzRa{1df2&a7hAlmFrW7Qo~4MqTh-tqSYO zyytP96yX73O|mgUg+2Lmset3mpbg&DF?leT4@(vbKwl53Kgiy0^zkAiycF7Hmicz9 zsvNBdlg=76`F;0lFT|u5QiLWFnK>!V(Rs4BU&H#MaBYI2AT?bOQ6n|_SMw${xb>)3tgXrm>% z>BiJbeH+GtKCjGXAQ=7`n8mwS871J~@RO)YNxZ#-D821q^pp%^ltskkrH##IYKI(B zdud%cJXORP!JvZfvKhY;vmBO%RF#RtdUl1^#Kv`Kv=;p(RH*lY+04FZfm%dk)aTN(POn?P5_E)B<*-%o35?1wD?}ULvE>? zfXet34t#?Hj5e>=XFQFXEJ-R8mZX#nc1-Qv~y#Q+I=O^Z1}< zA#uUJztr!V5*F$av~ctO67|{&`Ll`){{-u)1u1ooWe5%JfS@QWNUf+|FL&+WpeShI z2ffYPgKjr)iISAczMxny`S;v5jDh@t+el5>u%&N^3Uy?`74pthZlWu}7rz~khV`|6 zyaL)EsUj@Vi)-52*Yk@mlA!gQ6~hy7wGE(uR2TdRy3hJ`C@^ceL`%Nb?$vuIPZSS+ z-x|ym@SxJL)IyZWQ`P1+Ev)kBx@@<|pG0{bHk`Zr#^w;N7 zFT*BbQY>v$#5xTq>JciWRIJfpiwoLHHwXG{;~FD5b@^Q1Pjr@yRA|X?p`Kz?l0OTE zJSX%$NALPMX{`%1Ct(X#!_+@{`$s9PoWgu}4NPYtl5_Qr9ADVG^}xHVbiI8JYdsD! 
z(~e)%E&u5b|7tdFX%WJS)NhqhjONS|U_nLyP zJcFBMiwfG7*IUC(2GJvY;lS4=e0}E!YAc~++GWX0*S6;UopWxLT~dV9u(}#m5)RAw zGPGMoWMu#29hZlga81m<)J|V>t$0*)8T12V05jj zco{;Qfh?nN%qlQ8%z29c`wx6F{{`LCw3eYIYCP4=w*8y+gvt%9SFdREnCY`X)Qtpp z_Ji$%Fqg{8<}@2DD=Kl~AN+*=>|!PoVZossrjv#Fr0N&a8xGBhf5kshxAaY2qjuiH zV15Z2fP!Yp8ope8w>L3bIKztIpYfpm|;uKX?2Qbrh8#6{P$b%3CO2 zfv*b>#M*Nh@$4;NKrQjJ#-BcGU=N|DQHv`#wwz6SdJYP%WUO-&!)@Ct0) zKvK^;OSyaXpMicU)mL+ek=7A*RL{i2zNc{zt$}wzM}fC}ZWuC8IN6iw&aw64eYoec zY3pOxn<_Ru1ZT!&;V=~u;`pniq0GeM071oYmqn+;Oaa%uNLu{Ob?}tVx;Z(aEXN_G zQh!2&N2;?xdwi%75WP%%>k_B2iP-3xfPy~cgmrDX}t zlFK?H&Yh0X>6Bh(xnF+qjGm>)I-U=)s7_b?m5=IFgsT$SqH#>5an0u~A$%~7ld%gg?fOZZe@Dr6 zKFG6u-p+091I?| zzqiN?wT-Uxcx$Q0hUPvs){Og8Sin*)F4$9$C$oW;baK0gNcC8Vx10aH0f)U%JD!7s z*WlvEO;>;wv9vV16{MqnsE$psGNZEEgrQ?MIO*ard;M3Xx9;^t2;Gq?#tUviv&QllCdQ&o*Hc$z(KU8=2`aj zQuI!t6$kELxdqPQuB~6l63&yTDlqInQhD>;0k+ zg5RPwC!0oDQ5enYjBHUzQ?tyAZl=l0=dXOiJkKfD=kT50tUK@4(rB#JA||dKgFp5> z!00{+q+2boZB{p7(bMo6%|8ZqzBzX3WQf%5BiG_w->c{-v^}NVaqYRckXllF13q=Y zzVont!7OnczrXMwJmsV<^cZXZ+{xwLZ9@036b;)p9x{!gb8~+Y)foMs>O5jHnZjjZ2t?3fXl>rP>`1%4Co`Bci@|XOutwL>cI>?`Tp2GA zvvce{?lWqq*Rx+W%*9(=bUd1DXhl7_qcSWzvYk-|g+{Zc*0i-7u*f8ZID8BK>Uqx! 
zKCwdT&13BCFwQIaksLk@96o~(YL!TY*TPGdOz?DyoqjXQ?3H;Bn+;$wk@Hh;Gw^EP zKRtzKRWYdPF?c_TdCME$UQQ^eNT%UVQ0w11ikN(VPE3PG{uYojE@=Nzn@=L+*Z(Ju z_+1Lv_97o7I*AAQ15HS#_D`+1pcWqob-M6gmCDDTgOA2h@ z7V?s6vgTfZ>xYig9^-yJ>&b)4I8qVP-fV|KTQT&e@Q3ZSvVFwZSW%h`N^MIlRj2 zr@Ln^zsdM>FR`H|iS;o`6F)PF4>b>2p{1S zYXh#+f4Y98e_a3MQDY}c(3jG5O1m17Zx!msL%$e_zWL6gn z7pzM%OtI*By!+mk5M-pIERqG^@mRrDPiBum5#BwfqlTkD%()ty7~#0wiLV*zkE_L_ zKe12l<85KZy*$CaJmex;knKszh_b1~s42Jl0gGr%4`3YL;@oNuE}h2O&awf-D}%vs z_Z)Dm@aiLjDKz({Ked(D*3lvg9xNB2!*itMzn`?^Q8w(YLZm8ev{pE zkNnK7=NAvZ=X(7?0L2AnG}P}>8P3aYkR@Gx9MR8V(5AHK9ltRRinu)BIHcUgLUsHp z;U;zFzMSK5TB4Aq7?aA%`$h1PGky1xzW2bns6VbJu}ele@Ss*^9~Kfn?ehTb2AAKVtL608l51en%mrm3x z9K05b&eh{@EH_m|ixnHTN>wou$8%&i6=6~gLySpH-A zj{h6B4QKG_2?|2{JAn9p5+S4&tA1*TjqimgmV7#AAP6r5rntbO&wl)r$;h!?KNj?@ zpUJjfHZ?H_ehcdUSWYWzGvZpP!~WShoHd>6G?T;4HR#U6Vu|R=?)`=WMrYBTple>^ zIO>|gIL_Uc3%+7f_~!+BeTaMpPadXa(4Wn0rfikkN>Ec8dIs=hJgV+IHSQJljf0C< z^hg+6MyzduaK`4gM`l?g(Us;GzA@LH#swKfsj;M|`^d&HVXw3x-Jh$#K9-h0GT(-* zgiIQKMp}zCejU4o?172+R6;XPg(`j-106vbv}7{$v1T@gjwrR>5T6v3?OO{H*S_E}(8C-DU814(^@h zo{-^mM2Q|!5G@yb=E-~|GWLmAXn5$V?W5u&BrSq`gg#f!Jba;va;FcqfmY(LS}f#V zoq=PFps6a*O~n|xj(av9Zs75qpkMFiDQpkaBPN(5z}Dh&8rZ1YB1q9+U_?I!t}Z83 zq$ARiSxO8qY!=oo9?sjv>-83HLwi;R;F;bBpduA2QW9)P$gTn-!7PQ+d8#%nUE|9! 
zlfmwJyFKnQ*<4w+YV`zJW$nQIu9S&@hel1#xv>Jcml9iL)cuR+LXbD#dLsP3>8^SNI;y6H0GdC zn1NYVZUJI+uBMzxJ8tTzK~!Jr2*5@96G~A5F$k&T#uOcrlMz@8WGDv+f%95Un+rPX zM^D9@k`$5g3;i+V%l%F07=~sspfsSbP>T%ccON-Hx}b!Jmhk{jpc*=mG|^B#g?Cb} z9fnn|&qJyUc?RcLP6@CI1>mc#bsJS1&YI6i>c>$Kg$f{l$yPS~&OenmZ{^@Qr@y^u ziD6)zvqzL#dqGkc7ef*Sog28cC<>6>iN~9D0BJETnpoZ5kx9|^dfqzc}+5l898A&2}u|;ACX+fPaf`5-c;M!e`Bm5tAH^0 z54`7z!_W0gQ>OFdg6@+aBJ$+oqaVL}GbuFnhm^?!gs%_2BAby*-NvX{`N<3G6M0+ZqKa%wMy)F@Y#QUUS>6I6!YhSii6 z&7`!gOfllA`?~lm&5GyE9p9@$p4Sw*tI{DoY zD}KZz%-~qToZO|cx;PxvqSe;vD*z%k`TISnVf-!-p!|J@KAZ!6-?}CHJ3gxJ1J3$& z%kx>l3`pd~xoCWHTZV};Cg@N^zm%Xu!4@zisq6jeuVmYcpt}Tf!7YiY#f(teZ1kJhwn#@7!$vUADf_LJbCdc|T_LxYzm>)L_9=tizQgep$E=7@{&J_3 zex+qx|5dr^WJ(U7E8K@zNKpSgMx2W&B4?LW zu&gH>RUR8)L{E(qG6oF+TwKb{gh?n7Lkz-c7l!Siufc}`m z<+Fq!3UbQejNlIf0=6ZS6=(}!uXGFzY6H%xHEOg=mM{xXz~xrhi#nq<1jG&^^sSEyz~469E{*q}3?b^awz?p1 zBWjpj$KE6Pc2k?2|l5Gi1EOoYO9L>O*&n*N-XUmQ`%FXlL7nvS24MbiogT&S)t&5xWp=!*NmI6wP z6Z6K%Y%drlV9b}EEjhWAl(Ggz{6Ln4xg<|Nwe=r(BASw3>MXfbG>?#DE`BP*7lu_f z;Fad1<_tU6di*O;01f(4_1wAwcDA0EeV^^8>lhu!FN4*u&a_!MlvFiy@_PwIxZA<(8NHuRd50nysR8JPfc?D1Hx zfc-Y7b!?0m8X`KH)%-~Tf;y1-CQ9Ce7^Bkw~D23 zX)IYwr@9Pv-!E`xYmuNX+B$vugnM_tHf*xhVdL$V6J5u<*L`LuycJ`P(hw0UC1+rd z-3guy2l3b7*oSTW+8=Yu9WX z0*qb8**Eu+8Rpe#wAtkp52qm(j;(&Z9zJrCIY_la61kscKP4n0vxN2Y`OSp#X#8kY zsaebLRLTT~Qt&8KNUlv9Q|pH|Tg$S34qW+JO{No}Hi^DAjvB4jbsf&_-u|gNDJDvo3mu8r zNwjeBpkI!kJStR?Z#^W`ONhWlV|%`!pke{I%6uy}FfSy0`{?_AOt|hW%8AN^P0s); zs{bqDXS)P!|3hC~r&tbGD8BeyNzNLzD29V_@gzeciIQ}E$_<;gom)JEj?x@&NzX~+ zVkTHRK`+m^EuReCa4l*43zi<@^m_Dl0Jf4zUN{o#qRX00f+VEJQeW;bg2!pGRa)I} zPI~)JzNEvdl(-7y1R=@q(ar?2^Fj8vL;7ud&cJGVp5bLcq~Hv^hHK=v*hG|l1nR>V zO&++S6+$9ZL1(FEJVx9|Hy3VWQj{EJq2BnT!t-zWGerbF;oLjQL+`fTCMKU3%aS#< z847gc&X5+c-ft4-^+#LX`ji{kc`h89Hc6mK>CuE3^N^rD#~Tkzp?TI8{<9MA?ftI+ z;*gkR2H`3WJ%g2eh-~aIEc`{`gd#g%0DEt*_PcSeLeQC4ED`tK(prRn&V*^1tO*Mjq zvQ)n_opAX;-mZl3EEYz1+}F?fNWNbg$Rdb>(tZ2&d-r=ojPP!j;>I|!Gr?>)P2Oz> 
z$2;4$)y*$*dCG;su`ZEB(BK_L?aP}ymPD9S>pc00M~V}O6A%_PHz!S*bd}d~k~_pLQqdhnx|{}1FYv53HjjlbzAkFfoslGyy)5q_8^Ee#UoHJO zql}ZL1R`XTlRd1Bk!i6If zcr{r`J^^TYfT*eLrROvVF3i>H)o^gy9s1Q2swtQ#Oh8Z}+%-I)uLXp{0Gu>94B3laJJudaIn7qk;EYO>geDH4d$iO1_ZN#Vs`&wNRkDdFdtJc=%mdeR$ zu=Xwcnd0|K_|p1{tObdYOi5!4exkxQgwyA?YADw8HIm42=WpbVF+j_OvmH%^d8w;! zefJiDA(GP*oian3gu6EGb-wFz+;fl`lea87R2Vo-tDi@`Z}oT->kf+{5D47NSvIFX zBDZ7>+IJth(RN5h1=@w`OsGYha9L7Yd`EHP5zB%q6Aa0ms-1Z(|0c$q$GJSEW~Ix$ zea%D0WFaY$J41hL*f;JvVW?1hjs54GHcws;miKtpnug7kHx3=v_`zi@ne<&aJ_*pj z7u#w!0)vkv_~<86Mg{am?$D~_e-@QNZs2qk5pa)|$Qgyc<1*&rCWsB{3Ry?bSPm=m z9ehbT9jb~XrIDXGPEe~TYLCfVEm#&00rp|M>SCV%ox!mAnE#Ija)lQ(JpRVW^AXNG z-5cgIYm*ecdHt{{$_33rM{g<#0!ANWZsWl-K!Zm+e)`yKRB=)bhif=aWGa|uUavB* zzSuOq+@T>*tU$+$Zv=I~ zqC%>frQqV{avi91DbYc<3$d6gaiF7hjbMY?sSKu(sG421?c3f9Dtp|sTLm-4e_r_@UA667 zkxZ)Cq<_9+cF_TFX&IcRP}0!nL~iiz-sWPYox5Q5;k{q^Hi<90M@w#USK&rJ9o{W^ zW`DsBD5o(hKzv8?0C50`fJZ$C2`_Cu7Uex!c2js5-i&WnyXLx;lE%R(U?>vhoTX3U z9~ZJ6G!V5$H|JzDC`o(YAk(DYDl7?Fz^rjt%jJoZH_mP_Gfv zTb(ra(>9?Z|KXMIEJObq-OvCrIxF$J-5DX8D4eQLez4#n_e3-#&V}2b#yIyU6n7<_ z7B4|$(Fs-+QTA;;ufg3#6tYUvj~%fhi4&rHAp8=({S@!kQ&q1TRZjkkDaoJ-d%>6+ zr-rT1(}U)kxOF|hoIzyw$(PF`-|cO}LJz4R;{j#9`_o^k}t=zyu=RQ@#go^8|Q)f24rx70KeY@H9; zw;^i}E?(;${7eI{@#!5{^ao;EYnXx6u(eMkgEki^4-;HmljGso1KW7(`?=iwp)-W7RpLu6Am^ty* zzYaYlOKgoZzxgBLwpW11Yr%*P+EkVhNNOA|4^c{Ile9tCw5(_u5e&B?xK3D@`3;MU z_wX^RUrzv~6weV{oC<(}a^~KX`U3wj>W@>U9T9ea1ozU$#kO7B={X>$FoOW!!Ar6b zWF^PwgL!@7J+Uvpwl)*qJ?7!be=;&OX8?paU$j+a2X zsxBLfsL20PHGXYU2satcenN^P2x)}cd~@ZQ`9twBLs%lzlS5-fVkIQN;Mr{W+Eu#2 zb?@XLO05{+l-e}Abu_j6Xqnj{eKq4d6mJTMQ6_Zsz4z>qMR7;IYXyyrU$ZI&pq_X0 z_~|GVOC;Q~mf_NW_U=anD-9R;tN%h|I>y7dZ}XX$exf(= zfF2BuuH>J>dl_U8-Kcq**BEGQj94Tc<(CDu%^c5it{pCqGDd;gQ7@kARA2}EqG9+w zH;G+&ZRE3i@hD-IzDu%Sw(T;=G$&gnHM9WfqD)y0qz|c7qn>u2Ge!r2Yy1Fa%8ThU zTsy9{-Lrmr@?hK*KeD)1afbi&k(ioW`)m^;1hRbbAkK#?w{V>xlQb*)24+-{gaYxI zP4B+`xhJ~Uj|6rOavt}9SBfoh>|FgGu)w!VQ7)5^u=<=W#|KUa@}Ev@=1)u@%#y$< 
zLOjY8H?XbdtD|&-r9^K7dQnj%MxWzrnxG1>WLO;T$b}f&PY@tgT6E>B!D?E(e2x2pT37D&;X91#%Mkorry3H@O-B|4R-EcSi`G!vows zAgNt-TdcWN1}pYY6x*ZXHoVGNrG&DZ6hszRo;1{VtLGQ5dla!*3{ij|RQlO8XM$(W z%2_#swlSI;e8Pjlc}!g^4*d5&{6a-v!hExwex3?mF|5SkSS{}b_4p%U2g)p+O<}_w z-4U($35x|>y$E{smv-VzvD1?Ta6^WQDENcLd}cVBUBTdbk&#j-x5zMIHlv|u=z|4U zrx9z-XZ!vLG#c8d65tILJwvp6wAb>czt0@AEZevXW(YP&o8kt)65WT$V4s~g9zViF7ltun`7h=&E57dg zSl2T1TzC6AmzkQ!5P?*6ns`fT7dYWI!chr;$1LXiDpW^Sp|c|)_mflMsd8|8`mU)a~jO{|u*9s=AFWp?QY+ori zP0`}b*jT4qxX2)HZ7!z_s0YiQzsT6Pep9Vo^}f<6uYPc#AeFyZF>U6oj3~VOwO(I- z^4z!*cDX`Xe<}e5sw=dt)FWY2?OAK^6J{(__s`u~!xL6<-Iad1Id=NfYq9s%vewR% z@9HC+M0IWd*ZX>6f9(wa82Dg*_xUmRiXZS1`O*B=i0>hapI-Veh?hKY$;u{#zm#`} zmFT)IKTTfrE*G1p9yI&#NluFQNP{5uWN>ZV<;@&oPfW>Fd{G@&c{Oojql zJlFfu87y$Gc=_SuZPwLU=QbDe%Ngw%_Iyda zLpQj08{@v-gey)sk3#NCX#TW^)A@^igJfqBq-ZEIDZCI^xf>4>l<$`ks#9vU!##(a zM3)pWI;6j@=2=wN(2##$L+J%w5`uqS7fcY^M1NiOg@7yMza9|FDdm^=@6U&XteL^I z{q=dlJ!yq4(!U-+^#A^af28sMr0{>2!mG;qr`SpZ8juAXXgewROS9V0BdRM~peu19 zv~N~w7HXml4#Ls0GdxIVz>e9F{+s3BMth`*G& z_W|hIp4BCxtrHcn%kJ6{e0eO3_V1^CzS6`Pw9|HC)TRPK$ue3rlYuuTTIwxd(04_g@Ov$QeqYP_g2`e-}1n#*wr7_c2dy(eo<=e z``6+^hFG)z-^L61|6zW{edgg2V>@*g)FbFIE!0Hl<#1w20A>k#W&P?e;w$@X-!e{- zA9?hWh%;fKJrO|f^ZnoUE4EbtWW78s%If>XddzXJ|GfH}940zGgAc@GowAXQ&s=i- zx#2?Cye&$Quw+f;Vp}^L-}M0fv}gahId$tGSuN zCdSs>wzu~h7tQk)A(NSIJALs3M@F@rc!p`emDc<*2dI})U`R%s2s`wZJ&t6*{N-WE zoaqYH{oh7?4FEF8dm1Q0^fDQl*SFEU+l!gp2gk?kyT@s4QuG6!hk6)4jp4||Fr`e& zcA;QZ4XRaB>M6(Far?FYt>rG^Ajk2v(5E(_)HrBDUYMrSh$UShXm?YRdS`D{O(JFH z;*M%h$XQNSdr8-M%iRB$;-@<3IDrf9T_%b#3daK$*YV<))0kc^7EYffR4vphh~0aU z+P6dQIQZTE?Ma5#bFs9EZpw!v^jLJJ@ugJD7BFuYA+$3Nx7}znPPbdWMeS~JOpJCm zqEqWebgK#^U`ZMel|Aj?zhZ&rwxmq3Xp3bFAw5-i(Wl<_Up9J$?{2+TV|bpj zoV!k!@tUGV-I}Zaw{Nra>;t-v zA43UCh9iz#JAd*7To93ddF++YT%4=0aZtn~v~^yXyw{gOpzyI6aA4cpEo>KjM|y&< znW448QDR@AQ-I=Tk?V39B8j0GXIT(S9yyfnz}4N z7O55;To%L)X^|->pQlPw+pdtSJ`(TH{SgppQGt9)OnH+zpr?VojYBhe*j5 zu+?z>Q*R_=WCw_ARh%WF&@*y;62{iM0-x=3+DM3r!onMEtmg z7&E|VY>>)s?pNN(&j6RnZu-X?zN+Z}?=ofN-P1!R#%$xe&t!OVFJeZU1KAUe`wQi+ 
zmnfU+>|n$9pS74IG(*Dt6It4FKbIKQ_ZjPO|baa{O%wccpD za5^_a3Zo}Vo7j7+={dOySE3<7dj&whQ5+SzZ%}V@`oQD$zSNqiTz#Dl#@}tGCC5k< zloNc$7`u1JU2+xYc!f`&OZyjphwas5Y!_gL`9qR8t>JpJw~XPyV|w$BrObWsD$WA?+1SmtGf6d_g(HuwH@ zrFSpy$7beK1A?mM8!_)&Fze9b%^nr|&DyUni~bsyfvwu;Cr8=6YxCgqU2t#IK-VX( zj9U@fZ<1kZ_o~h^>~2h3W}Lmt?J~FNI0)bD6~QvM=A|Q_N0(7|=!ffi@60k~@aO>e zo4elaQ3@2YRc-!;J-Sl-OcxC9uh1$E!GlWCJr9p^I^REfyGCo)`;MNj(Y-=g!Ys$RZkX754c`f+hEF6-n07M+I34Jw<3#CXsjlaA zc=QFlc(nb7UApnoT;#7Iz~VM?oZWXhM_+eLY}{NYFFPoH+7t%Jg)?6tq`CjGs0gN) zIprk+jaGWs3l*Mz`JS$TH6U0<#tKI}*=&i>d?I}$r6NfzVEKvbwMsI(dVFDn$ENMg zSAaL~w+pnUZ8~cFM6!0e{rjknor0U_L(x!bFpu77VVq9)Ns!q~m<(^nf%*1s`!#sO zhV@lIf&jDa#r^faT+1dQxN1ReNMYvK;}zw0B2|;#R8+L}>-L5sda?J8V4$|vJXn-9(n z?+aeNkmScKR;CCozOwikUK6U<65wd$`O*)JE|#-LUBa9-|1<6&p{*+fZuje6+_tivd+sA=G*e@_W#I=g$yF#=cL|>-zs#(cmAdDmi?7=9hSX@jW zpmyJ%LaZ}s_|9GO=o!XB-jabhO;C|c1cWZn$-gfwqmKETvS+zCP27@uJ%hOC zxpoWVlkQyRqbgb8Ed9mMsSCi}&n;FO`o_lGenhI0AKcem(^fUEvg$5~PexAje8il&yl)Dpe7_N)~?eS^!F5oz89i20z`TvmnqiSP(cs>6Kcnfib7v0RlN& zUd|3$2UY?Nxed6_>Cc?ZHsP8sq_D;psCOfVs@QGH{78Ct;Wrcm=^zG7# zrOufbx)ZJ)##s4%#f0OZQuW7-O}ia%t;I=gJ=2s!HE9Sgzca)m(iMzxSOuM8r>jer z{zfvvOM9wJ@CF-TDPikd%k^LtQ)5el$n@EDwuV|$wFCK(Q9r-M@ z>82(;rQX_1lT>J!Op3vVX;M4jI37G^2WX0Mcw|-bsQxsdKMQ;!+BQ$JW4LYVtPw=7 zv2jnfV6_NHo6qhU;n&Z+I{F|^7F`C>9Z{A3_0O~ZRX0AUhh6^ zWGfEsf^wx(`-P5dUK|wi;l@X^4)(#(?SquP(U-ag&2K8-E|h<-i${qcS&5e?V~(Zu z+Y7lQF+>TZ8K=va^08yc`uYgCQ6Q{=@nwq}Sfsx7#^{&x)hp#G|AE##+_E4QPGF7`D`{xm#K4llh`wceXPA+;Qbco7*Z4&7KH9ZZndp*zPcuMF zIj@Rh5d$GVa|cv= ziP-Lkj$Wn8_{QL`AHLI$tkFOI!2c6QO^_+G8uOdlSVfaJ-1KBYX!&{b&KCFKPK)z!lBraY+P+4>?gRK_E z;&R#7I<+Q-Ud(X%pfPXy&cW_w;Dv*hrb{h6*JhzItsE7vVdK0ZgGQBVIVkm;UDii& z^(qCI(3ult8PKhPi!U&2#DJyiY)T(rEIoB|?EppVj}-v{!MlmWXeo-N(B%dFrRZj; z;4prz#afp7L&nqz5*)b^W(mFJip6y&z#bgJtyI)_BSSUeIAhFKH{Fl@1_y4O#ok*k z&&~dRaJpa&RKTJ+lRB+;zF>1o;r}7(Eu-4(x~Sc!6pFh;fa2~B!5xabySuv-OK}?9 zDO%j!HMnbWcXvO$-#Onn|L>6>xs$c`nrp6kVLf|k4VnB%jxS;!;o#+|bZxs|C_>3} z=Mv&hx#6;nY_|iEjyhP1bgX`3iapu(5zIc>_Pw7>G56s;=?14oGsrvQa=pFTpENhE 
z%spJTuk65&77-K;q$Am~&D=KX-;Y+oLav+yNEUA{^O-Got2Gk2zE(+Kd{ zMZ*IR_R{$l9NyOn#K63GfX{Ekl?c$b?3wm+%pq0?v_d`U)7bTkO7b7nk`AT3IzsdKr%_p3wfZ`_>89%nr#J>cYWd&-N+W&oxt zzNz%=O6Q4O)mJBnIR8m^r_VLyf>a7p84L~QK|T-Z?No}0i0wq02`87$_4icOG7G$ zTvGF@o{)Tsd*<_WQ}H(Q?q7rIuyVi6-m!#pWKVKpH-UM&-`wEWQsv(5>@0vgdE=C| zGLZAJ>mD*-QcXn`B{>FK%oO6?+Vv5c+~`}jsqyq0%Rs%i+Se}?`Bp|+c7?oHY?(Si zi~IS~3i`=Lo$C$F+LEbin7}3K+(P9=|OY0dz0nQ5iu_3nX z)lL5IX917Loe!5lr%nfy5C4_^iEbKC>0Z+1Uw(^)!r51wx(dGPnV z3G7|fY-@bI`mYSK)@+1_V@R`EPYj%IyvCN7gGLei3W!uUN~eHUN^p#YRs52f8!z(z zJCSxB`@S$=DBFeC-`~8VI(ofAlI51&zt>S{57NoD0CinlvacsKS##tuzrka9+(3KF zjfVJaZmt{XuB>#^ao#s#^yB86#kQ4rZTe2^ch2a{jMc_6+zeI!U_2SCjYb}AFivv) z^)`Mswr_wpGX~ZkI=z53oj7A)-&kn}9oRuJbKk;`l7eERH1Dm{^hbW0kCSwDHa3M= z4vK=8$D6;tu6zAHu^VK1UAdd=YR|5$TsB1gR0RxNaYk7PruC7%xOfkiPcx@RoxA*o zl&y1iMo91U_SCs=6kjmEDtu}aFM_abE3c;zq0M5F_DO}ZC$?rE(s8z;I{f^j%qzDM zCpJn#9OMu^)44AUNC z_=i*vt}t4&Y)PMv)6)oxS1FTrrB-CRFs{QMcIh`+-8-%wUw%T)FEqv>jCOZkjo0&z zGBJ%?dt)RPT*^yR94fX2--S9?LB{7po$Y^c>al23Q4FVlruOkYW=b6(3U&6Wjk2!aO zm*uc#zc8FVx^1zF!~e0!pduHE>JY2fBIExdsLSsQTw<)%ui2IVfy0~{2YXxT*xV?A za}^!ZrZF?++(REUThP!kpnGArgKX5%ZnAL2Lm1i|26*wNDR#}9;O&TVZDj{3zY{lP z382K$0UTO(*C@-jLS>5koN9B0;{pK=geDQZpq@|LX#Cv+ToW^Y#Tf&8@#gw3@4X%&-Jh>q~~9uU)ygGHDH6UPdAJx`ZrfWM!SvHK<5W!)#E+Go>mHx~=Fxhv3iB zm?q&NF~#OkYnChjom*Znt?tq#~`Bs^*mAr4KV?Oc$MX*j+MA(xo$A=QoT~V$`oG+Crxy!WY6ziBk6a;c67`wW7x(zLkm!Beq7$T$ zs5_V4QRK)5oEXWlzB9HxlOWXQCQ9Liz36Fq(6mv%Q5u`JWj|N9v)(ll%`)#|LcF@g z-|Lin*bJAF|FsqbtmpUm@;FHQtWjo|2$-SJPY8uJS!-)J6&7JCn_za^ymRcEr~|h( zGO;G<%)cf?)`D-jcP`hon?WQl&%Qw9YwELejveOJQ;KU~%dFOu?oCMC(WvnAQ zfHsr?gr&cT30#_(DP?JKpJ6d=6!ON|`?PZzr$i)5Wub|)qzvJ1KA6w>Pelwe`$m~Y zb>UhSBL1FnYd+HKudPtcICkOki(@|e7JZ~|SDEh&7Fp@?hgtNPSOgr6x~J#!)@leD zU`DFm$~Fe6l=>I;>=>vY?m3_r#~Y)-4A?eKk7J3EKXMcB-*GnyLToWIMPmkTIM>Xh zUyaM%DhlP(EI$1WmgAF zFJ3HB+>0p{Bmp){`75@layP6bS7ocz|wzH~VOH?D`6jjhgvs^V_BT~5n#^XBYJ zJC}(n*sh)Yo?>W+Zrz5L*S}AKch$0tnu9H-ljvTRauW}{OY8fVLhqy>TfP@6^eIjs zMz>V*s*jnPR1o+2oe-4<8*%ma 
z8R+wzIc+9;z1Ps*iT&-DjHcM&LD$YQsB61PvGQq=T3@op*jiv{ad`fo*Y5$;{y`%5 zy2Pk5(t?$Vhl#5Fsd0THs&w#MeTEKfwZJhqTUh^QAdq~*+X1JM6t7WM4c{(`I)lIb z8a{kSF8DF;`1tX@*90V?bhE&ZxVj9c+xwKw03Y!eABRM%l1X~;Tb6QQtC7DOJNq{M zMw}+v%ShJ{8F8RrwD*12O@<-TOH20*r z?ES)x>LBfEnely@%K4jKfX8^fEid8e)50%+E@SS#&=^OBD;)oeT1-R-L=C>1n7UJ9fB0ypj`C;^by#TV5dXuD<8s(LW zNALSh!21i=Sp;65ks4L_zq{Ow99N-kF1?qpV8cLz1Dl5N$=;yvBr3XkHJ#6i#kiMM z+?1<#ze`w*+{?CjvnN!Ijv{0MG;A!{W^E* zwIkk1CVUKwXNiW_G#%7bjbg?L_z|6Wik9t5@`dmgI3`ZUtVWmj_1aDu@)#a;5{mNt$g%VpVcDy>*NEu^P-s$J@A(|+B1MVRS3!~RuARm&7R z{!!j|-W&WU6PCmPRIUln{|;@dLWN##|DVBcQGd|e1q`4?!f&z=&kOq8qR5Pmu-Zy! zYkN0R%F>?+L|;@x%9Vj1)-r34ykfecL?RAuH<$k7MQTJ z^EFd6+pp!5YXxn9xUCI3Kf=rg@*dRl@7Vx;Z=1f6&98y%)_fB{s3W4cq&{xhC$h7z>$|p1z_ryd?Rvi@DR**POxmxtsPC%|5glIY%?8^86<%&2nR@r$ z|Fl`OiYS;T3ZIErKo8zIfJUv}yKZ^1TLSyVnXIhwvMYqNb78>n|v?gYJ{fDDZ&)dig(C;!HRglM|2~X+4aE#;Y zwfFS^#(=1BUHX z&o<-CxB1r3>XyuT3_*Pgs2r&|sjfTTyjA4!b{pQACGIdEp>UR=-QLfzBNEtr^o0?0 zI>9=*w{>vq)Jkz);N983<&9Hp1Gc2G8(50-Jnj)8jKi)5zQ_l}PwrqNdZ9pThiZFh z$5kC7Q2_~`>d((^E@3aOB?-3VZFmgBP{8{>f{#DP#(D zd2B}E$1&Gd$-fs$N&g?gBg2{7SPiWi%atiS4(+4?!O<_}Qr>8bxx zF$I3U9U|n=CVn^W{t27>0kUPPTzs9K4K*|<&a5&rT1r%4*n2#Bh_q+b26Z z;&a}*_oQOQCT^^D8II_TS#_=-`F1~udRJY&U043}kdSoM48}8j?t^fW_~Tqf-GnBz z?2!czxmW*zT#5XAAi;Tpn1XD2{$M%uGQk-9&-6f6RWfJBFiV->H8^{OA2+DTn_MNg)W^;Yo~y@kp0HKXa&?|Eo-k10TG(cUta_#STzxKKJl;@#vYI zb^X^Nc=jH|6YH#>;!_AwFP3T(GUCW^_N$)kxyQM)F<0dqcZcf(G`K``oH53t-lt9+ zlrNl&i(YR)*)me;{Qfb4pB*?cJX`#FUOqZ(G~SKle60k}#lBek6f``5UpzpCJvlYD z)>npio!0X&kzrJt9ri)uiEXqfQL~mJ!(E#Zm%)zMh0@Ys2P9*_Dnlf3+7Qawzg%-Ws)DWfoHsiYGwC`>Imch1dM$m?qLZ`T5iv`eksa! 
z&g1b4j>Gdy5FA@OPLHL3rAqKf=O$w=e)3goJAPJayy_$T?$<#F1ONbA9hKG{-kKT@ zqG)CZl<@$DCA>^K)r84{jAZ8W2I`**m;|HRUo^5>QnW`=wZT-BavI{JiBA-yaDb6L zd)Wq{_lR243CHM{upl8V-LWs!AQG&SdHT~+jkNycBRqs8s`)H|d(=j&B){w7Ot%GN z-uA6p_8p5O)6uqiSrlD7y+u0{=pb%?RwxnIlAzQjRhz^FArZbArFYof*WL(zkc z?y-O$jFAQjJXb1a;Yd^kjY~jZrA}8v1_n~kd#TG#RhdO*zd#C7ImlRvm4Oz^Ek#0f zvj%F*?kT$|rjjA>i&-!UJc`Z!WB17+2X?*54D38(+PAQ7!2sPSd7-q7`-pETNX4{b zD`bm7zANVz_ZG>&wNuQdDG9vYAnw;mWUx>2N{y#$Q*E957I>^|*ug$w`9*z2c)2~T z@aL!D+KXG|Y1BP?xw|Su$rSl0`S+BchTBEwFU1PYL}DYNc_Mr+KE=V>X#UG?JJMsr+^T9tT2Y^P157xeQ+Lrqtn(;O=D{tx z{iA!s+z+%~;Tp*p5cpB=XnCn0u%=9i$TklMcGx$}mWckG?8|iKdFiM$(C?ZH=XDAw*XF&)OciSwjs>jRm7-+;6*qD}wsINi*+h!9M9TUmRMMr- zF3e4943kYycF!K60RUSh?%MRD4sDAXJ|u%U>OR#N0G>zryGTTk9VL&w6f z&2}sjVFER*NxW9S#6hZatA$i)E0!RA9X;$@Fgv)=QjM_9lkuO4tv`d z-lEfpQ@d8jw|tea#V<^wLAU|WE*tOe-KflmG!#3sA^LOUGq-(ktwfo|ClM;@!98Ib z01FIM)tI}wT22&30bc5v{~xpC^>_yj{9~Tp#>JJM-QhHi?^PXY9!h z>FbM!b&q45>;{^%MDq}t`F+A~vGmq?TzEW4>ykNY#D5RSsql3qLViLVHl3fF zEz1G}?8Lj>E@mX}IEhx+O*6zSQ8@yR29zAY>NN=ka zB^8+P#qj@$?rIk7&nM`XH}nDQUj0%biboZo@> zAw_caI>*hoEW1ljh(dh7OOSefo;D>G;5Tp7s@ADuZ94JV3kDc7L3$=|ic>USA3Hps zCWW5R8)Ut21cMI#XN^JeX!LrBd5JEyHHfgTr&eL$F-K|FQOm<)LNW$5SDb2nzW#qrlMX?dM# zbL^$O6?Y7G5+A5K7baZ#u5 zv^*+Ls^j$Ub9dn>HEs>+rd)c$b-5ymM#}#1E!;9La8xMpF*8A7>^md4ESZPrFIpkC)Vm;9^|O`UCcR47pQg zo+C*;`KZMEMxKC&ppG>t2}k*b4r7<+)Uq;4#X*?s7%`MM&n4^ql2+SO6XA3 zj-`7_a<<7vO=gBgNcRE(KB8VYeOQ6|e6qLm#C+a(JIV;N8~vZP6DnR_fJ^pMNUAmy z;PqwRjtrEL&OPr;av6OO8^%)`1vlr3OPe3e%T((&SMPlUuoXe=Y3MO8^*Nr+t*u79 zG?(>(Bwf>a0f%+Ae(UFoS6_K-I~AQTh)DoVUTt7-+cDcwHhZc)J7eeYdq5U9Dx%u! zukTq?yeCfA+8JJ(uJapY^z$VzU+$_9+l}gCl?jY@uyo6}h&o5_9YS6jf+F#MPdMP+ zf&3Oai?|JxCqz1%!&2mFswR%=tDe?|Q{BBmuUB7D8c$Z~y|J|7866HAVS(OUMH~84 z_~+X{JmEv&@3eCqEQfB&{`c&{!@5aZvL(Oc6|TNT$AO!qT=w0{Dn+&CL&*z@$V>X& z0Q>VSPX4y;YM16tOdrPl_g>w&BSR0Kr*?Uck>=ww(-C#Lm_!!*St0(N*In?adYFt? 
z|9S``F<^m~WgrgZHqo}Kr(e@#0nAeiY!MGSoQ`B5AH2XOV-&1tPbm*u<&Vh4S z#|#cgcgrG+{@rU623`h(HDiE5L%376bD=qqNb&bm~GQA8^EWxl?Wi{EXZLX$<(nP{xVK%qgLv<7CjL46BPMyXUj>ve zHk^jQLI`~&EMj)HQ2v=^BAwKn>AQ3VM%3_vcn6~7WWeZA!{iYI#RlJQ&zpYZ2(f$M zxXYBOQ-Fx2&+uLy)J~Y#w4+LCf9i1XyqA39E^CYZ^#DReo*eH%03{rB_gxPyriKfk z^OLAPk*hSwygLNT>Tt^mirfjt-F)N)a(8>`xm3z+_K?I%{2xp1>lyE-Fw?MIyK-$c z#(qj+-ZGL(m2924H9_8*#rffqet86U%})wJb!}I6=N%CPMx@>@yjoz~?50s_gE*o{ zQEk2m1yI8ZMwbti>%CvCK?!+xb^E%<=`Y5+kyk2^3ljEL%>tx+dEbj+zxYxP;rH19 zT>U$D%@VI=s4cSjfHeGmRp;;6J&>Z{VV?%t#m7UweBH4Gew3%&t?Qle5R!p%OZ1$2 zK8-hpn!!ywt??F=3#hxZlKAo?OWuBdBbBP2C|4|zmLaNZqSQhlk z`>ypL0>za4M8Bs(;s^q&Cn~qAYn#V~VsR>vw2F?6yi4S?q>#pzbHHKr{62P#i&|AI z-8jYJaMsl&(`3x_*fI_!Chuy)YGDxThH#(O>d=qW;y#PVfMauX9PQV3}escAB^4?#lY#ROm?{QPvF^OL-EO zdgzbo8pV+5WppVi#%Hqp^eU~@*p{|5ws8gNe_}dPsNRoWLn3O^mSVb9a5K6dH^_@k zn6E5mcLs=QxL9Q{!!Doq;@?vCdGDn83GuQ2R2cnM%loN#+qy}`zQ0i!(08v@;tdKC zlBc<$S|P>q5J+TxljEvtV%T?Wlt`iY<&^&QS;}5gS;B55^k2MUG6OkCCBj$guq|67 zqI&>Ur(TjedLkTn>gy&SVpGzB4S!IT%)o^owew}VQeI3=yj65ZX#}r|)5PgI9z^{$ zyOE+sKcNcQp~dvoWXw&q<{#Bi|0T`PuPCAw`(C@GkP8ehST~e^fF>;E{Tf6JKhJ?Y zm%oV;@KGhQk!R_4f~Fab%Oi03sD~u(_ygD?(*>jq18&1J&clG8^Vp56N_Gch=S9(u z+;%&pHg02^r>PIwD8B|UxFg`|kS$!s&A)N)!1N+|b>VKS6YkIAdvYTt*GjGP6}kKM z=0o%)MPmQ0vvKjy(1Wz8I@c<0lH^*%HECD&f{(^Sk47waJdPd|d41eQ5<=p$|0y&2 z>sb68T3A)U85@Zo7Lo4S=w-hfjCQXHg?7$)zoIQJwh!+NPczGI?=IpuVC4Q0g`H6# zWBk4PffKWfDSheRmUFBT00K(h77;b-M0xREL8NcxhT zU|fQzr%Qj8hf@)PiK6ffbdc7uiOF|N9j6}K=dVlw-^=9mER#>f5&~^>Un#!ZV$n3N z%CjXhmDK;K&+VK+x%D5fPdu|5S$o1xI898N{lm($_jrYGsh{#Ep|&I3*+@i~?{&Na#77gLW4(M6Bb2^l8656}jaQwcZm&QnL>Ew)kMasa z=wY(K4;vv%Rb*>*mXp~6zW1lgH{>eJzcG6;9)mSh(Lll_v@Y^_j4P{T`!Jpf-~)QE zEc$@yYwrt@p{f@!s;C@aNQoxvx|$!5zWEOOAVR9E#A`+qs73#*nVhB!1bF*BcJKVG z=TPZ`>JGXD_zC@QAy`vUfcO)SYrEi?j7um}OZ((#JB~EYP*bpg(RpJ0?>VKEjX@*5 zEzIHwUi@(CsLjub!8#eE?W@}W)wGsWu6Rf1d0=hBbl}Y?OqGD|`^b=c&*$X4vu(fW z)6Kbfbb&{$8qHbXPE|ZjDs=G{8VGm=gJM*59A3FKyH0MFIe&haVXuUEnpwv-B+F`2 zSPu?7R(g1Je4<+Q{s2G;)A7`(3cp_ev*&1<<*ll8kiN-0X``JqFuXh}a}8hPl7c0d 
z>>&OxR^?WQQ2|x*wvDL3PgDEA*MxqGFw$=L>ptp0s>yTu-gVFw7Zu^8b52%WI>WFtJj%r3$C}X9>;uVl1_+(JrMps5 zTY(zbjL9Gm+L|Sg{U(?%y`30nYk$@j4|lZ1>03XU%`!ryV8Ol0=&_m|5}ScFEk;m` ziXZby;+oRHKO{QQpYXC?j-)z+Ma2{3$=!&sf?*OQZCp0L+>93W(sRTF-({LDT4vr} zMs=}#RgqRge`Fn(1m!!78B-VR!9z)rCB5FT)!T=0@`LRlXYHE~WoUiqSqZA)Z(Jcm zg@P@BFfQjHf$Q zawq$SEVC*6TmfQ0n=y$(_6~sfl{TM4bp!>>kpTqu-U5$Ry;zHQzgzLuceb!UVIXXA zC@ZH_bM6)px-7ZD0e)70DdqisSb)?Av+N+2R`jDW*j94n6}9-=%6@H(h_K~NUpk5CEv zEhH+T)1VR@hoV@c(sqwEjC{YmJ!8bd4F|2~%DLmH{@C*2wB&YP5^&f~CE8K%Sz8bjz+>eT$ltJ?M)@kbC?Sr$ z_MS-{mu{CdYR1$>N_R<*OPGq3&t2|`-()Lt+-Q}R$pREGT8glGLdlMFP8SHbEZ)Uo z;->ZHB455sqd>2K@}pq0+3}L%Xch3Oky=S;#)v*Z_Y~beM{(3Oi6SbZo8A}XySL}} zuA(BW7;Y#O{|*I<-AbdG*Lkz_4uJlPhJ9GMl);jK&TV~W{=56LI( zlN27+q@jgh5s*ZUO~X+&=vaz(IA1dNPj2a_M^ZOu5hc?jDFgtO`8NWVTwR$L{tfqtRCOG!=h@uP1PRz8oTw9v9#(>deT?uno63F){;FcBE%wzGe$Ha=hRi z(fbHUKtlEfZC+PES z_$7`;lhe7e<$My2%71Zl@(#KUIS<_3dd;;mr7evd@aY?kqCdBAEpJFvA2#XOdVNmb z@PEcxP`mN9e+S`|3agP+&Ye;{%l`?ze3 z#Ec7fk-09N5GMNQ1w7rV*QYB#6DU#&QQE|OC8)V3rjHc3t?dD#0@pzn8D9UVNISm0 zJ*`^nXScxhox^Fd@V?YO;Z5c0N(?TUi8X>5g&!xkzj~Z=3{>aIqSSN$JwhOAgEQV& z!EM9A9LsDn$XO%T3O(f;Zh4`?&7{x*+mfIVjj>$(nRq|37jLQa6{lp%9~v+TS>3?T zV38H^KfT5MHX=5ZY>^Q!MB7(H4mElIIJNe$vz6TRNFeAud+9c3EM2Z}dLdH+l37$0 z#-Q_vkR9m)6`GP7^Q)NP6f}&Z7M{+iT3W8O>NHvh9Ry>asgtP)Q1mm%t1=|9HlQQJ zC&r18XDePZe^Qy>Y|7~}Nf`Zm7Lzf5A#OmgBvtfH^s++TQ9YV*|K?w^V~tH7o=VZ> zXgWsexbZyn@7DY8OQbyuy5B}!Kfnc=)^{6W@^0BHaM zjcR?+$g`KSbn(IT54PL!;QoYDm5}eowH2x7dhH%@_sgtV2KrOHZZMK?|JG1?tV5g; z-X#M(g=`p?0IKIpRJ?UrxrdzA*??&w{26tZ}O}ALDlnrh{< zr6fz%>6|u>i@Xp(OY)GFSp(%HDn3Q(P>hS8x7P7_q;$Xl_!F~XX@ z2S6dkE)F0QMCS$n%?dur^%H6Q@|Kj<;pufmRAxVT*Q*2=cw8fPW~YcJC}B+drY5{} zGGk?bRfgZzTP>%mAt9b$J{cqN(?z*9R_=+3bt+)lhxQyoy5U~4ifACpOH+86SorFk z_kX7?(rCT#m6y&&2 z4BGIbGMPnU?1{I9TSXjt7f5GWyBEDx#R-1s?My^6`6pY0DDvc9R+QPTacm46-lf`> z>8m<)+LN3%_9aM(_r5$;(BENX;m8!xXur?e9l&ri0dKmIh|DI7Ine+lK^JqwxH*gS zYjL9=K1+xOzW1-c@Q`GZlnr|xx; zLIf|GNUm?rUx0Ri9r_QUN2>sya?Wxt!t5Nw+GRWMX}ix&>0}>DNpR7armaJGll;eeBr2%=Dv 
zFOt(&usm$JqN=|M#VRS2F41D+cA6g%0Lq9tBXL(cU(*8yME1yEQ+b@O${cB2tdW`Vq#Vyo5|k4%-UYThzQ*r~GQiQQM&4t)|WrGgGyWNWF-w0J~5czgNV z@ml6Q8;}r2m=RSkc~oW(x(CkwtJ`-)vF50r;^?iPYEgtM z5qcCBVoEi`q8{p|fGBuYuog6bjN%X5_p4)JhO-*a<<6e4@f^;g9DB7i5mCbx;G!W- zT4{!3CV}PUN?k_wT_7jgL;7eKlE~YVAs$UqO&fn^qpe$mH`^qAf_);guhC*d65aK` zU+sese)cPG{?dVC4U^O)u;qC2n6O|oZ5ds{3fChnufbC_aqIPIHEA7{i5Av&?Ha~o zK4u3lT^=g`Y${1ra|Jv1J{_k&nTrF$`eSgE%jAmJ%$AU?cu3bTQ;SM?SP2{1Aj#~F zi~JJ@qzNo!kp$w(A(CwaMsxW)_<4Ea8PVWGiWL{%cl^Tj&yldf+M-BpQQp5*rE;9; zlTgOG6e={LQW$hs zgigg z(^ar)oi;@p+=&%cGKj*>@u!v!9N6o4HzdeJatBw;SwRB zUayu?*;~4-U&lmJ$w}DJv(|NGRJ&@%mfGft0q<%zs%{X5TUkyT#F%g`w~9eB1ZBZ`*>3_f3*>v zSSMq%Hi1C}!^sR7U#{?CG|vos{@*2ESViOAO;3~1WqX*%e1?nMsrX}T+;cdcObcy` ze6e4v&#OpoOXfy6DsmKhPoTI%*yE)#4H{HV?gLqk7Xk+PqV*Mvh(3^%!3K z6ZTAF%Q$%W67d&msGElN?`a3g%PX$%y$#>`<7Os)Cn;u*T z&-bzV;PxjVDP`xu^e?gCZm~$8ylW6M#4HHGRQ7r#UIVQ1xEan9E9GSPATAw&MCcXp z%aoxe>HG1pL50ssP1KL|H3D*Io}ayrFOSHmi@&AMVVEmuRm2+W4rNJ+QihzZ zo+5`@JhT4dnF;sRL7%l!rK(|+uqS%U$z#~|U}c z1D{Est%Wp5X%i8A@G@z&KjdBuM9(?QNqhL9OV_KysZV?q>Hgc>Zm~xpW6}7YC=Mn^ zlzHyJ!v315HjqA07RUQ>NG`ah#e^YO+6A##$A@X=P$YJ~^d!E3ArhsuZn>zQ>=pT& z+C07eSRewnY#$9Mj?pJ)A2)EQvX^zJaP$wc~PM2wHPlKe7B zgiv0i`{1TyW9Z=PtRq$jv}m(5wJ@fFBsb)kwQ^uf*5eaUJMd~ zgGV9RY)l8vFz5P=-<>93orI3|#^b|oHC^hEOHvp683_=h7-{Vv(iP6Yub7RV)=_dF zOga^cui1($=#(>6x8~KemW*3z;!C7$DJiQdZRY|cHW#2a2|MJ1oB)|qkSn@^_zRq3 zZ)Rd!`x3h|=_r8A;ZW855Jto1UTZTGkF06=ADTbq$5bka22fHmyPAATJ-Bp5kLN`& z4gGKq+3Z0Ikdw3!!PlZ0E^Jk7FO zEjSz4&5>1iE&r1X|G#qfDy=n@RLD@0fRZRI>l7xTBd06^wgGeOXdJv6HeGJ4whqo` z$ZN?Z~auS|3<9d-K_M42eh8X?1qV!y_Z@qESwIkP zgn$s7Z;$O{HVNKKp;fO$=l0!xq%&XPkz^)UEZ(EOk4VbY)d@Vkes!6O0~kI~3%K(T zrRd$!VWR5bR5;pwag}gy8Zi9=f6+wDiuBxn&L8&kO}eFpNs@j=3#vU$51YffuP(pd6g1Th1w0&fxPP9-cG%i87~VP8mUl~9zAix!!( z=%#c?o@hob&L!PnBrRi>r(`<)n&rJQ)t?^Zc^D$g50oXe#6n)q)7asO#qjcQcK6or_zz@J8DlZ>@E~u>x7=i7Uk1=X^wg_ zD)DBEdN=rx@QYTfmvJRl1%1ffxDUlXpS)a#ju|rxtcf%xUxX|()?ZRp+TiS+>IO!Lc zoo7E2ZPRFpmFJF$j{E1Qz>WbU0=(;A$O_wtrLh?9Yw=)hsu+Ry*$Ep)F&eb!{ih71M(9 
zPa1t12p{2pAumwk2$WbbiK%=+vh7-ewe49T^C&?owjwdd%9M z0}kEvf}6!Ao#EAg%n`COKh>8eB;@Eh~D^RE7m%rI4SUe+_X|{uj|Q8QSFjas6by{hrSG%{9mLLqQTF&iM`CCdjpb|YG6j`LgD&to64`Iz~P*x6i*47&`T$QIEzzsSlV6P z>+y7ksA0EX^dD+=H47$XNTr-*%dr_5L`ewN0F($o$Oq9|G1|lLa?#bU&L#JfW9))n zg6NX-3rj-><_6a?S=ZXLSXtV7jDm;h5JO&k(O{*96&>1F3)p!p^l;*es@sg5wt0$$ z=g_q;uy0g-ONH~1(koxqOGU8}YyBW>?9C(FbWWXuQ`Oi@V6ruqws!Ll1==>y&?k$l zZ|ZTuS9DEE?vE1yR#6*B>74b_>Gq-4@qZ^Cbq7-U#F&Jp(kgv^=32#2eiCRr8~~X< z8x1HusZEJUEjkoJurkeMC)R_eklat~V5`kp5}NEmG=9UXoq#G0Z8>F)F-sWcN(8|S zLZ+Brj!k3T%@%Msb|~KV2#X>ku0#jc<4a3hr?7Nxp(_#-}h#}i9D^JXtyCus2Rntr626?ogbuqxl)JFAIqSJ zw?{$$j?D`0g6HI`Vs$a-yV85Uyz2L~f%V0q$D*^>)Vbi0M5dqQ@JT(KwB9x3^sXA_ z`ndhM#;#H>FZe0kh=Q%^iR0vwiaBhN^eF3eUe+`SpOFai7*r*4N6>DtU(PB)2Z*5> zJjVJPl6(lZ3s4nS4T4jk^%58J zUJfmxL)ay;&7O%SuX4iruf*CdqMAvq{&sCfc6ma}lCHy1MW z;#?I+*b(-;Iv*;>#zYixiB7$Q6d#HeDw;9MQtKfXjf;jrfbsD!zn5G6|*;d%vT6gc~ zKf4y%gd5^Q2d96^NPO!VpUgoMd;1o8L_77|u2Yp3&vtzM-x}fKh|yr0MWZPE;WPHi zLHxi>p3pv1g=_%(G5679WOs>@3FP%S^57`>q%9HSc+XcoVxIC}YobHky>ZHYn(#?v zDV+?r>O#UN&Q`O%ow%-*shNP&nGe00@~bW>0c?L5B9^(%Cg(iV#XXY)YQ=^(%rE>XBL%v>L7+)}X9&G@eUpL08-Ad7Qd7GauG{&#sJW z*Vfy>CwfVL(Cqc3?SQC^TCNk+m)an@KegB}$p}s-LDl%5hf zmYq^;pcc_yFg{K;qUMb}cUM?7bKZYOMNyv28g6E{zP`@wC_maC6cKkwV^z9c0YXmv z7&Tn~e~B!c1UafP{BFXDpPS(m^sSetHSIxZ&!|QFOH6_tiPz!jHOCU=gQFbN@>nh0 zr;=&(qyPb(=g}V2S$3rEbh8l0xvM?!WZ(1QrF~uEp85cljRJTd{ki*H?NQ52M`au- z5TMyU1fAii1^dVZu|_H=%*8@^`lO@p&ZgvLcE|XrTg#JI|z2#S2P18Oa zf&?c7_dw9#Zo?1)1Pc%#I1KLYK4^eogCsaikU)aFYj7Rh-3E8pGr6DN`#$$Mf52Jm ztkWO%mzmwNyQ;dos`|Pb=na%+^_$lY-)_>`$6vD+OftZsEgL!h{=<4vvUoaxIPNh0 zhhbE)Wm4M~Rhm3^K9=b>`+SzTX(+YA#t9V}iglu`E8TMSC4^4y65MymFksrrC{1lq z5P?Vm1Xm>e%O$&$$8)IwXL_fXW6^r4fgdnlaN3TNXYv5-cpRNS*O)o6&M>6%?5_y= zxUMk<|H|E#avY&+n*8M^(W1@O*szvI*8mg-_pjXVG3S z1tm!df!u9#?a)y{+gz+}6BjmYiCG13p!$UIUYI@BQgJFp ziTjB*?q9NEk#Jx+2MWfm?o5XAu9>RlWZFb=n$OJ?PT5SF`?QN$PyZPDugEmIlgiM~LP>7_RBRwH(=sC7#H2tQbR7T?`MTF~swT%|jD<*Wyo9F80-wK5r z2P>SzULp{%8>{?+-)hjSL4?w1^T-C+j1rlOY8k#9BlY=guNFq|Fgwas~7qiE?tq&YG`#A@F 
zrvj8ly!sKkpcOoCJGOhVjTZmO>RiMS|L{{+V&NIXe6?vpE}I7#)fCERYPIO=lNZ?Y zz~Io%S`2SNi?^>f?4(;h3K=k13ceNuvbRdCZ1IUn=Zya37us^1S(DgbK@a9**tC~! zSu7heDoLwD3JDCp@YM;--BI7ziJA268pi4Kt&O>3<*nc|tCpR$&6>DeBqqef+X+Ez zvpaYx*`5PT1I#(6d6~pqp_eo@3=JGtv8@s15nwmDJGhKc1I#Uu)DOaH1 zg!Y81(m-mmY^`{D(L}kgChblLfVa=H5f}S0dyy2U=6IVSb>s`B3p3Wb>7S8|Rz|Ia zOrseqgn#f-I=QcmV-dwvCG%v-3)a!vUQY95n^zlot?%1hF#4$0-E@tG8?MGAo zb$0c|a&wl`Cny{tzIvqDyZ9ZN{R_dzHEclyWoLIO-oB*76~VPL7-K`p&>FL6elCNL zira$>fo2F(eu>vY(orJ{J*hCRsQKJNkIukuc6{(ar)yJwRb78NrYaxJ&OiQr>*Ax! zw49H0U~M^Q49B?;}*a7Gd$?tGZJ^+{d25p#ulWvZx{b7ICHM-(mC_ zXQTIOKy9GK_fnNVSqFkbU-53N;9}s#r`Qh5fHzMc()Q-lHQtX{^LaYDw*8pTS`0q{ zkVL-Q6FKuJ*Ky%)F>BFr5=}chIn(TvkZmLzx8`Fmj7Ma@oX#s}p3L4b%tp0$Lk$>mEX;r*%P5IGbGE){1mgZ#Q%~D3IR1zroMY zNJt3mSacp3xZvOO3Wk@4&Ct=|)cjgZ*Mya%46;_NM0LycbLL8(ClE{IOWv(=53xpj zB$TB^r93>=HZ3UbaHi^&y@MI>Hn*(J?nN~gNXD7*#Svzgs9v`6y?+Mm$FM@7?m(mO zO1zoG;nn1Ez_vsMa-;5Ru-SKiEMa0(?QJFozvRZGZ2DyM{_D)fw=|-@hAt7Cv3~x* zr80fa`o?)@zoa-*a_|bFX5c%Wz$#)Zc~|XtII0)fkHu%pcssR}2qaWvZVNBB9>BNr z4q`e&K4v+R>7s5;y}=c4idQ%gOO-fIZym#E`FKzH;u z*h9bJ{)uw{6lHVhg!Ezd^W`w>jWxYa(<^UiUVl$tloK(YK%GTFEB1A7ae7(x)y~_H z&*+-X^R{)9GjApG6E7H7oV^1`JVU5RwC(z~YT1}f^1hl$ONpXyci~3A8oBcO{Tcn| zC;5e*1uNIM*uf*MAx5@%Dg`?g;^#JH-U@Mp$k=C6PIV{VL3bvETr7LeMjIe~ARD9pk>C z$@tLqF+B2+>^9a~+dR!dj?#JsN)F+yQHQlJxGfZ;c3#*Nsb?4IH#huP9&IAlt8zA=r4h)ANbxwz^1c_x{TAcgH&PCfFCL`0%nc>aHf!()Gw0!5aZ|sB`pp}T{?X)IGjYg$ymCj z&Rihn#JcHya`IA^G)CaVshB$ROAh(2Didfhu^v5Z%{sTsveD@FdLxJ8^ru5&+n&;T zN(ZlM^iheho|rWuY^z(U`>g;8n=WmWKhAzVn@pUa=4nrVDa=MAa&uaK{g9t6&KvB3 z>*+_;gF56dNUKxR&yhvSZ^zL)Ykg_@&(KBL3!dGVxrcyN931|&-8>5WiF+|Jr>c=y z#7B>c&hT!&!QR6v8OCd$k4m$saSvwF`Vzqu7QrMbEgBp0e(d0F!yab!FIfvzL6<&h zO?>9aqhL9Oc=h&tI8ke3b#l$XcHD{+0Ugd5CLwVaj<(UPdE#L8^_(i%uc)?9J?WyD z02Fx&S5SqHCZKWB*V#rZFSH-YuC=k{5Sor>(#tb6sn|VGZzlRooZ!9+Z>XeAEvPN& zTIs<;`TY($CBw^8Sk3o?8~kgE42k7RxC53Q|DN+eo;>qMIeSdT+4RiiEsl(nCnf)$ zvmxPUcBOLS+ox3*0R|WT-K~`O5B{f1Psg$>JJo5v#~ZZDH0e$v@*QA+Bn6ljR6xlR8hdlDyhQYCvKL6}oMJ=wC;)oJ36 
zYQMZmi^^LwJ_>X6A`Mr%Js_Y;-`^{%?8<-k{P-@H;+-O4Lg8 zpVrmSuMQ`%nd8!k?Ne^JsmFk zxqCU3Br?L)%eod9wCcXpP>|G4xdCIMlr9$7o%p0Iw*Pq4!9|TLsgfQ(1babOjXK z5|IUH1&tMVK7)Vyi+C0u>caMp!Ut8ofE`933B6Dx!r?DGhuQy7FZ?I zb)84det~u5-~`c|6Y^i*5UnTp4MW8@SGw0HsPK48OSxuR;lyg|TlQF+G$nXx!N3@$@y;F(@SMe-0( zmqHK&s}mAiAI)LK<={0h0e_v+N7D;A~=|I{VdKS-(Cf--ZN>OEjssKH5adQ5%nv>d*Chb8FT2bi^ zjnPzzRwjc|m$hCcLmrPq5NNk%?nxRR!75BkH{pTXplaHbd?YjZD|hF_J6MVv{C=+wXh2Cwn1JzVe8flt^J5kc_Jb{p$o zhCHS9$&-W6G$m9dN7=d`orl+SB@Nru`8TUp_UYium#pBcrY#iN_|O7v*H4dQ9r=R% z6F8M}eK}0LkDGFRj|H@B?AM_$^dY^q^jWkRY+1Y^!=F6Smi^Xk05=#2SZ*&Iz^w;wv=83^DHu3bwWp z1jdmH>D5u8euuV2F8&^Ai_!dZ#ruV3-_^Si;NGy97-7-Y+!BN8%pTTaG68#hhvorG zjbrcy3qdVxyf7ko$Hg8O+b0RnvM~eU~wP7#? zwBRV*Y7B_^Jy(cwfOOEtcN4t1wAj0bhau3V0es_1_mm6!?Vkx`F;7H zE58-YTZ}B5A1oT2Jr2J*!V&Xs&xUQbZa7MOXaFaDH_$(8a}#p*0YHiLXu$pLUl<^a zu`pq)hU-GAL)%u^q0f<{zeNh-ZgXfM%4FC=v%^goZ;#_qI+KNqdF7j!%!QTI2KLYf!Jf7GE))a=JL%;2*PtNfy^n6$G z)!Goor`}|@bYy^Nn$l1HA(+h=F zKS-{ORAO>of%(>R z#$jHB@ArwKl$BESIzNta6b|emZY-65qpu1QxESX6DY|ui96Ejm@w#vH8>T@GsR_OL zwsVs#$KB>C(^NTTVe>8SOXBMYOB@~3ajg3@rlE#~VVrtVJd7j=r6(OL%NTv|5cihh6=i+M6+4!wmen1$mp22*AJ{kTb_fl@Mr zGewE_&!m~V2&-)jTGUv0j9`dy(&`YujfLg}6V2I4E2Yb^mDFzAceT^Bpn)_}8+%f; zPcW=45(f3Lc86kK9lJU~;f*T5`t7*?8U#j%Ya%o9k=Y_?;bYde{fGmOmhNul5?)`b z;>XYBx7ghzka}Ow?p6>(XJC7w%V{C2y}izc3Z!~8cGVjqF&~GPIGI89x=rtMdK8WG z=?VUmXZp+d_3s4T@M*nGzXA}nv5>XM{(OfCcR-sPzweJi6vglC75sHwPU^$H?&rm^O+!p2Ko>@+R})F)9)!rm8kF*b~KRNLj>9 zYM;RFP$s-Pg3HFhb)!p3;$LsV_SWf|s$%J_sDtvcZoVlA5xfgS(}&>k!4=XgcwLh< zY(ZE_wk|g$w3t;ZVr*v?d~pF(D1Hf@#iMB)34MtjF2XO`IK2`hR0tszPe0jFX){78 zp;S|1(u0#7i0%~~7L_kWTP^Fw`?C}2NG%Ox$;s@UpIyy@MS3!qKV|rJcRYc6CH7_S zBTly>`m~3>>l&9e%Gp?buLh^Zz|nDL$|QZU0M+64B`Q%I191gv~sZ1SeSEbI5eaG>8~*NR^!eP)mvVFz=395gd1G3>C)y7 z8~TMUp1EIp zoOQ;Uf6dINIciVp*?AW+5RW?nw903iR#p=z9vzKt#4p*{b;k+$6P)_Wc~SIbycb_; z@T*U9{id}6@!WF}o6uscw!6REY1=}Tfmwq+gxBxm7_6=Jw4HoGE|j-bKC})H%B#Cj zqhBL?irIf5#Ey%5oEMl*=-LdUvyhbFBzhz!d6VhEecV%|yLslIO({?(Gxh0@%7L(8 
zj~mBhTz2uh7U@NuZqoGfs)}z17wS`qiJ+*Hu}{Hxwj+&ZRcpko$x4NKZ-5MR4L}s^Wa#iaO6FpkoY`0FTc8qz@nVWnz zNq3wYRWH(N%p><()F$MB)^3@pnp0_u5yk)Ol}lEQ<;~%Fhcl@7!b-;Xbg;{x)n#LfYnB=p$b`GsQFG9*6oQ9G+Sa@K*qp-nGhP|1 z_vA?8}rXO#eX(CC271;x#q6iNf!qIwmj~jPF$>> z_7vzmsiwh)yJGJCcR(8OHSU$5H=8t4pmxVKi^rkn%uSU0weXOP@~kMC%jqDG!*gxg4KIh}7C582yzoyPQ8_PDd>|j4B~r#uqMF*8*~MAIW{Nme!#m{Xl|y6Kv`iK-PxsR(Sf=2hqU?IDr3V1b59-KI z2SB&lk4#kdk6fduosmi4kRu+AgzF(rFHh&`ktgXDllj_56^&r1N?}*gJ@%a?w7FS~ zIvVSg{4G|u&1#QdVy(t(lTTPiG}3^R?3}Bm`{5_;o|6knMjwc$>x82kTelkzM<jv}WU=cwY2v7SJBurFp9)X`b%)KQ1q!55up?2so7 zuV!X$&d^2J|B6j<7-UHO(vfW2#vEN7d$~*Q4zU& zEt^)trqi;jJ=llUw#0oW-+oly(vE^a$Q=$fEP+-$ZID5jaK;(iZfUK?Mr^($Gj{7Y zO4FP#*DjW8anh5O82WZB9H4gXVH7o4OA#1w`jW6Fd4X(BuDU=Efj3J_QP-0Nu+qjDaGj-<8M4esv{v^i-dU)bAiNa_J-O@Q z&qw`8ZdueWG_Y&kmNArEJB19~_gPS+7@K>eM3pl!!prcq?lg3yB%jBz-aHhLQPUGl z)N^jt-oYgNq|qj>%azG+&bCN8B@+4rE}OTa$?t}`j(M7d)5K*+C1>naM)cB?9}Xp= zM8uUE$?lgYv#XP{RP{UO;;)Cp9faZYd~UHc(aT2m2M{BR63XIvauTbTTow8BlznY)<9nr6?8KG?0|yZ+&R ziIW3^$?psT5=ot4s-@eDya) zOJug5tSHG$#JI@72ngEcoZmQJLUh`~Tc&!>`tQt4j(Hmj=Mhp3U%JGkSPW6pS{Y}e zuywf##;DwmC^QUn7Frv&{y-cWB0<8-#dbv4#BX;T%0&NST5nKZKHznSP4Os|o#Rsm z`(g2wmVH>r@XBm>x5!)$Rn2LT$gW94S`k#Su9w!~`7zzv5@t^0QAK%W-=S*S&(1hL zJImq<@R1+*4*EtQji0d@XKDUm1k&7EA=)b#z@7ZMYtME}f5)*_POIEc!yDr(Dy&VD z>bVXcr)>Gu2-SDnkC?W55i7p^n?>1wjDwPY@aBq=R+P_KR40vm74y|K`a`T}@nzMz zO9}twmHl=MR-WR!bqmvs*!DL3Fkpte?0A*dL04{;@+Ul_x&iNmQ;8`H7hwjhrX-qy zZY@WA&`eHaQ{QDU=9BLSk2Q2nG`4*F17><$Q7PQSY4}8UkGYACpzv8kR@+K8hmnY( zmEHG?N!%Qzk89e`2_yw_n?IMA>zmwvyUgA22B{%+OJB=D>cmX_{Jvg*>t;BH4cCZI z#u$TnR=|F87UXgH$Rfc4&4Ds=I~5h9pQeGclTl|@av;g*_Q*!51ALpkHv*w#3MtAP z&7D9i>=|?t`rNo4OVsC@PGZF@=-ft;Rf482eqXD95;spUol*obc9_3GV`XS0P;dpV zy6W(-w)+x!;>b3I7HzMqIn9Fdc2UpA!(U%wcn`28p}M50+-m9#z1`yfj6y0-dx%R` z1kob9AT`IgnpyD?BZrkY#qqMT&t9Y-R?(GaapRIyV^lIECMM13S(ksi=>!K`v<=+4 zCfaCN;8aJ3bLLkuF%X4zdwT8!)bSd7nWYRFb*qxU^jb-IW+F=89I<&J92#(JtmLi6 zQ;d8pSuvQi%s&_zlI9fo^E>qe{p*c+a4<wy|YwOD_>K==xqk3+?lR*__9+*&973`8hTZzxWl{a1SUXTGJB8_;3NEm(fn+ z1QIqqb@dm?i$<=sT%9K``Xq=W 
zs2G96S)Z!;QC{J|7(sXJ;nmrH8!!o$7cD6(=Gfe#N}NNz(($`Dk`>o?{l23Y*-LuM zszwg@=q%w0T+jQ(@@$=~!Sw?g8EFymgnICD3tc^> z3qO0xB-?XNN)Fkt^M2k61GdE%+GM7#zUX#ecw^?I>4WhV@RqvrD5^eS?4`);Q*D%mbaw2)Qz$*>Y^X-#Q|Htsv&j4#}8| zT_l2v>dT08(H{=vViUhtFt5&*)=$ayb@thYliZ z`c%y$HKyUXtcUl6V!@nB;gu;~ww*ZacyHk2Cdol{xH$5yJ#s8<1@Hn*=>(ewg}W zhTUXmL!CmEL=D|Yk1q>gO6Fu}ks3|~p_f~qRv1pnh4;3z402O3K z>4Mm~v&m`cQdczA4_5}i$)uuAfD2FQG#n7;tOjIU@GsRjc1-PJ4s-ywq^E&6pCS5etGT#&#E`L!1uMh*#^ed!+g!qU~-GNgMZ&EZ^fa-#Yb z^tnVjfbqP;rwEm@A%E@Ro5=~ocn*p5u>zUgF43YXT8~tY4EMof=_E6Mun|8fDyewH zoeXb`{ey=?aw3k`?1*8zr>9X6iG`{5`&Oz~chtjnBl<@t(8nvgr3Ma`dtA^$_HyKo z6fR8c$Jg6?>rZ8mMv_ET7!8slR__F>{HJ{H1`@F_PEM&3$Ibp+V0bX`nr|Anjdlx_ z0m137YDKCllHJ$Vb;EEzxK8tc?^^Ag;I*h<6s`Vn3xa{vAzn%pDwyO8u&$^ec(2o_xe47hD}4TK$;d#$9G~E00$^ z7RN19fBJ)P+A8TE+%$qY77v0le9NgQ+DP#zLQsrk!5#sYCZhO%x zJ$5yV>5W|j9~?+s>vC@wD#ALx?{e>ZC9@J}fh|S#=ciAU&R&R(aAkka<*R!x<5=yr zALeHH(mseBv240h;=B9oO|5{lH)}$Sif@deB%3!tVw*SjWy)g|Wp{;03K`FIgS(8< zYng2l@q2)A76yZY?P$~w7a?Vp-Q7*TClnmikx^%5pa6^ZQY>u#FUxrtJl?+<(0v_Q zU4sO`4#Q&^K@~dZ)k|fbeMk2QNes^k=X3VNjZ)-7*b7lQ#GP8*iNV4x>E(U2_4_VY zBiY2RPT)1M;plg>kqhlhkt&VDGX2kk2DXx1TAY7wXpJaGtx=^82Pn*52>gkBfnD={ zY?U~t$y`I=^H4V954d*Fv)wtB#7AT5jPURy3X8q~oHB5l?e=;X@~tSUsF$>gAZN{& zj$0%N(u6#2rw#NXL`Jx$-HRl5Ilb;dg|BM??$;LVgH70_eo023j*XUi3Vr7ek`pbC z4-}|*r;uBvCosrv^yhMf8_(jKihi#a9)vjwiFe`|$J5g*0-iViiZu$tZuN(P;?I^M zl$Y3I7TH{dY9$EV>v)f~IY{PP6RuHwn|-HHJ&W<}T8Em4acw+YA{w%6iKhwIeS?@8 zm3YTS-kbtHI;0%V(>g72Vb9HQWf9erOe52**KQZ>zjV{wwu3M9vG#9C8$5Lz>AfTE zp|_H!WJmk81uJ!%>bYepUPS{TY=L^2?zaujm@k~Q(dE>`co^BSdG|N9m9FzFSX6_;2?npcF zP~g5HXx78EXT>YAP0+Ml-`DqSZT4~R?BY4(a%9B;jtg>ej0|f{ZgiK8k82o?$07emgQ%S^+*(*Iy!5MU z^)@2NCO8L)IRs?y#s73sKb^HXx0Y4C>QFU4F;*DxIJc==7tZeGj==5?q6s@s@&!h* zx_gl45e>F@X9}VO(qd9&^p4@-;Md;kkHG$+GU_E$*kux0XReHIdYk;27dNlxvmz?m ztx?#JYqZaS-CqsM2)h&-cWJcqGapjEBy!-Iak(x2Xot?WReeU~Qa1boP7r3d z#&O?e+Y*N->w2*HNg_1QopG)Oh`R$5mFu z#uhy9q4(}pF8$(YWbtVQcDL4jZY0!IE@@>`rvJ>zs-Ickyy`+8tC=WDyNbSC;yr8d 
zl!rf=R#klXSUB^+sMijI_Uq1uST@VJ3s7|~ZKoI=XtVZ?xBEHLFjj8^i}sah%O0wy zk^Ee6y8>rhU^GT$GS}Y8X`uyec)3K>a_D7kksfLFPSc#ylh~jptm6hkFsiNmYIDK;8OTsF1pg`ZJ(hIz-zcBvgn za&7t$RrDlJ;IBLiqEyI~nTYtRQ_AB)Gfno=9c|gWiJbE}CtWnH-=@id^i9-@sgA2P znz92>8SRvAw*u`0#Q)6a<(1Z2W9j#9FVB)_67~{1&aShQ1Ob-gB;6_FVwr`n;iAVC zmG^3a#CKnusEvs6TW+?WOf1>r*m$R$lEi#p{-pgFd z$iwlWu{GD19;&Aw5Y*PEZ;p#{2x?}n9AX6-)YZ<;w}i*7yHyw99qo-xi_Hq+yK94- zIiPpy(;L!#kE0k*2L?~?)IZ-}HrL9nldcqM=(NDOx7l=>aJ=>11*d)&V47e%5st~L zA@&belqSUw)*=Bn0Qhv&mOB|H-}70?(1)jCQ=awVDUpF-U<-VGVOrPD zFR7t~BzP9WgkWlJW5T<-&R_QL?7_Pd0*dD|V-Y(4mq_{kp85FI4ZgOC)eTEImbP5t zFRL;wx}!!J`TEHgrb2R<`9@d|p;=&gXj&f%?8?%DY)Y$$pOeayUr|zBT@SQ;9ER`+ zK0PG)ZyEq^lQ}a1o8~+}iwauXG5_00Z`y~zq+UobX|Mypw@~s(!A+kR<}zGbGCS|D zv~qWzCPx1Ke{uol?6z*wC2AE)7a0|e*SOlw003AAc_n)7ZfZe6G8}b$Hfpc$TV$b4 z=044_IPft<9JsK)vJtX(B4v~m-V5bLBdA87E5uHs5;cO_#>S%IE|Y@Ea^xcZK|u`f zg?(LWi0s|Fh!;a2wh)FvlvCiSn(N$8m$M}M_C-z}Ho-&R@pIY(G*aV#VhjtXn&W?q zw%{G+C=n6@uAV%)02h7!bTGkYe86p$9VR6u4!?G9FG_O4d+kgpK{fBK)!aoxC8$?r zxPvE{xR&RIH2*3EqHILx{3T{(HNn1Q0A&y(Vu+hWn$v=mndzrp>-m7i*|qbv_{vCy z8V${z8eLZ%c3;>iA5Owe)ZR%*M*=Uk*gSk(26FRPPA&bRC#L+$g7Vo#+*%eG z0I1MkE#Ax!ZCse2`;qaJl;K~dp*9A=Ans*HC%&?D$ryN~GL2)VezI|cWRaAFh$ow$ z-KX`Od)r}W!tv3N7B#vpt=eAT9iE6k?D{SFJ0^h}qaSMQN&dq3|EFw3|NJW?6w6mL zdSs(YY6w@^86f{VBSK1?#He{whJFV5>!w*Z-nhul@GV$BN=M_#!(j{0CYG`0tdQp5;FN zs}Y30XdsO4e}6}q!&JoE|0?y92ujZX%1?N2aUSB^{j1pXBRcx8;#`QxA^2AXak`2K za{GUk1#5&T{j2$e5a2IhZ#Rfmv< zk}NCmu5ptJp=Te(0BJDIb*GcmZrA^_GFl|}l1J>P^yi#jCS-p%i}YV*{r+z=^vsBC zr6@<`$kIRs9`mmz=>C5)bN?^MFTygulLB|AqD5Gr2-gRVv(}~j;S0+SX}CEVFnwl& z$XJN{k<|VJrx-n5|9&W$rd991^KcrB(4V;Je0$>dDNkGj%C=VVZh)9}h(Q8n|10qR z;DGQkyDM*MH4vZ`=283l`1SeZq5A*I(+2TBi-od!M;B=f=jtf!R)U|WGd7cEGL-GO zVnsC7zQNLNi?WIUy;)j;6UMF+_+RDjEk+2c6bPCf()?zS3AU2cQ|5sJpXM6KDgfV? 
zVf`q{dRCI3Y>^+?Hb9nI5p+dKkF){~3i?+Cc>mZ{Xj3Nu7`iO}$M=#l6DkRMtTM9> zDCmxYb2g|KS*4;8T=P*#lDd8O`0twwJmke65KR|^?)7CWbfnE1e=HT08)wr=%h2Zp zdYz~x$^P#UrNGbrn#SAD{CY|&+s@n|1l+;|!>vN)^I`jyd$g@CIuwHc)x|9dic5OX!lMMA4 zhEuHZ2?jT9Rb7R33hYaVm+pT29zU|}$xjh6`_Q&Z&(`D<4x3+CyA4^E&#l>z3^S-( z{#gPaiFfm|IBW{g-Y$=6N%k&hKZoAG`bXw|1?GW#K~qWW4ex2<4e=e_)V6aYS6Fel zr^Y6Rj0cOYT1c7peVfo*x+}Y$n&o>nrXyjT^vBkzm<7U${6@aK>-WyxMYxvt#730d zmN&WOSCfieoM&6vSytq`Jo!z+7QhTAeY8Qv_wik>b4V?fLg4*_O3#yp2_ggn@%e4& z-T+zBG(U)PeAzX(MaB@j^pT_CW_Q^dm_up6LjkEz7yO8UK5v(;5jp76iJSgVe@Nyp zf;V9%!4HikVMuuUqm$_?KU^?HsK`7edkw@fgXyzz7|71hr;{AebW-8U#IQlwu( z5V1V$spPII1PsDlP@k(3D3Oq8g0yWF0DT>U@KKDAy|*YMPHo8MaNafU=HpBr-0QM} zkw%?48ohsc-PzLNUk*r($V*jAa>;WUC@S*|RHnSUZn@hHT&sR+P(+@XaFgRsi&M@Y zJdN;L{IqJ)6#g3Bk^yM6UDn;CQ=gie_nY}Feb)0mXktPo0z>i@p0E7rNz{%frqOdb z`3=$c^M(WI)<7cxSjq;!G!72#p6}S;8*~KJ?+4%jOF<|mGAIN~ZEYlnNW}N*B7VwD zNll3*GCi%{MCaC2`&~Rr9Z5AwtJ7jCS}NwQuym%2SPApnNE(?CrvL}md+*_ku8_W= zbjBETQK~;PcLg6d25hVHep~emD{`Bu3Yk|!E&yXAYsqSgCCQsaN$MZ12)t8dNbd&; zO7ot-J>5y(8Dgk>WiEk7AR?00Rq1O%P}=218)iN1i)e`@d5E>;WVn+wio_`B8{es= zG%l>U4wp!H(zYJ}OW7ilo@LuIqME{T0>7iYk;;O=*<#SfCq@Vd5!r3y!d@U1zE39Y z-kzDtq+wEV5C`^QK2o*u3H~nI58N4QIuIV&V*q3Zc{!D&@GiT{KwIS_BcZh1<`+H* zsTE;knBm0vUy|4job!B(MPCix7MA)x3VZ9`RyXqCN;DX6C&Dem_SGSr%G3WqG%!#5 z^ZQa8#_@>C!c;j_Cm54G``csbNV@y7*=f#0?X!r;1xI!5_>qyX*dTqi7}mV%)o$(k zwlOT6l+wEyDDq!BWPU<<&ZBkMHq2woVWe}C&Dv4m%{@F7xtC!H(Vj(CDjWIGD+lwHy&9ne<*~ z18r;E;3NdYgn^Al4i%l%RArT!CyyD@D;JX|qZ1QdA?DuQlYJ}@YH|3=dFp`nP|+n9 z=zX#~nSBwjj<)$vg7S3l84^+DjO9;C!@^?1=0_*AJ#az2L5kD49#4ehyXSODV1QL2 ziU(q}R`yPRlTff{5P?X!znu!h&HsqmCft-O+0ZoPXw_>jo(sM4L_=88!5EejTu*1p zGJooEGWXeq!_gBeByo*!T}j^j;T}DjeV2Pnhy@7&E)$W-e3#iC8zRbmxX8g8oZQ=N z9j+@OX5{VEUkPbl+2FBg@rD?hMHee6mNu-NN9F?_HZV5lsP>1EH5zGcRKAD=^-vJ*GW5&Ac|PUNbC1C8;*?eZ$GL!aSki zH$+m4km%<6`Zp#){Uxx?SJ8QWy2hEqLdC+mSuroOCNpUa%wFbwjOOYMiOucx@HgXoFOI8`QUypAOmt zTui6bPJ7KWo{!VGC)V?)zDi}8ws-+}*vBAIV-`G2h%!#Mki9>eXQD`5OZE0{+gatN z@dI~PuCthR7=(hRdTdHp@eW-qio0G=;QzMy(Z|wlp?7fPGINRG 
z?}?x(ix}3R33s6e16J1);>EWdwZM0TPHUs~UbY-Jwm8V{A~6ROY;OY|s&_HXg~P&7 zpPDpJe9b+NOIDS3SIN+yBJTS03i?XT^)eK1dUzq?3)i*~z$17uP7#ZPM#(ds0vKb0 zrhxP`P@%qZ!Hd}6Y$PJi%v;ih1kY^tVWnurSJ`)e5EloocH^i9D)_{$$1HW~>)l&# zyN4or2Qy0wytc#8%t2ermuecnNNcem>6 z7lfKSX=X(C!=eUb`>@Nwu$6nFr{iVN>69p=*xS<$2YCR{(aZOE8r0Xya}|@prdQT< z|1Av$I)Npq4P%OAscp4aqM>3JwO)H8(TCf1WnTprDnF4x!M!(L$Kk^+8O;zpD+Re# z$n7(U!=Ymi;O34c6Cf9|aq(;{_~4m8+V|#=Y`!Nqlhe z_HY3eo8&db*r)lPuYmUBuO6aJV`P2fHv_bfVj{1S(w7xeLgHTweu{pj^s^6?P4Ej9 zTmR?UFXnuSiH7@?nd+uWYK8ns8;p!4fBj@+Z24eMx~!Y}%^ zKXw?WNh}tcTFvUBvsq!5Tcqhw%`<(B zGQ19a%IWp}pv9C=^r2Odisx=w5JHdn3@2Z~EHzelxkTQXw?+xp$g( zz2xGkW&bian*7&~Vj=!L#*SU{Nys3B;$C&4!$Kqf6WoUA*?w+34{o-|o<>HcObOzb z#XTP#hU@G1Pwiih=rV@9mrmu-=*St1iOY-8cjj`%kxcrf9TjC>^r2R-)dx0Y*u^%D z?)Yhtd{QH9ri`nQ2wT5Hg+VoU<2TG3VCV=Q?zAo&^Oz}*sIryVNs8IQjzdAo;TWH% z&1rZ|NR(?{W-#*U=*tw%oY`(7yKar`Y=zmt6?90fwf18j@a%e4{f|F{aF~}%eS~mx zxn!08$GdXNPlego~)~OxcZONWgBvtZNOVr0Nc|lw05(x zL*sa^V$&x`>XjM3QRWX;@{-jbtl~eQWoEYN`WbHlVgb#!-E;RPFC?%*;Z0AGGg?i# zX7+TrkpkZieSE454J|3~KdKoW+JQK03Z@esf}fGa{R9rE_uCe~EJQ7mPYK%l^hurV zksMbDLp$iB-;@n>mEN=>KxmYo-{-nTwvT5}>YHY|U`tT$bzBafw1Zi5NtQh)pkk0*~{N(crbTXf7iB3z@KUQPb z(>tFpf2m*1?tQi-R{SV^Lr{|RoYe9|oU={XVh`;s2&+0*jn~;VS~*M?DSFLZ(}wgp0B#X{6&=T zAxC!hGxs-aT4U$9IEANW5q$#&Wi9nCVs85s3>QvTtNkKn{Q^>>}NF?j&MxgvM|F#YjD#;-PQa_D(t`TH&WOIz;eTlIKf6>ev=*4kU?3b|* z0`5X3x>~99DW$Qy@odMWY{q)}IeDzTrV_i+TGHGE*16^x8Sz6(5*<^MrNXF0?__vw zBcfkT(i;-zhXi9}X01#rQc8EIK8@jS|BospvM*ywOVsjF_N=r`d4ATIbt1(6qG*&C z0D#F6$R%QFb*JlNn%Wv!3osEy*C72AU&~ge!&i;RkYg0MIPkC75mlF{bW4vIvNArV z!4$#UdrKrK0bjPci}9zq4=^97wY@QVll!`tZmSPdm%p5kK>q>hA%q=Sgx|B`g}7gc zs!JRkd1KF}j;zBH$*Sh}<29x_lDf3UV)VO9OA;XPOOKBsY3$F>+0JsHAnhnz(=)-D(xdW}K;N;{5F5*Y!X7-isno3GGT zTM989LY{+xb3WDd7D z7&-H4J6WSyzIt1=#+C9+nZ=XkhmPS?S-;5tMbtNNX%a2V&NQa&{@S)}P209@+qP}n zwr$(Crft4H>z;exKd4=+Dr0A4M#MWQLXzr$m2&ASg>pQ;pd+oTm3ymKM0HxmHJ@Op zW1s$&op)NVAKy`-XvcwkEz)9>;9)!$>Y@A>C#D6_K4l-}4jY5dbyW&pe550GtYG}2 z>A_{A+gtun!2n|MxG}>XC1s*EFTF&fWtKk;LIwNSOSqWLV27W-O{440;qUhjz`C?A 
zdSt7o)9}`muTagP{NfyQhH{@E%nH!~<}zhenY-_D_5F)m825<5>(NZww8=v!aZ5sI zxq;ap|uB6R5M^Jf1+$PAomv*$!I zE(yPQ=b}kmW$XQ)%{Ok#sr}xqBBK?vRc6Tc&(A1U#Uv@*YGDMNtKKnK@puB8VOw_J z3j7CS%#vjr#43MR!r(HBL@2T>j{2*9k&p~kfxmzLu0Q;GZbU((tuy^%DabH_ z6^6-IN)^Qy$61E_opu_C1>(rfE)Ql-s7Mr0yvI3E3ej>QCD{w{lsBi5`V|R$n)G!b zVN`GF73k8)YWwVYD=m+1{tvr^gZ`J%RNRN zAwe_g_y9$wSxL#3NR4WRDrB=nYRKm3KV$?##rSrIb?TL=GN`U{;_xSih`d7dP^Z#i z_D%c+Y6YsZwu{hbv}(`omB*CEWqg*%raAa%fjVpb3dX_9!L)ht%i0ZFyRg}X*dfDV`)+;o!52Ac_rGRU zXrRy?l6CUM+m-Up)SEgT46&nJBpOjS?>pd_tz9r34({l>@U&nA!=K zBOv@v=v9ka-J`Ql4ArEOp$Zq}vIv?n6(*$?0c2|8=_7|}cryz1x)2ixj5_lZ8OW26 zQ{@5!%wN7ZYMUc_Mxt>DZa97JQ70m0??qyFLHpPSL6?&DRHHBV}yv=mJUE>l+am-HlUhyZK1XID~>)iZ|4_yz4 z75?HQ#@3bLZGvZz*In0TQ;#Zv4N=FrQz}=`EL_mIIOOP=BTyadrcX}!@}Uq8D|Fsw z&Mo#qAzwC)cpmSmvJ}NbtzU;?CS#)JNpdY8&;;BA42T$#z}wha(WHNOs1_^QmZC6W z5r=S*$ar=tjWQ_4MhDA@-kv!0UqBRy1>%a&uZ3JQCYXAS_bhjgbIT}Y*B5xvHavMv)X!W)GdQztrH zGsZOrC0jE5-(3K=@KNHueuHHlBqBhhI!udN(LP#h`x^{vBxJd0%I*aj3~q$fhSs*@4w*cLv;pkdKpvT-mSB9~OgWK}^UlyR2zb_B`c;*Czu_%>n> z>`C={XC(9sWXfddaT}P*V9nf$cJp90ND}2&q!ULQBKemvlG*tZSTcf*!&)U}84E-Z zC3?)A93ZisrFD0lKk;wT`*_Axx~e(& zRA1$l+|Rm2K?&UJX@LOj#vho+9T3{MAi)Hv;FW8)*Q@B~6ocW5KqJUD^8mU0=ARH* z5ubqtk&us5MWdNz)`Sf=|Im_`1C8n#8n^p9ZWpASFD{~SqBe7VMQ5@`X?Cx-P~6Ye zV^zvo2B5*q;H$r%tTfwNckW7t$~=d=f4@YsKeeJljH2>M*k83}&--EE-rhE7D-_OH>y4;82Ar<v^r}@cq^qO%Wtwn%+93~J-TV>Qgc_DQ&;1Q@opLTmp{dC_}>b(;b z$WRFH&C|2;W5_`|*~=)Si-d5bAbB%H4t-Vjtr~l-Euu?HwM?LEt0h!S&Nj(J-6IUJ zr_=i3`eShm0}+J9`M&3it6NpJohS0-rflK}=N*N0x-|@hSu@}TzI_D6))Ex$^=V8h zcmnZ<{zMaFcZpmRS6gfuHVR{ice;Wc9R~3j51B|fKSYF9LUEP;2j7uZhNwuIo9hDS zAC;EP$C6EHfn+=BXn*r|v?7uu;S3QOX*LeJvT^pxcE_3;xBKJth^u%Z!~w@%iin>e z>g;{^G*BvqB3-f_*AqM=>EoH3l9o-U5Y0_VbZVsKGN9rBwX$4d`ZWRqkq*?Sqps8* zWBOCO`@6^oCIJlC<@y-A_v|&@-p6^9Y+=JO4S%&1%gcF6UJe}isTIk7_`mmK^?_8; zH~whi#7q-|$vsu1DZX3;F~^cNB$@Bz-m!Hz@&tJWXrPZ5iM!5(l6}bMAxZNf;TGe0p-ePLnGf0T7F#69? 
zSM=&@pUS#iBwA(J*W+Dy>DqgZ4Y`gDQ5my$2v!4a53=7U4a?vH>KtLFa^Rt_PvJ;1 zx1`csA+#yPhK6Ab*^D-yAf~2q;9@GUGGK|hiDHf)Xlq(sodonL3_-K-L~oMm)8=?> zxKC_*rekE7%iCv8-+;R+Xjmgc<5J?tNy^O>%G4LlQS40Xvo$Vru5KTOesd;A=R+nE z;iYcVojbLiQMaO3-^h#MDKd3LYn3;6xDL*p@pQmINyvnuAd%S2oaSgTq`pG~fj$mk zDtF6Z_24O5;;=G>53{nTfyf&diiNj?-k|chF%TvrxSXJ0V2Tkr7OEA>?i6gU9-ZBsTMp2S~j#|nQ^Bs1%?Kb~z53s#Fg)`~lnL|Ry9=IpR>m13wGJ@88oVazF` zQsqe8vdjfpv+^EetggzEU}rO4ekw^ z0bm;=&!~<}t`)qU)9jmK9~goTv-o|o8oc-7vC6obL7SNJb|Tn&qTE;kom!B_Yr|#4 zN}vPtYi4L4Qw2aVF3{PDN?2O)LF8XtyIZ}7KGeiBMg7uhMWd`x+6<_L-u;${_iIYy z6MVv~vCv4ON}y(@myMD*mm8g@XdjLuBXnQB*$Wz9mHz`v1>oeN_knm&ZBOl;**?3E z`;jdX4p>{l6!GP1B;!clb7FVzp+@`_&{>d*TTurUtU)d=wKuG=RXYHY)QG4E$|-zN zmKa>e-PT`I42_hrg&9I0>5ccKLgHbQ`ggXI?zVkfAu65TukkA@jM~oJ`b6?dEYUy> z!@6;`h(gF^@+|4A!9sqqj?B7_4#WS}IJLXjPbThhx{M|X{3K1uB%a7&tEs3XWKbE#nbYZWIyKQLH90Oa+DY1U@bf#4y_5Uue`JRMZ>A z2t8+JJkz>84oP%(YTdf00_C3nQ8)8roVsxUm~nzDMkYsDAC#HR3b}ubd)HuK0Mt6vaywV9?;nC;W=oK*_qQKid04q z8|9mm4L5j5A0a>xEfGv+lEYwkK17(TQQPd^uQai{s7zaeos||CPam(V$|aWAKcYU^ zaOr%%bI{)I>tzPF0Ke0?H#FG(EF(a1*YsE4ZFf)wZ{)*8- zH!Bh>Mo_CI3O87;4>eFORfc4uM5(UUEr(2Xx??N4N~ekn6hdUw5~MmxXUaevX6K<1 z>X58b&sD3l>kj8+v_Pi7kMa+;5!M_*xK|2SVU87ony4#VrdoCvX3D7*HIUqkb7h}* zteYZeF3UASnA4Z0OGg?9Hoe?Wq+G-b*!K#4ZQ0PVCu9qvjDHMW%bnTUitU(-my1O* zMih$69V*r1M=? 
z?{0Xp48&mu%NNKqGGm62>f*`7foj5u67h5dqRjqik_vh9E(q0y#l(YhljcdgBOL46 zLHq^6Wu|-T9Ld!LWb)NXa`T8+R^b@cq5?5=NJQyja%2W-Kq^tP`Q#O9gQJkCA{5>7 zysTE^$^PVC_CC}4*o+TUjo}hq%7t=HwNX3v4W*{Kt)|L?ZMkwT`Y2&CF1vC|fc;Huj%UA3C@>~nZ;%fa1isr8gqwMM`*+#{BweU;|$zk?yCaE(eCzLqhSaLEq z;acfdj6u&+(oSqhn&fyRScKeQpas;_Xn!VvD060g?+g(iL70M=JFW>D3F6;)!8D?2 z5Wk%KN~CHqh#VJ%>g9NEs@tPs?qmsgh7lQ%)<0q8@^b*^@OY{zG5^F{`6K;exhg1s z&7nONg=iK~eKUeo8Km~l)CRrbjxyb3e_DV%o{GIb9>0(Z;Axx!RG}!}zq!fP%VZT& z`v(z4#)PH`wCCVPIyJKI&A+lWdSntu*xF$}c->8rt=LSh1Iv_rGpqDEfoT1S-kqZ> zHF=S3_TfAjOObo&^?K^UxHW???2l7h!8_e}NXgKRdxIAYzR%~29@8aP;i3w+X`NM=K_$LSwb%iK|E8l9^ zSBT?gLmhzM-2h4&@jKb{p3m$jC#;w`kM=8rO|It}cK?9RwqqZsDvU6t?ZlDVsOdiJJHXx&^` zkIgaSQjIQ?1%$-t3?&nXs8Cp;|DJkgYZz95t>!eYv5F(RSTsgYuxaz7fKD3YxM<~s zMEjLugxZSLU+%jZ7_0f4D9x>VwElw?Y7yl#7WI*COh+t)w;*13_EY97iR`DHuOK42 zfUjv8G*v8Pi7vZxG_3}6t?=!$pE7qJcgVj?XJz*pNfd#Nuz_J8;ZVFW-NYm{cWRqF_5>;S>?M%xN}L zW_r-=|M-+x#?`*P@`{O28%8F?7qi&_|zg;_s><@BWf`p4sK z#|$Oc6eyg>Z?-aQOS-xMG2z;$?>%fQdt~(t^25Tl8_d6KgKI-(s8bLUjuL>s5# zjXC(85+XIz>Y}+I|G`#){k$=(g*ECrg-td= zZ|-mz-gbg#;m8mJ8XILvL@;%aOAZz(m7!6A`A7ew#RKUv)sy&s8-`a3j`BTuYo}9W zd5Y>F)NNhJjE`N2Bj<#KcBzrb&~o+*x#6|SNw%`N^{u_z11;z0^vm|)>pg4aQ7Yj> zh_VH~MvO+PCay9ws5s_`6YtIHEanRVVgKcxpl>83M@q|DrO-m;Qph; zA;Uj@cmSGLV;MsE7^=&!|rN`hP?v=&+*9pr518xU#nQhjUx^(K34VWW*MAl8=V|7t&yj_Z-&T%L1O+t+3{=U{o+?i5 zI8oX33o9v&*ujh|!B}PO!=Zu*MG-l9@8Rqy|9{x<%#o8-N|NU*Qd1~<0b-vB^T(nE zqaBkQ2s~rjzthQ ztMG*S6!RYq%g@}<8xPT&R%E1ma^$gBgw+H+>gH{}788yL46CwTp|^$7#nAmk=$dT) zFkAaCL>NvJWjl0t9^ap7d^z34<3Y^URNrQ^zTBGkQes1GrCjgR_a=Pyk1FuLJt=RYJWQvM zaIj9f%1K}&j(^M5pBdDL?IQAuW6-4jSc*ONd=cto*StDb7Bo4RVe=g1(wXRB(x1brX+E@R z7i(6I3(gM zI=v@@Q@63HcQQslDbG>%FNj@_S+q{sRC0GJXPY7;(q8 zj*734-3>m0G+vW7FI7lVYvirG9|}Mxs{}kTTk9AWUo@yKSr|NM&YP@+&>r!Yv|Hpw z>}s{YtGLw50I<~JPmcpZ7t_x@Wl2&8yz?cU3gq0gx<~?vm7~^TcwF*a%yOB@(Y~(c z{ct?RD=DooAqikk7$`kkeYYA2q6kRRV3IT2_N)x8;!ot*nKvH?BAJd@&W<^boNM1IK%01f72FVWmUZ7v=>iG;)<_m%`p6IU+K(@)kRZ zQN0Q70te4=LU13Fm-M14{l7ngD%glVF$W_f9}hOqrb$+i&m!|S9b<6}B8NO~#jz+n 
z{EphQYYuMO38@oM$tnofG%L_EHs}TPUhNW1=K7vqTL1tP>Q?mv`7l#O2i3lB7%kZ~ z&h5ttuATd8agkqv?^_fl5$~|!&-Ms_0ju~02GAmxD_ciTja{LuyUGF-6%z0}T|Zt$ zTI|VZ@LmM)<-G%9(`&zKw0eF2iKqnH(4;+*ZdN~|t&L=IxsHy=q%Sst zj+c)tP59E*EQ9OBj=vdy!~>!4{&a41xMEZi4qyf{ISAHNS+R`pFBTTAZbzY z^yb;WV<8v^tiwIAl{T3wGd%_J*W}Ho$l$&*B!m)lZ=kqYIxOH4-TaGl z+GtuKxj8D#De`;^31|fThE%eUNh&=SnbKEuuq};h5+s_A_%vJkaZ;w|Cs*wXdwB)& zX%oQ%OyygAMw1-lGO-0_v(+AnQu$xNQY~bO)Z;MNTN0{|#g!H$ddm1i>ny-K|E0Ms zU$qh+Vpg9+|$b%>2F|kU|KR_TR`0g!sQn_Nz+n z$zgqZ4DNG$%#s#^!~%!-C+}s>o1qh z(4pPGUNYxMZO=Z2fRmG<(@=LzFPsDOl>I%CI|{7dyR$M!>2_c{9^P6av%%6zh6t&W z(y1ZZcsNlP*$v9z(bPfhQv`K{!Lm3wuAh=Bh|dRZdUqWZ{|y>j1Z3(n+z``wZIl2u zxaZ$zJ7&eIU)M-CNijky!~O>M_ef~ta8ODt#$6Kb3XPgU*evR;ASAO{t6WXg-ect9 zGW=Cv%9Mi*Os0&-b@)C6JNpWite*E#rp^1)Z@lE>p&#_(m0f#Bk8PoL611=P#5lJ8 z0ZX!Ccy1wv&GRX>1WYJ;3!a^dkv6!q3+r|U6~_P+nJ5&x9uW4ogcG(EgM2cdLtQT< zL4@{YZs#jiV5EeYRPt|UpP2TYUh|=|ofKO&gW5G+st9ro7}fNT&p+^wd-v-Nc~r>E z>uF)%hzE*tt;k(&pKi8&L!<0^%o=y8y?F@RHca24|Cc?V3fs#PcX-rD=gVmX-~L-9;l08%DTK)vc7;+%#F#>P6$NehJF#F|p9S>Rvo1`LqFdO!I~Su$lZ4Gif)S}f9^Ic3rYTUE@e z+t0Lz+if(#E5Q7+o+_yq^>jjm=g{Ee6*S@zwxHEdgQL&uCFe!dOK?$h-%KfT89ypq zctaJh7_rB)&YmcN3m+eU8dTdlFBwA-4z^T#nhf2*Qj4_7qO}P-S_YPVt7g8Lp#C=S zi)-IfuU^)rZsSM$`W0w`i)|-6Zf1ew!O0EFA+XFeY7WQdKA)?^R97GM@VYdbuf`Ho z5YmYAm1-iR{xOv;u4e?lq`Z0!(y|>-Hw}SGzsGJJB`8R;AZj@1Oq-zSgXrFc5 z_d86njeA0E`u)W_LhMxk)W5%_(!$pxk6m?dX4th(*wmR!(V5L0uvVeITI_$8Om=mp z0EMWYfa%}l|2S3E`&CcIVQS>KQKVF5w@y>9&`iF{ZmiAUoSulfoBms#$S@6WY!yq# zN4(INil6AWTxPvElRdO}bF_t-X?g$kLmVaKsnX;;f(I#phlS9vj?bw?bVRP!FnE+< zUNRm66yWFQj~q{xpu?GE<35*Wlp4 z?7&jU{4AFM_!ROtJ5S8~Yj{GDi&p9$X8#_#fLoEc;@`BzU&F*^iCWgqk-pT&XYY8J zmtdPjr1=EqT10T66~zmQ$TC%@WJH)t*~3w5nNG7A!;XkQWbE3@YffYR>AOXWp}bK7 z2%wc>>Il}Lm@A`1zFQi8U0Z<%&H8NirzD-EwN5=DuMmwjgKbwRQY3F!uq6jjLmydO z(F*Y~Q#+3Esn|#AMuKbk6OoSds<8fda{iyHeoY%*FV?v1DC;D9c&yDPf;bFBKaDO0 zl4q^EBqfbOf~&;*gbmn;H6_Nbvv6RNV#|bjU^-b&SkxlBg`wYjM(s+^lQUn(`YZqP zu3A*6%C$aswyFy$mJKtb+NrOmKdxvW{e_(nG~0sGzGqhbgVmC&5J2Xq^y`!T 
z@U=K+Wd)R0YlExj*jgXqVkpYj7(L_L3`0?q?jb5c@||&}brkxJ*^ce|d(ETfcr=fX-I&=eH!cVQt-y`s$X>gB^~bko$-y7txgR>oGw_SsrCxlod1u`g^Oib%)1cIL0N zA#r6u+mN-h6qoU+cim-If^Fn-}h{ufN|O+t)#v?!@}u zLr+;y)+(SWLQplV`!Bg-nObt0T6Au2@vogkj~$NN-Z5a`dp<{~*I0KgA~@Jxzc~*g z@Q6H}!n~PJBF<7QX}GP8fo~L+S1@U;p(mswakme~-f{2lzaOBy?Q5pWGz)CF=f9}^ zbnafIrYqOC(yc&`(z((c+%j%FsZxu96~D1@cwR$$zJC-eWK?D;j3{||>~4R?($5}< zUk!T=KY3L2_}-5I873x{$;lLKx-W;^IA8DIqarbXKDP1-wYYn$Wm?k{J5$LdnF?Lz z^R8<`O@7R-yAREqcKe4?w|~y?3C8o0A@H?l;`N9AM=M~DeCzy_Ny4gWa|KevytNeS z0xNBXYmLrKi{3`rd=GIpB$0go=RQB+XQh%Wr@m91{2s-wU@8ENTl#vaQKPq9*Q3MO zElYNlB;QSR@pSeHAUC!GxZvM)5@y2r&MbJ|J}J4zxta5dx{g*A=J}dpe!B!rY+x$i zhaTN|4X5qbeh2D#42FPNL~n1)io?yB=4twGYB$)D-#hmb*}tfXN%I-&T6sE}@Urq0 zE&MGwdj}aOgz#v5`(YU!`x6d)O#kN7e-PXF-2DDN`@4D4N_`rd z`FcxilcQPZjPG_I`_$(vM~RoZGNp4Zt0($Cr?L6o@f`%WeSX(c5c#wm3q3;g8tn7o zInhjKIFad?q=1YvkmKC(;kv3+MIM4H+KYdfQ$ojfQ9Ha>xdEY!Nd5w>5&-YSs9QuN z&sK_6y$+!bn+XrDHUj0{a1sUYRNTJJ3^&Y1hk_5_xFQH38QBW&BaEEOuDlG}vJ8y_av_pmlxH??;_EnUoKM;Ao7wa`=!Q4G@T#y$|hPDm>{uYBe7) zSAfz!%ayu+N!|Mt2+s1ACX3_WWA->*P{>4i(8+Sh(rnn`Ic7nhjl_GwS?iou51$JE zr{7b_7~*;?Qg+O+3`nn)O92mrH0!r@MG@`%*hb z_hed%NUcuBS;#$R_NHMEDQ++Z)A9L+dRPsoo)!xruHZk|n~P?uE1A8UdF6%BMy&zZ zKNJ?a86tS|E=GkRgEaRm2A2aVKp^J|d3C0AIFlg= zc4okvcOb#EEOrYx)_?CYIiP_&!hYzn94~rr@S6uy+xG@Re2)FcjvQ>WMK>sB7AL#p zd{hBd-VH~aCl4RRk_M88yF+h*b~Vuo(SppGyFanA$NT!GOX0X^U5oR)8+h2i54bAq z5B{?{SR{qSkuyQ}EScHOVY*|}zGw3NzWAK$Pulp`Tk=|p4nrE1TN_=WcTVni?7@2p z_|8BCaNvjf4WfL$|1azzSNkw&lGREXd31S=4(R9qpY^>VLg9A@A}<$1S~Z#bFLGbT^bK>ajy4qU1?kY)^hu=pPEg^RTJT=O)crC;{kaIE9 zlM^~gnX*J}kvSTjkdq1;n|6yFwXoE>NL;;2&=v4w`Qb%xGKn=%CkuBc184>O&_t!z z5n^x$4!|Do+i(}HpgM5{?2fW)R-v>#o<4k4O~xeoj3^sr137rZStDqQ$kkFHQpT8N zQfObME*-QhX;qUg(m9Gz9bM>-0!RBI_` z_SdhswLw>}R1Vd?4{*faETLuk3d??f)M`L!O@RZ!lDw;XYNg4VIA z$fiX~sn?5RsR8i1SgMpm2J~Y1NDC_{5@}6`@$J;-sRRU(f3#t%m4@}(^=T!CX~Siy z#w{bKr@nK2v3fG|PlR~-Yq++HPi*%RAOF$`G-@STPs?>1$km8r*T9dSPaWH_pgW-5 zt=dI9%Ow~u=B}B;yn#FU8=&l1m;Qhu!E#;UGE$C)=yQZ%RJ 
zi^YWFXvP|dC#VdX%itpd4U=O{e)Iof(y)3P`2DGuHh$EHylR*AvIn^$yYQvyhmOBrezsw!| zMlHROGO6rfjarjB#o3wKOwS5sZ38)+5xdEmzs0`q3h%J!S3F%7PJJ&lZKy}j7hy_Q zIA@7y5k`6u?vd+_2Wjx3oinVr|LP>_ zL$PQ!8XFgd+$vr|o40;uKXpQPCW3*^mJ8HQfEL1Oyvd;dm+Q?`I#jMzm<<`GZq;7! zd&@h*+CxHv`KBEP!_JqKxZl4DtJtxXm08^WkEWPXaiy^Z$^x^P8QDwT8-h4I4j=Nu zSesn+KXhz@)Exlk!-#m<8| zm=5#Yr+XSGc-SJ2`1`4E%>jmUw1Q>7anq5pwrz(kyxRi-uAFpgj#h&>h}vrZ3apNb zX@kDITfjBeXRQdbpy_Qe4^Opo07rsCa#fs424mzs_!1mg5O%sc=H+8qpoL zpF~-C{gqTk!KUK`jaI@Jl$+B!MoY&ut8#f*MnL|qG#iC^6;gjNtvVz$} zvpalf$dHBnYt#fgv6U3X2DQt+O{o+`qA66Qn962+XUP3aD@-SwSF5Pt$k``(`Mz{F zx7zZ*IMY_WVW)Ch4~!1?B3p*u1rrHKmY0lZyf3?sbNOYxltVS;STH%}U~h{GLZ%6}w8S@nS3oujiRE$2vD1uP2cWMIJEqo$nxyei;laRn3YyWuH@{yHk2Bt!LbzSu zarV2AOoL+&W0Raf@(WSs+^J24!7pspy&RI?!FuJM3CcjDCPgXE0$^pwNn?Rze322+ znQjUTmugK5_@F+1$x}$`i87W(#>C>+;m$@e*U`r1qu}8UJ_DuKk-%b8RFqow6_edq z3aP8x6V?&<=K9re^ON6zijpF%3k1U`!tYCjk>)yHW<%O%j*(U6(Ge@L6Y5}Q`*0Cz zcm~%{Ws{SqAhEPq98*pdk3(1vH;<~BB$Lr2qpyk+cuAd-r*{*~bL#O5!bgU3CI`S* zR#t3pA>P&JPBKaw#ftB-0_AH1(>~4d6Ckja&E}C{1-@9}7J)R^BfE*I`65|;*!J?( zjEU*oP-rnq*Qio~b*4X!mRJ-!jI^4{4UPo<6M$9!9WV<&9+|&duZk+!476bd55qAy z70QdwC4j2RIAjkCQnyo+fJ z5C(YUlWaE0tiON9$V9tH1oH3hC({=oofV{Vr^IAo(I7;hQ}MTfqk3I=Nge}F=V8>G zPQ&`CHMY|3f%DJT7d#9{kq|ELZv>fjY!1lI9!Ekoa(HpP!WZ-XHI8j6O|_6F*i2rs zFj>SlB-Ftz3%3CaY}E!#RZ(X*M4!sQNk8g5^*CY+>tp{7CDGX@(`PJA>o%U8ghrWL znP{|Ec3>1sR?r!nVaY5*H~57!VZlzmn|0=gAw@I`3j_fQeywMw_!`>OSUm2)r`yC{ z6~HvR_(5s+#IM7q8D}<2Bf7RewJ=$XGOk8=oes#LUwLujNajM95aFy(8gD5=c;`n; zkB|Hl9qQ1xQ8ih!eLC8g`p;L>$f*=1 zR$qgLyUW7-xqvRJLssKg%dA_Ue9_;dcgu^D$IAj1msG0Ns$8>%G*$JVmqmX!zWw5? 
zn-G3&-huSH&D543&!m_2fH7h_M(>!I4UL8!weDiVc3&mJdN6%f z4zwozRqA)wm7=*D{hO_Sib3vccfU-rk*KDT@b=KMt8#zyi}9A}I;r(d(&b=!vy$72;{l6xoY^ftyPagu&|XVOGmADw;C+90 zt*3T5kzBxb#A!zgKf3A)w~Dtt-h~lPxW9G1u=zKMgO>;PB)egRj~Q8 z2|l>=FC9UzKwqr(kA&Pz+kFy)WXjlXRC?2bz2GPU2&ip`dF8xw(w448y+oQ+BP&Q9 zU|j43_AA)USEZ)OanGRFA+VaO-C2Xn>lFd274ZE7D^0qGWm%XY5UylD;vBygq&TYR zesxHLvdL)xo|Bea)j33e2M!)TdErSxV@UB8l;U<(K!t7fpn5>v9}{s&ZTb%FgzD<^ z*5;0IdsN=jg;0X^f4_E+9^DnA%PUF0f?`eaZ(QJh;o0)<8Tu*$vYk`i#o;WdhS4OA zY^Mo`$aGfpQ%cTRL_*#5Wh^>J1wb(pD`!Y8N-KNENwilDn9gn84;M0O^xOwL=l9ms z0v^nV?ax$ahaaWQU}D%!L!dvxN3B;;EQ&M2Hv|O=#2gm$)}q0c?$(>v@tyrgtp8q` zzaw-&XsT79vr!GJmzU?RQmt9%IDGCK@VvjcczQP-56Q=32<0NLE0s2`V+-v3GyZ4G zrlDsQfyZC*)$n1W*}39zC$iwz#t8 zJ&VxbWZQmPKXM2y1PRYG=ngRc3?H9mP~NZptnQfwtk<_xQ$Y~{!|RS%c0IVXV%PB( zd-^bSD)OKEm{oq75aLUI3&a^~skcX1E{u zv9RRbA-kvMb<^wuY8c`Dx^ZMXB4$y_H4&pgx4H4$_E*pCl zMcZ1ekd+Rqaykdx#5dbS?p{aW6J_<(7iaOWh5{%!``9=L=O!RyfdpY`8HB8Xl-LlemSpz#iTo zM-*N8s|2u~5Qyk`&QbYul=)b+2{)S5^A7ghEzW60xj zTC-3(Tc=-NI`@zM3=^{vLja2@aJrAjt*zwm_N?HE@4kkAjgF9?f2|6Qod^xN8l)vf zJAvwH-kR^Ns`51y$odV!;l)0BT)wWD(7OKnxjxCRhug6naG>9h?W5DOqBeH^iP3m= zTjbcYyWp|vv7Sg(Bww(czJEmj=K7?@o$1--=n~pR3*?U8NBDc~)jRVC*S5zlMl{IB z{r49&z1sfQ89CF;o;^@Yg;ouX!n?l#?uywWLhwoi2hp>kDrW}}ocAXXt5)A*=hbtP z$|E+@Qy$Y+w_+4la`NGWYP8S&<}KJ~DQof*`Q3J))<~_zb1w@SH^4 zCnos}-k4n5*|P2PjT9(Nit>EcY<9Nqx+wJBaSHN7Ad8Eb(hetJ%%~Y=@6I#3#U-$b z@CAkFIo!vuO`uLPj!8P+c6dc!!-0#Fb*mXUl{9^s8C__gNcxA*&4m__LZZXEQPzx> z33c^wL4t>^TM0kEJunCUVk$&oa!$TG=ERl`lw}h^qq*vlQqat>(%WQ(0 zQWa>{sGjFo%y|j?Hf!AwidRREyOU}Cv*X*4cwxc9O556+eLtkA;99czZI;uDFn7b; zZE|ZU-JW_|FP>)Qg6pGV_u3?OdU5n>j!qQ%i<~*TkxblPp^lo$=w+S$Sd$tBmA$yU zG_yQHO7;g}6A41qU#dbP*9oKF@!|e>Q)hOOD)g z9YYznKuu4t91`R^xr>`H8J-F(bZ9t=C?z7R2^&!j|LolDd+^LFI0B7E8f7FIiiMd) zv}ZcEY&VP2zXp$UP@EAq$&_9IC9t-4YW^OIpAB@qrr z;0V7};yrz3!obl3C1Fjx08;o$Dun(&Qtk4Uu~yYW&5D}UVS)fKDvC6c-b{O@u4-W_ z9Bxdx%XatrDthPLg3@$O`kcj{>!>Ka+r+Id5c3Wrpws&xRYou!e*&nnhV;Jg+`SiM zxERXJB>UrX@Zz_E9i7_o*KZGZ30lML;SEorVwW-H+U_~#)A0#|OPf7^hY`6z 
zo>avaDctypvdlhDaX^87zjnv|&Gm|{Q>pz^6nS5v`*CbsaPAuVz&`$+5~gqW%w=o& zL~Z~+7ET@~--eJ;Hg>+dkm28&e18mw%zDRu-CjXl8T-9YLA^X+kyNP9x_bq0uh=q_ z62hHisEfYyl^y&6q>Fk{yAmHISB|QsL~r^c0Gz!9>~A%ub7Ed=YM6|!@t>^*MIqMJ z5k!DyC~HadvWzC1i8DH@uc|+VAcWHDHqltZa?3rr(C6+5+q(td!i$<+Nd}HE$IJ0* z&dn`C29y9KGG*bBn`=8UwFx}(pfCIgkhoPLAXzSM7X>-DaNqATXYbJ{|Bt=5ii)#o z+JyrN?(PuW-91=!&uEAk&4esvl?hLN~0&QFT!mLr!6rs9igzIMx%>-MC$T|%w4T&Uds_hV)WMetN6sG7c z0-+eQ_y>q-!+6<+$|zFtaUQ;+Fs?~8h$oi))G)uXaS7E8_kJl($prCxd+rj8Mt<~< zpN;bo2Pkeq^SqRk#NRDt1(su(Ft2hIeu#hj`U^-^xj>a%^Bw*agHFuE`RbUH#qWhN zQ@CySkgv!8F5KP!HtS(ozrBAw^FES?wGox|A)nAW9f4LQ--H<88aA1uQ4H3U7z zAr5NSWN0z@K{ubi*~(Wky>c$r9{MOaC5Lj@JS)7co5!5;`&T2pfQf7_y_Jpoa*62m z$INrEu{eB4(WGI(9DT``?3Hh6R`dR{CuX;$85^cnlPv`of>4IERN>-6suT*}0OEb$ zG^!AfgufcP8EJk)1QuOt6N-MbpkMpAp#dzVn`SNr6X8xr`rP9>xx_T4Pu}hsz#MaA zQ~NCCD79>2VB7lmN7*BV4AMtYHyEtBoFFtUXNi216SDlxCfI4+uvW>8R`^Gi-*gw! z7MPMcP<@^&{M~ESjN%Lc}=D!yX=~_12tgXzIjhK>L*V_ipD+xNS zFH_wsO3odE1f}cR4Sm6p0PA7AY1E8qthKkpaSx}JO}6+Rj$SntyKz0E@k)A72=V1! 
z>KtVI5X#k`jf|dKOA9->3%@?8fa{8}Xj(l;{g?3 z9P(P}e3(t5JZ*R7qTNV}U(TSAVTJTCzu<-#pCHcScxdZSp@z&Jp5)3KhUsY^L8-YE z>)%ta!st~N{u7uP=POSCwPBpYHjrcBg_AQbLb65JBXu96u}2j{2&JA=?u1_(R1`$f z4^|UeoRg`lZB)XNprL=(M>{aC5ksFLd+J?YHE(3;C$>UxUC@j~Ja)kTtp%PlUq8$l zB@--z6c&KBx+LmsN4gU$(SU;Jf0DMLPH~cw>7X{;v+p(X#R`#6z1fLRP_Vw9@aV>Hz z;S^A}$vY1G|FDLNZq_=6)4qLCnlB3(Y<_jud)++G2%h?+EY<;YCyj2gvWx645_p;G z=9V-|wv-h0W49i+E57yhHB8bX#0pbV7!^%Gh%cJ2@=M_Z_rAX0kmzd`Gf$YvyrZJ) z3vdxo)Y`am;j)%{=`m0Yy=pn-=MbrzIi)uaUx78nDzce$dwmxcyNmft7ks7O zvE4A8$T>?*CT!+%g?a}I2Yi&P?`Li`&eBeI&eR8Q4e#G)!1h`K3OYJhg-l!a1N%&wMB`Vq4 zexAO_&IG^$xRTc)Qox;&!&-0CexJy>dAWQ{{@6n+KTdBivb(q#p)~%bCe!2^(pgdv zT_iuwhR>#=X+OElJPY(Vvv2&C=pbx_u6q9v^pHXZJ_7M)jrs6+eQI&;(uQMRBd}Ns z{8ao7gABtBvh~rvDeE#;TDC<&?_e5&80JDGL5h+Ny!E9}G6DdtxXCp~77|IDqAjE` zq!kos(fuce2lxT2I`R6?ycK!$Z+JdTaQ{;aFyl3FG*o12r`hJ!H;t$6(;m0-7&dUG z?b0k9iXuDg%ycgB`;1tqaEQL1GkzI(_zW}7qAt|NJ%SZz`xWU&pTUp!s{Q_!hLdLR z2dRgTVYA)}r8I6l7w+h`$l+FF8w6pLRtXkowm~t@Lc)8sFmN23TiY=YR|zSRU{rv| zAIpYZ7u3VR7eA%&Hx=>_$w!p$&Y!UNk*`st_MCl%X)U-WU*7HFEbPSZj^d;MB9AFP zi0(DwP1?I(c6oE<%6U0S{gQ8LOv`zG*FpC2d#IefMl@@qh8i#h#RT1P0rgOl^oR%- zb^lKi0!@0g?l}Llq`W*pu~fDQk|SkqzPiBSQvKn8vz$vYD3|L4pht?l`Z`pZp}oF@ zBolxf+bm0q$bZ(QT5aa**IPcf;-y=A5ia#(d1Mjt7Lg8ih@7f$!|d=xZj);%-x@Z#?!IwQO=fzrjcSDaTo6tsMp zn9f@HOXP&@ijqQEyvD$hbN^@T6&i$ME1(P|d{moKFBk8I=9pFAw1Woxir9iNV0!bbr2C@;ZYUnRrkeW!s z9Jy|X7%y9~&De?4*u77$(D`jWM1lK&VZ$PItroH(cePgPq&}0mNU%$ zH(%%NN6g38r<|Ca&sOFfR{o*uo9J><=xDP62lM@nXQ#bd(6365^Y7r-oLoFvBWeOv zobGZlTIyw@&c%0`bsy@f2_6kV@ir<~m#V$fKf8Ij+qMG|@JY2|6|;eXUt;%?rHlv| zAlPhKiE0K?B>rdgVga9$a0=N*aH;im@cn!(f7uXAy=T~G^9l2?-T%WkHFI^vwC%Gw z@CXH|F?3YF^3%?){fI~vM|%YC0+WPm;X1$n47Z}>-Hzp7z@B`Lu2TjX(H21SvH2s*|>5cL(pWpWm!);RT~vK+f4dCuLShX|`0pE8#WK zZdx5jQog$uIGY_SUxv|OeVP}(+VugmvXmb%tfqUo8nE$L zn;vg)###1lrZAUR6rvGON}}*^iEk+tpByqPJbr2PZL?(guv6LF547=v0gNI~JE52L zvV<%i@pPdv(Fbct7GzNe)OQv>@i`GV%j)_@_57QyF-^7`wk-5xqT5c%msY!!z38?~ znhPOk2$ODu*$=>+mO>q&eI9M_cM)=Z5_ZIIxlUszS3q-CwMa0Jwrfw@Ek?{EGwB`o 
zXm+MU;$&=bxfg4o0=us`(ebPw^>&oDK5sS$PUovNw9`gOC@k*AODD4B5y;r@Rz|> zv=~MKjqJO%ic6EAQc7(UEbNBdT!*GB^eQZF^Z*VTptW)y-*+PY`GuV@D$~GNF&&#^ zd)YX-p*IhK=rYMgpptC96)#94XLc%~=>1Op*#mwzuRzk@+7$Ru(qA%?2i3U%xSdm*Qpa(Lznrs5hP1?wOKn#$}M-To0E^ebXH^)~_ZPzI$1 z#eOE*`H_SWDIAQ`nwz(sidaHPd2~tSr3e=lM{a!%JS<${ELYJ_5q~?8$fp3aY*U$_ zBc^72Nu^HPk;703mvN5VhU%wAyfL;_Q)DFOoxd;B4_*hA?qiAYGdkuy5nrEe3Q4Kg z91~i0YLw)D9RCtA^i?y;b)DisChCGhsQjmC;1H5szjK7aV6fsJF_mmE^`TdnN9)#e zi2;r$S2SF$m#`7ZrZh$c9!4d{$!F&YG3Iv za9xMUuLVZQeMX!JX3>60&6QE+#mp+>z=9EDcaroqiwEA_EOjggPCKKhdaF;*ITs!4s z5+X-!!^`Di#JzBDOQqnH{TsnRS9>?Nwy{-RzAKDiH62*7V zIT)^c>9HbuD}~-y0o(3&9*M0xC0ZO~MNC=RdSz6y)3K~PO!k}{%b(ptU0~nCj)wbz z!~{LUvsgT@l8xKrfaA`|0Cjmi7TI~*Agibb63`I!@&@Usw-SE)P3qm6$ZqkxX*SZ( z^wSQ29c|iKF@bvVD(I4Pc-mEd?zclJL8)X-nNLXh#e(O%5NLwUeqq!zQ7X-mMqJ)V zR;;N}6g(x-AD_FH^lKoE8jg6#RZ(0RC1J=j=RO2V3wgntj&+CO*vIHQRcnBnUgE2p za;R44l3#5+uXlhCCIHdZuPk;lb{n1?+iIpEZryGP z&YJac8E`<-w-EO=zmaX_bniHdZ?3gj7lI*CX=}aVh~14) zjm5H=R5Rm8aLbCUIZz!&CA1ua#lD&=F*3epi_0(se}hqFH}|_C<^{=5TgQ)vmev`N zkWBFV4h%loJ38^1EMR-IvGiVIw0Vm0$_@fxjsfPtbc7}C?edByMW?wh-@XL9QcEev z=i?M8hkq{Gq9X+V*s-zl}3AG<|f48zQJFwPXSUO1Pt#3{aqAbN2L`YJ|i6*I4t^~>w?>m{?N zXa51@e@aqfPuw3#q3ghbM zC;=BQUDR~a(@!~m_MeyEg>o#`WQ=Ky^5*N2%?o9(MjZB&R_{AVNtWU`^C2%ZGN-4@ zRiyBn))_R_HM+$V&lh0ahg@kBYe0XZ{4SZdZ0zFIv(s!+uk}q~yvav7ZrqoKqq~A; zq_SR~7jyQ|d4|t$#(MIG8e_}*#vV&NyPL&upEOu(XwYC-ylf$Qys?Tg=!f)Imcf1J zK2T&n8$9_dA{;rUOTcydht3F@O8R($X#f(mer^ox_tDtfieJ~qM4#Xh7$m(g0*fpU zGPYSc^9y?K9q`EL>%!LI5xSTeG~<*i8k^&VZiw5&DYsA^b*8R3Bn1q8ZlN0xQmp|p zW;S+nk^4A594%nJ!zQRNeA#%_V5Cz{*}cpJnX4fXBkV8zz@2N zm;F}F@}7a#K?WU>ga%;j76rpHeiJZu0GOD0jt?|ufi9WW;kSQ`Rm2AHD7BrN}+ z^(Ex`qE?ABR#L#YnwYc0%JKIZ#vn^xp)_k^@p^;DEGp~jM;jM>((8rh5Y?uSBpo{x zTH9(O3@z09ij5lld%0oj1eX`J{e~T4JDw0f<>v9p-I!q2tSo^DH-(fng1+=)KO?SA zjB}r5Bm|I)M}5ytTMdK}fP=&|oHa6%irvNAb@?WQ#GB z(O&3=l17nbHo1Kp-n6K2%R2Gg3TbS=?oCzPRHnR|AO;J{m(+=b5bv=66xGDw!4-0u zlm?ZDu3JjgVM{{yC|)oicDsCI`$a^fnA@D@9VS9XLbqlmrOuAIm_N>BXLQ|Bk&qdg<10Cd_|Z>NcFG{?&=J&r 
zATk_GhT%;~_OGB4wq&y$^w7GZ`0)cr<9)j${`+60KWaovlilxxSVKNu6Svpe%)=xg z-RtUM*ib1jF)(`8hej1yatVDV>D>;^{(T3g6{Wcif{aA;otD+-&3Dw;Nu2OduPfW& z-juRoX7hBp!bM{J#~mJX<+6){enYOEj5931CI6T#2nYh*BO`W8J8RI=;w-#XoOMPQ zPw_i12c)SZsa%AZ~NDV{wbuXGv)s z9a5ZW%KG3fIXS8DY zdxb=$-^q+)^F)|fv8Cjv!P3t==I2+_{&%SjWIf1Q)URzSJ7MR}qP#2m!uuReMvv+K zCNHMxpg*+n)LH^vIWgvlh$*pq^Z*4ZHCXuBg8B2y1Hbf3qrPD<(6eUab}w}71|xwR zNKEQ@F}d!*W>szF6Y+2Y+y`^>Zlg4eRmI=F8K1X%F||x_b-c}MEt8ygOXnLC#_zV6 zFBx!6tRPc;!gR@DgnBdyV!4LOCtBsojtfcVnsO;EBAQ*DJzP4s_ z9**rD85r>L`vVn^5gA?=v+6DSz6_SPxU4(=2Y%pi;!=1ss__ko4L zm8Opw1h>T4#S!bqws{RMnvdU4g5TL^Gay9*95a|dr+c5+#VlBUsHF(w)O{q<{(h(a zp>j+%CX++Ne*`V7T#4?fCgdae!D$ueim|)*fEs1?4ddxM#tZu4!9)@6nq_~^nmP8*o7~#OTw~d25FjYaEI%yt)LZ0J7joM7mdjDfkX$Z z-c9|UV*6nbXf925xdlTEiwgm%(=4eCH56k-br@^crd^K;tU`5@Y;*k5P`@?1Lp|;f zAQinyR5wr8@Z82@Oo z8fwtG%jm8bi`l}#M4M|&$=p`9P_BT_$f1(n#Ss>y#>$)_$SOjCVu+!gFreX#(_$M} z`JAVXVYBm@{iYj9kv7KD>oL{5Zv&Xa1io2mGWq1|ACvK7yLL>Ce4LzCs!Z()yBa>Y z1DsA6=S#1oyNXB*4e-Q+|tZybKTZ_$rd&Qyef|Ok4nWC zvtjD~?MQ!jll$}ot3-*q_Nw0{LbwvxA8Ma4$WSRsPbGaSj~9NTpuoXO&nPU=q4?L! 
z5D@P@yVP&x`DDYVM`ca~RfNiKsh7;}j-fxw#UbQ+OfkPzi)3Y-(4{Iz~F9rFGpm<|#w4 z=ZshP@fmJ_AO8KvAGI3lp>3uwza!Zj^P}bLrqL+Yv~g0oXp!~f@QNZq5kK5NM2RR{ z15TgNJTHRLJYD=_w7XN4g?oMp#Fby_nI3Rc+7N%`hmSTRhU9%2py#t2Zb${seZ%a1 z9b(SgJ9^tzNfqEQU_qXEWRffC`z8tWuOPW+NT3pzVCG6#$8x$Xk(PmE#hAJT1rW;+ zW28Z)4B{hSYl)FN$}cF8cOJ?j<_-5@k{nYuO}Z;Pl#js*c^>Enq75^ zO%-Rl3NqTkdRy<$(-8bsH$~ZMRFLPsUZw^z#?G5ys6@m}=s| zjnW_-Py>H%+hl&^zWfkN#%FP8KCEqM>4uKZ&J170Wd8AAm`oHU6Bru7loVFT_%7>3 z)w71pLAh)f*U0`gCdwZY`K(rLwbJhz!52{L54X0YtEB8Ui6P){Rghd8B+3FA6cKXR za34rU_Yjsf322hm7dM1P@)V&Xv%LtssC^=CMS4M>=lrR&^3kraHi*y?`y1eNe_a<2 zm8J(4<>(Yj&>fX6aQo_t9RNUzW@X${eW#qBziMJ%)jQY8qMZjDO!W~Fc`dB=k&+@^ zgBzUsEW^Ro>sl;?ydD2HR1M}M!<;AL zdeqHTBO%h>`eS7K37vXonM8|=ek=B~0emGxzPv^^wJqT&tZvFgG0ZW)V#m$l&c`wg zD(19u*kGYT(iT=fx3;!phE2MPX`2Q|$KcP{=-OeN7?LQ2}f3Tv1HCu(@?xSObdX+YtVm}>4pF7MkG-w1 zXdU^de2C@`{FT|rHUTlrNe(EuVg7opcu~>)6O%43Fq7PS<>2hK98rAsg=KSl^FMZs zxdh(C2vX%z{tKo&>5eWy4$WxI5xmG07(i~a@!f;^g|^^79Fsn1uuGg>zW-ED-6%eF>F5^2LpwDas4vwA7?!$_;mR6QN$ssWh*EPG3x9Vgy z&M+-M>ll(=0UJvvT=GXm4}0wDj`Rx)!5y*Xnw0O24B3mw3#V|zI@rWY4ABsf;p9vO z!Bdk%*&(Pjl(JM<-$e=^-rd}$YN*Q!$-c!?#rIrc5{v)Ll)WT zUh}QDrUW{6yr4bj(0>_-xZzKRl+F6cdGBg`q$oo@&*BG5y>NeH(1v`x|YmFoCKf8|Kia+#$6T53Komx z#>L_aU(!Zi2c`DmuA=SvuxZA+?Y|?bo8QbU`0|!bxr)H4bdT#g|a9HNJOnbK-wGebq_`)7jd0q+H7^^UXTk+kkr#5>&Qm*@=X=qmS^)nRHpKv3(I2QteU zIu{xc*a5v-7bGbbQf`lh%6!-E{l|Z? 
z)xa(+bqp%6oTp_qyp#|?G_nzod(#$XFWu@FqZEaUp`TRmac9R%Mzh6ym{jUoB1Re^T+TS;VF6rbq z7w(?dbAefo3TeT8^PBtW zrFg9l=}Pq?uvF5X{loDb#?0ePSaOCzaFyupZH<9y5E?~Lqu36UY*QBuRC|Nei~{~F z!_jO&xi=boyojnj5@*R|DbZ0NIpHaI4b8|cyqKqXW+__Fgst%NLW?vb@kQ~pfuQ{a zQSG5*bnLoMfN>s;;T;bN??$)V$X?0dnOj0A=W%jPUr?!W0t#%4Yx`*U%B+Qbt;L+g zksng1E2*D%uOjXQG0MRlWm-j7Do^n0(}Kkxhh+9rah+0W?^1<^&5DZV=OmoKpN@vX zYAv08W?i)b=0(o988nvT%J0VTbRVba=U!9*f0((lFNOrPKiptBotmS=p>|e;9;SwGSbgJ=7x!sgPKSs9_pR zj!oe{YPnlHKVz3cnp)BS;Y4kCiP+*ZW?lp(Jq?fd;e*|a7JJ|37H-reNqx7-Wna*# z<`=aJZJ+_AeBxJoSIfKRYb}hQoC~FhpknT9_b2VtHK;nxPD;$6Gu%_XjL|uPi)YtL zv~44sl{|iAK98B)Htu~ztty6h#5`qjE?y9a47*DBlUwxe71Kaoe1&+j<0gs^=YMJe zP%2uay_3rsFN8dZFy~r&Zg}#P0eV+N(1qE7)~D0;5+abMup)+RHdDwavz^&cScbv4 zr%~D`%ebeWeR`-(QS4&rYDK-DX$uWS{e3I;gOa5BeB#+f80mav%p0Uz);}Vw;$%#}z!{?k~n-I}-R@P5zjaAtZq>xYQl;!ndP; z!*DjnV5VLU(*{Mv_m~l&%xh3;c^GQu+iFOXVb>M>6utLxQ_M<(uY1pJq<9t0ZEd;tw!e}j4MT{qf0JilwnefN4V z@=Qx+6aoFVg)HI;L)H;A{BV9#>Wls7?tJU5#sB@=6_n)Wd4rm#dD1Xv^7vc)?DSj4 z+wPlR*W0~65VY{*9O{3o5AI+0>+uH{54!Vg9qSc_51i*~=+t;TQ1-?64~PO?AMWtb z?l&fR{Ga_&meXCKD~^x&#=Tf{$E?~Ru8@{7&<=0jW~%lp=-67X#N~_IGn#zn3$v(C z@=Ej!l1oH+K(}tM%wtq*vE{xcRnx>mo!}$mc16w4hDR{DM*ZXzHGd z-bl<~DbR3gU%L%X=x8mEsBXTzYe!v`)J%@A`Oh6;>L`m}eNf6in{KOXISK>~dCjel zzsoFr$pO%poA2upsX3Qg;7a3}Q6GGK`t^t&(t{yPsR_6nDcJIT6{F!1A$>F;7RZpn zL7?R6TDxVG`KerSK?Z)cxl6`dwNh49TqH+Fmra&&YzUXes%9O4&rQ6Ug7)rj=!1db zSq}O5Y=sJRbjiidL7C&sj5N)=4U!cxPDH-X+idWdXp-Qxp}Tl=Z4Ddl71*$vH?*(H z+`hS#^&RwF*+wA^92AMmv=tcKJQaMP7GZj?LL9Z6 zzPlb!lboR%e~t+Ii#8z9`WjsetYC&<-ZH}9F9J@So4T7^Y#?Dq=pX)uXb z`g?&uYIedE=D%i-VqW5}xJ{x8Nau>fxU>CNhnqa7pvTNFj)?VuV#1*rA})mZ=a-?b z*4?W0s4dtYgS?{}HedT`eA8B`)3w=$m1D#h3sW@e95Cb3do|b)cpPd4NVLary@$6((_ZOXH= zTKqaAD>ZP7URK1Ax(VX|D8OXFtd=uZ)Z!Z&a?Y8@UnRk4qWKGDZuYyx^#G!(&kVs} zdYYdfT_%1@VC&r>?vj}4$WMrTyrn+(y^DWq1yR|bt1K@YxI2@0{S05>>oeNsfn?aU z#pEaKF{xaaWCOO-f+_I~w^`~cOz-q$W>yVZ%Tw`49q{omW?NuH7#g!0Hr^^IN+StQ2dY!2A6y<=2Ty2B8 zG6n&&>&*MZskTqmspol&kVcpz`53>d$IpRL6Q+u}ZfcU-=1>x3e7TL0I(1nBJi~aA 
zy2fEY@08Uc6;FRv*LuHk?mnZkUM&C!)_P{>;zkor53;`m($Cu!Uel^VwXS%3_-9Tj z{2{xFKL@N${%AS4`TFgoAN~C9DdUVkKG(s;Y|tK}3Pqc6b2oJ4u19eO=03VkeKzaU zaKPx3>LKr$Z~KT5XCJ^0JMvid(Puc6l>oFB#>Pic1kkFOAHPkA|cBQ5AF0Rr@LZ! z`T~Ebk%Zb>%*>sltkb~CP#kdQ%xjFleOsgcuhBz(owi#|&9kI-GK`q7iSAdSuIB^e z8(`mXL%yt4*?|^HHhG(?CIhJ?f2RBe1pbExM)Q(GDXt!SpjMVbrnaLx4K{3`!PryK z5nCmd8$>%yA*k{A0^HASw)b<)jUDeZ1T-K(-2Y3wQP8Xq-n!nQex9h1P~J+R?|Ubo zef2eYW8GlOG~iE#HlEw@ea5T6!6W2$5W=Wr;J@CDlK)3eQjg4fjr&>}-i{xr*^$@AKbeNAVX&{1G2ik%mYdNrCTL;Vt#QqoKG$ z{l*{spI3y3uUJ5I`(L#4S!Lrt@`l{Vn+6fUzo=;v!qvZ?Fc4J;8RNgmj_cp@gUneT z{lByX;t7Zp{v{&Flm72yA?R2yV8DLMS4uM(yT0|T;9nwX#bMCjyqs@~Y>0qn^|V%9 zOE7~0>_$-O>)kp1OHgZwrCfPTaykYT-t_h;Fc6UDtF?q({$bOhuJQ_R{{KuGh5Zkv z(cdMAVJ&9`t=te(tv!qn$(K{fN{;rezQ4wKJ}pXxJbA@pP;XY&#$+%K`Fy4 zGXL8iVgAFq35*bb*Ym3RMZ*5Y^@lzB#NC?^-73`963jwC>BgurLx-MXCBn-bH?#bI zmPk9xKldi$?AaiWM?-M$HS)hE&bKkVlRwNE+As?B_Y7S;*hpFQI!pbJ+OE*uHsgJy zIIpS+8Rmz0Y!?Jm*Hy+xsmz!|EA8%d#eAWKgz<_qW>Qlm74POD;s$!aD9ab5g{JJTW_Ct^S2QT9ydVdT? zA1V7&-F@ow-kx1Gl)ZxcuO4of-F>G2vCTVPp64BWRF}AHur*0+*eUkefoXm4XW@Fb3J0RTXDsC85?! 
zj-JByWqZDcqNdOjJ6@5wkZx_GgSO2%YNL0H1Se{^$5WW$qmeB8D}724>;3g^BrRTM z2}r3HlEg$gUOAVZ>9Srx)E8r?)Q@W76WQtAE+QVsQf!j&Hx7ntAY8HgjKB8!bdQ%v zzvFB+jZj3puLIc?s)?iP>gCbWdjVXxc;PYUk>{8CMm0IE^qLF_xv(HAj@NZvVB!WN) zfT375RC{AcLd<=_w01EM1EyyVYbl~+Xt0p8P`aN9!moFTtt+Ilc5`*F-)Ko`i46UI z5fh%XA}#`LDQvf2j~Z*hAjUArxbN4tX_Z|0;@%otuDWw8NJ_6=OWiezktIe6g1#3o zWh}Y4oV$2db&9-c?;ZVsIfP@{Zs5hgd2$d5Sd5U>ic^1wOEedrCY*N46Ra2P*YX)t zLtMqM1Xv>TOAqMm={DAD(rKbvmX#9Vh{0kN&qavL8+P5oe3X-8CS3nzP~Ew29}h^H z#X0^3x1fN7ou_2_sFuLS<&f)`3Y9{|P+m5!B9$bIO_<+&b~V@L#)8)f_Hw+V@JqIl ztLNa!eTb=AwM~peIm-)GHVFZ6X#KB6+3CAq}19NPpW;-%n>BjLY=7Ro!G6weZO zEB+}yU9i#MF#_X6OZl%lJwHM}v~C=d{3jXRF##PzU@s% zW{nWXq(nzmY`^}AH-Q1?B~Gtg2}N7fyiJU?NOU>U zEo?z`OwIt>ZO$z%o&$d;WOq3_&Z1}ZaEyLyQ3~8Y#*IL1ZP!UsDUbfm@)o#H<~#EC zh$p%?{lM}t*0FahAzeyP#U1$;(9NQuF8Dohm_3E;4UW|R8tXM{TG)@!H;n9g@O`tO zLJ;4_)-%&rRG;cxp(hM;(bu>8(hkqwx0~NZjW6c4ery}-B8Ut2VKWuR5 z+e&Yl9Z$m@dkL0V`@;QgcgI{U7*ET*7ZDX4!8Rk$*91zh4=N(QK0N@-F+Oe&ZAv?o z!}B#G@CNRkTT5*k`|Z(D9V<#}Nu-b8*deXxRxbbxas*kreST{*G>>Sx(jm6mZeAV7 zd9PoI@AaNiLmJcJRQOW(XiYUP?~)o-wOlhf~tOw_2| z70tV0&}=4`OdH-`dqY*MDPJek$@3nl^!7?CB+|OG$Yq>K^zmcaorUeXv9o_xm^+FX zb05FlrysvLd_+v0u0tTRlRB=Kd3i!%3QWchh)S0%S7;{EZhgy$ZK;DL>WlwCjg1rm zIi#xOKNRzJsS<5YK>I5XUI0?Ny8P54glsHd9?gj^5R;QXvaj_|}#oXM1g;f1|nqvPRdiq(P}ZTG6zNc_ru9 zbsV8r?xOKX_F66S7rmam;1IN1<91_|j0tpqEWvJ5gwn*k?Z~%_aca>F?bmz^^e75U zq^R}n{6tp9n!$)+b_Gi< zgi)xf4X1xox@HbX3BU4at#tahuimtT^9qfo8qI1}wzm%?CA+jrj_pU%nvUdpO9RiX zQMZR!NyRBbO#9|VFS|5=6wg=Uh-551{RZlY(MR>`9*n1&vI%vsYSHAem#GJXk4T;d zF^7ovV8VXoz0Dp!yPVf>yy|A9t*!nq{V>`4pjn%d>sd}c$Q6KTAShRC!3ntq)tqT8 zNH>$ycaNWInJOyv)Jw&E2?f9?qkPWFjTk7&@g4DsO7hOoTy+P!0>8UEo*$5-YAs?r)&Dt#=oGqVE%xobYItNcJA|mQZ^1W6=4~Nv!VJds3 z!PyuXJy3HVS1yBA9ER--O_5GPBPxDTicP$L4ln-!*%UbzRu)a3?a2 z-4c1tD5YN{!rMC+k$B($&4mJCr`ooDHEL1`5mB?pDR9Xq{4uvYFL|U>$uO3h`8dCy z>j(%OB)9(YsEzRuNPqA91dlQ736jLS09A81Lp;JXn#!foUuSK0NmZ3-6wV%mA zrlNoqJF6R)7imtNr^A_rd@ptT_b4VIhDKYsj%r?HjR$?R-~XZ$4% 
zSvSRRMWex&tF;9u;BSC@rO0p<(Ob6Tr0jS`QaVvR&E`TWVW2@qNk$u-lwRqO6edv*s}3I4?l{0sV>`76ZnM4bE5F-MuQrX_tozZ@7GH?6x9;N2 zCY0GsXV&!qF<+KgTKO`b-|KHYJ|FB)E$y$1QK&XIhIuV^QK?B6N60ln>rQiu zJ9rO>`pknMm~G3x#Sc@(iYtd-?Pq>#OZ1@p4&cPa;tnVD6=(~J$|*W>wV0*U%6VE! z;^^Sj-vki!UI8DBMlP9lU3N8B2Ld+157KhUAS6^W(d|PJdzfWdE2#TbF9mY^n;tI_ zgizG4GKR*=hxYN<&cNmqh$!CYHL}Es&MZH{STXZ8kyg~V%Ul|-RZB}pZ}Lq*j)`Mv zkbO9TwpHlY)n%iixpmLjZLH#|3YZgjACFjA(2&1g$iE#*3}8ZAxc%+wlCiLV+|#jn zW08sc1f{XALAJrp0uJV(mwbAgQV|!g)#EmvDR@*&G6QGwKBV`7J;gLZ(>dYEozTXe zp=6qJugF{;jKDgUJd>9iMVD`hhYd2`1GCdgC1@^~Rj!$UFs33;=8^Hf(50A^(wi5_ zMJs+O5$l=r1L%oTIT7^EX7ALLwZjM7uQ^{HYuH=vGG688(!HoJw9wH5Z~zcc7D&61 z>n>+2UfFuVm1VoBpNw5TT~0!K58K8ptjWMkmtw8=X>oaSijICGyW27R0#~i_M3OJ0 zPs_qyQL%-ibTp8FPY%bxIJF!(CybH<@A4u z*W}+bs`;aDV$;j6+Yz)&aCjXHp1ByZbuOASoQXW3P2Rv`pkYQwpS5z~5o8j&-{uGR zB7hWoYV;`)iN{uA$D#km-f{W{4%K;jatntc&ku#$ZS^^30YfT@nPqfMe8YSlOrGqw zK0?1WOmJkEHzg!2kn7g-kh0B5N^p{BD?e7wMfG;Xm4jgM;ARB4&o=1Np_YB+1i3tF z(cV}f@%vRWcr8I4X4t6r@G3HIm%i>}&NX#;mF$)g^l|&#EVez0`MfRCU8sXS97r&P zE`YZGVtfDM9zL2C@^^=PDQt_cM=e~-dwf?zFYa!}QG;ahq_!N+6w%|KI!V-0Y4PUt zntJhjyW#41>_$rZOT%&+rQ}65dEcF8XDpaD7kjdFw>1ej9 zA9eZVecfLPtcZrp24_OWion()JnJRQZWWkvkHj|LAPi>P&@n1#{iX7XzLBjt}{~ z6AtXezfcXVbh=`q)HcoctbTo&oi(<7uY->v$?mulP6_Ft72K~QybfT>lTkO(R7_6U zNqTGQUoz5_!S4-ml;8Q%W!35AGO??OMo@n*K0Oue#K> z!aM?x$Z`59g3>_*b7@x3hZ7j3EV8R3id`V==rg$Vq`hnD;(Kp|i_ggQ1%E#t!Z<)A z<4nZ+&K|U0YLR5FBXT2YuH?ZXynQ(AEaPRq%!4lkDrSN$1-3na z>^68Nx&y-mU@#U9#2K#gNZAIPmG%~6OoX4P`=ORq5|3OihRi+(B>*D3Wo67u`^MaY z^%9LiFo#aPE=ioa1^EE_(uc&s751MfH$9mDA5-rXU0D~liGG!eZQIzfZQHhOt72=% zc2coz?%1r@wkt{Hr2ih>r|;Lr8gq^D&UZcJsohS8yD!q~qw@W7P4$2@CE$pJ9{NSC z2Spto-T2)k8cfRAFJum{u3fWZV;>1aS~Q0WCDfAFl6G?R*MB2~6VeoZ2B(@?ZMpL= zHMA&DCz9Q$$57J+lUrOl@rdGxu?H~@A+jiv7#_MBq^x94x3HdajKEguh^|bVmNs}o!>&?QT81aRVl~ubD=!Xii}*%h+BUytw|&{AT?n+YcHHR) zMPrsSOMe9u3v;Y*t?=8nn!a=Oxlcbk6Rmi7j<%LdBa=fbt5fIlh+aUi2HXgUVg5+` zuWNvWiEFn`4K(O@zvxiXGWci z7c$)D&DpP=otKZgc}=(yw8?>bxCQH8%LF#OAbk90?zOswinn&$jKc!bh2@O-H=0#O 
zMQ$R=@2apQCzLet-CAoe)fpd{NipD;aI^jo7E)csOg;TMUcrOzzFj+PARR)*dWLYa zifR7mkbiuFZnG_+xvG&PI-MCIYTRt*GXr-IzRP+pD;(d@P~|X77`jEYsgN+p^EE9-}c$@Pck0$>`KDIQO-xf zclxau2Ft>s1=~fR*TiDaZIjiuwI+;ky`&n6|CLX6RJ`g2;x?Tx@^%>xc|q)OJylUS zIh+sG&S4%$xFw)0y3Qrj};qx7=uSY7*ikNH-*sm~0<8x;wm-96EigYy1zTez9H(u-xn=T!O zub!mm7$Rj$`!9R8bm6PLM95Sd^mT@WXwl=kesciR)*sH^=Bf zZE9MS49Bc{h&A?{sN7F%hG(e~lC_+Q@HY_TE$7T(jsj?06ZY~B1B5nV33rSSe&z7V zYAKMF6=y8j>=EWFov~R#a$sNqw8MOI50ZJTHRlQBrOU_$*T`A;F8>hNNQ8J!;C5Ar z&i!sUz)Oh*)2C7TU!k>yIGCLd##Y(Tl7-fINkk4KcHVdiBs8$n#=jQWkhy>7IJK1R z$Y(J0SEW1SveO2I)Iv)iXHc-J=Y>|2E4mjX8O-d697p4X_*C6A*d5(Cc5ZBYHNH*7? zCdHLK$4kUSI5Q?6x4ELtNV+dY^1-2CpAZs1>ntkFX6mtM%9XdMPhQQcloe*2nKFr) zW(^#MY;;=Vw{reiLCmXVQWtK>LzaJsmqhCvqIhtZP!lTDw$gwB)h%rA6mC^4-l97+ zke}{l3s2a*Q}k?7ApR<-BB8f7Ew#NjMJuyW&TY|xQG)1 z*;khr(u8XKqNWZNx6p9HCHRy2)8jdt?PqpB8&fD;DND;16!8Mz7my_dkWqU)azbTVtJwX*9QT!s9Eq?m>x4)CjAX zS`iGrRb=9^RIIg4Ig3@XorQO4ru8>JsH01a*ky5MS@V$KX3BulkhTz=6~-HolDw3j zo`7dRVOhEiMHPX-wEOvs*u|)#Y>bb4?J;tVkIVS!E`nrHUDQMFWaZH(;sY0w4z2I} zTl>dCN+$R?osq-q-f7|K-8rmgHgUqz>T;6V2+BVF%*_qU3wGBzV&iwg^3FCN>-O7vX4V{zgKw5NqMN%z9Wlo z3Bk_4oq3aor@%MvKV1=Hi$C<@iDgLW{{62pUi1#YW$p2rx=08Z0nOIFEz2o;jplwf1er(bzJG;@g_GY$Ck~N;OKRi(JeO{e>oIhak^=Z%PAC@fqs1f@- z^^f0?f88y|^+GI-PN3;0mPEH*SVjm*@7tRXe8&>-;Xkn!G-jZ+I)e0lr?}2-uLj|v z$e;I7dC_!4ed!=z=lhSw82$@36zJGXXS5uaq2iQjpZ0CO{vr^3>kxwYp)o$RJg|Tc z7Bxk-KW_^;8#qKo)U(T(P}iO#5o>ytH8%O5(HxaW)-zG#-KVar1Q(+1@F6Fz{2xtQ~7M67nZQ8bp%vNcC2!`v=39 zcAZfx(mVv04s6pZ!0=0x@bezx3&dyvsD^FvmDJJ}arO{xY}HcxO4^DL6#rf)FO&@6 zl~xnH{?3E^Ue8o#B1l#rf|*!REW%vg5qXgUMiTz%#()C}@BqHM{OZ@`g#N zWonu*F(pE@Q4_0&Rd}P8`S+)Nfy0)DdbfV)^ZLRhe3$nOzBeB~UjprbnUKq65%zee z)NHWCBsuyEU;Cnjf%h3m`A^;}x{b8N%LJi2Wu0HY+5*!aRGZ?h;P0kg74q}%V+?a1 zE?uV3$FN1&j%|2$&;16L$4VMiO}-SaIP@+fqJ#Ou&E#+yGlzK^w2f~HrpLE9=<&eN z;Ej{3@cr9@v$f?MSq3j)suYBB-VRv?q$B_mSbU!!+?tEQ-QK^Bm#Mi?TG+^!Mm%E+ zH}w+k0mj^LnXxMO62Cm%(see<^gVue@>G8R!y(9s=c;pnI&**uGS$Vj4cq-SJEa<$ z;P2U!A!UvlGcfAJtWj!h%)dk=vGF{MjIA=VgjPAOi#CW~b)Dr%OUY+3R7@ZIlR{)* 
z(n&tEGprpB2vRGEmY^pfTc4@&e!@lrNHP(%;(P$nWEufbLyU_J7>&GU9(NF17Xplw zB%kUE13Aln11xSCAk4g~#b?<4afGJH^lqQ!le44HZBpYTd&m%@UG^I#-U%-{jEr7s z8m9T>tf%hDqX*0GfB@;kbFq7lE66;baaVr~b6(@calTzYp7qKPrPVdg_*`8}Vr6 z2a;<{+ZIcja)CKrS`VI2Lf*m^{i!6-$vqA`KBLZLMIm>%LWLQ^{Ng3TDf^nI)CJgW zlq*&An81w;5gcb!Wecm1E~LO9$I=tLaXQD%XVj)$sJsAu87Ebl3RcOBP{cP9C@_-2 zNVAZ_1tl33SMX%vdYFC}D_%%l^2g6mwZLTGYWg>kvukc)&q1SapZqcuXcPK;C(@WK zho=>5zEQ=5`^2Whgk2g2KjvUJ?#u1}X+i5_amZCptJu@#398u(v`@`eXew$_<#a}q zW}19Ww;uTNPgV7RrzFT#gK;YyYYHAAKfb5kH_W&E*rz=K1Sf$nGDFK?e%|q4sP5KR_CnOfP52LillivQvb}s3JdQvBYv@V92wC)_Fisgo2$=UnKxsinzumjIIEy52us(R=YPO1xnZBGoUix-r}7qVO;*5 zkGFIVl~!7m$fvzq3^>E)dQdLzl&DrXp64n{r@NVd#Oee^H5xYKu1VxQ!ELoXV@68$ zD&4}3z2<)@YGYpFd|nb?Po{)||AG?~sd8%MR*uMQ=m&ahq|n5H3}dwh&d-H0Samek zFG*RlqH$8bb>7e8rg{E%(Uz8~ovwI$-*~+|HH#R&sPoX{eH$OU36RaAbLwrEyr5Ju zDTM0LFkoh}yzcdt=a3RmtECDm?ssyDt08AReKqXVbF{WbKm10?ez{gNU!Jfl2 z`BAN9r!4t+eY8x9L*vL+pXtNW0am)fCqZH{Cs0BICg)>bz~}?$+ivSK6caF|f^_R~ zo-#H!KHd`Q^buVJ!tJ94zleY`L!xc2W;q(7%$3G4#4vk{9&j3XTL=1+B-RQODnwG9 z0FL>?wFfupjho|PJf17Se*{e#bCYKwYLx|RF{--rdH97?QqB!>>dQvEVK%g96sY@s z%=l+b^V3$LR54lNGD<<#z3w*=xG3-6?9>`xQv3DxW>M>x102#)@ZtKl%bHb-Rd~%j z`@}@?3c}OB>4JeX(06pR&&2W)WfLFnJ0^lQ@d7$YxY*XLmpKKsi$>u-h{_yMW~bZ5 z`r#x#Nqfr7ZS<&E(d+({#xRHRtX#mNX~+OHOiSu!a<~t%st70Wgj!;LH)D|6WggVi zOL8T+w||Eg@HxVr&;`R^6TfZkvK!2RKCh!P)g_lFu{rX!bt|x?clFfua zPP!C9A+^9=%+)i&lW@se!%mKaGg0u8#@zjaNjV!^x*>|1y?MH_LvYRGN23(YK7}?a z+DSAx+E%ra4Zib>B*T*w=>&yZ5(>p?HHhTQ^9oJ{RxKQ$Ql!=>!=B=>pDG8C{gNR{ z6FQ~L(jBZf0bl@+Nwo7&Fp;4f#)VdeD$$FW_k5%rn-6W;O&0L#bHb-8RJQHs{#2MhBr$>6{zqt$ySEF@d=uqUx(^2F2 zz4UUI3SkhVHJVMEp)zNoABG2MdOFKucFr+X(#wEu>hW~=pzH4NTB*PjThqRCkkIe~caUmKsNc$&Z%o}q8fy=+NAlIANT5Iu%AlC z+(>apTY{Aq8G;z6k=)ZNx#(FB$SvqeP(c)~kTz(kXb029va8jLCXJIWV;wvTR@0Vh z3peKVY7Nu07r~s5CVfuFV!FIPHHB)?Lme@Pt($RREzn@7U*%tUpiwnvMM7MMsl@kD zYJG(~`J*{6gq=|)-eIAv180Vhig;2j3+cQe7UAd_Y5zZ)`v0u#_nL7+i|oNbiSi2N zNpzb`v~cc+DWg1UGaw5d<;OkW!5gwg@ASNAI#gLdyHFXBG|9|)Vlo}f-rVTQhRZxT 
zpRaxlA(1~U&pHR6x7+>lwn;Rkt93Y|)a{*F5em3-Oxb9E*S9s#96QZL7e`Sgu0YBn z5fVEarXY)U+15A2Knu}G91Ay7tMWay!#l(L%^p-$Z}aJcfItA97ADXQf3az=y4F;O zr;7)iC(vZ)F-?$DcffWsjfgr7kU{CnkhaQ~-Mc^k$bDcYL8}TT^VP(8Gb=eBGz7^u zSr4z=^dwRLon_&`Es#^+QmU$^Rq@QE|Lvr}uQgg&12dQ5R}l7GAZ zMv-q=lc_{;?|Itn{v48H#b)frZ@k{M>%-6>pZu&GqatpMrAZ#b==FC5>*M%9w)Qw0 zg5z6zQLH~Wb7IwmS9U=Xa-HQ~nL$n%1-)jE?R6Y`c~24Y-8z4-o)7vAU9%?*hHTN# z@97L4i98yKbm}CPW`%v37TdD_(C#)1cuvWdBzo8%Nd#?WtnGlVHwzB{))%Z6ix`&0 ztPUwtB_T?n&lO3j zMwC&TjIG8f!PJ-}N%DAfRW^H5R>e z@uC%m_O}=UOM9l8IvEE+hdo=vdQt2l?uI<|yT<$JHhgh5@Ah(@rVa`ird_tVUass6 zy})3?e`w>O5Xdc=W{bW|rng}UTA4ffxxyGQ^t7ny_pfqxF-l_pYL}h%vbA3oC3#Zd zU@vjRcEBc26>huC*(nsZve|P|4wH3869*mf)&66JFaq>lIILEYTHO~Pb;x;*IwEnq zz{X~;s{GoLy}e<^$reV-VulJlp{W3xtx*@pYPN9uRgJ`_Xw{PH#&>kGoXHze{EPhc z7AKX6(>NBXDxG0Ntq*#Vlau{oAyl^Wd1KoUZIXj$Y2+iBg#+p-o$h-)5kYGwN|NpG zVdeB?-QLAeBzr56$Zy7rwx}1k=#n_m>**GzZya_ME%TbH!)F*+=@}D;B^Yb2(=T&d z|6yzVVX|%7!;q0}f4h;wpIz33L;h)f@=L5DgD*}CfLittTx7qiW@k4mg2m(05k&=l zHJ)X~CCr6r{UFfNL$=+ux?2K7i)7wA4X?^(o~<+8^aZIUe7B@PUUs39;>CjaY`Zdd;?ls4SpHSH4EAq@;Rxza~m?CP30%+IX$E~<|Xq@Q!L#){N*t#RIm6>NVQN4JYrmRf?Ws z)$AuR4_@Ot;04gy!p8nP3PI~U7Rx^|?O)k~D~XVv~!3Hn`#0n>7ZLEt>?27K_VPJ%;{uTybs=Bj;j3kI`g zF=^Fd(KN>l$Tnp(0zqj0JV?N_hXaS~wBApb{}3Vuq3n4{FFi|{!$e#a*!=%1^$sm= zmQo5|oP>Na3u9t&Fg386WrS80yDDX#Yi}_-mP(cSMi-xet6P1!{el{3J=IKfSEW3I zg_Ye>^zI(<9yHENMOmSy2LhVaJPhck2YRg@1rS?FOE-_TcSw7)WAKg3U*Bkd9Q=$4 zLFEb~&!N=oLG@#lpv3m`1qZRN@B8vbF?1*?}x{TR-~8%oK8{rWUyCFi-phyhp1> zAU>@^#Z)+CFFWbuW#2E`x^h$H=OO)?1^>Emv%Ky8|AH1ID<7AI0^KneKw@t zQmuUlC?O|V93CPa=>Nk&f`zH_T0A<^naj2NJJ`#Z^5<~@omsot$j5d1sfw<)7AsZ+ z%hWP`22Buo>{eMyEI9Xyayqwe$ain=nEQdu^86WuQk}2yP}Xo%^#gdnLa?<7P#kP zphEZ0Rb~^L6&WogHd(}q02Dxv57LBG+0GcccU&IFCey|hqg{8u2!OVM1mUqWib++j z3>vGkv$}e#lkk}GJOQCQT}B@Na3Rx45fv82ZA znGN_e&<~Jdi~SEj&*Oz@J|~9jkPAAD*q!L0F7x%M!EPbPTI$N zihor$1Z(%7mPPz4k__~j&d|)!%zE5L}Hw& zqNBusDaV(PCuoA5uC)Q#dR3&Y9xQ?2+c)TUzPDXH=ZZ7|rN$$PPjqv?+b+WYYU|_$ zGY8M4@3uI&0^3YVsV6pNf%4QSJ&r-ldQWcF6P+=Ix?-E?9Sm@Rg;BvS=vvDY-C7Zr 
zvLmBhS(&o?Df)|{H~Y@r)qWz0mMS?~3oI+#{^B(JH5Zp^jgL_dPoHy+2APG5k=&Xj z5wB^QGE-?I(sZ!C0f4niciR}s2PJwq<|U1r7Ks|O*5H&&s~|q_P)7|!H>ICvfiqur zqND#a{YzgkWG`#PYTCu#=#|i7TqMDg{1c~2B#92exYMdkbiM7CAMUet^+H#3p#tna z5}@6rXFfF)mt3RP=tO`f4-LN|9*G-JA-3F-VS}jA5wB@KSNXSjf?2`@XvL9cI8v4V z`{r_id+nrN+hk{aUNyuGg{7C!*plvh>oiYcRwQxV*>u_=6yg=Cx>UMg=48ciG?TA9LyJ>QD>X%*|PNit9AjNyqn$?(s6$WqwC#pcK+bA$&Gi6ufP%)_K zF7bBZU#@ahpQpH_kLm11W?U$bIsLKZxL}IUdSy>=sgbF`-^HQq)Eu;D`@Lk#Cd0#i zl29QGMgkAm7=YvR6C7;A2&1gWhtp5DVi7GWbQoC$OiH6lUkAI$!S4({_H~|`kRfAg z$I7Jv%E>x=*teoYe9URTNvZK6R(O0?Av&xjU6_a}WZ%q!mFT5v({!EYKkgnAN+=|I zZX{dZSii#tv}4 zwGhX!ZU0@oj9mR+44hu~-}(CHu1!{dD?~4&lO3|Ra3036N`nHr97}j{Y?TsIzfqgb zozd*Eq3D#O|e8Pd>hw3J%?MEZhlM~pKXw|^Rspd?gEV)toIs^|m=0tY0A zX0bu4CBT##)bEz~?Yh2^oi>5pxg83o5TaZ1+rFQxf7-inkK|(u?FW_N&Icq;bwJq) zwRbKLT+@9Y+WI?@`WQ$&>eL;0dy_2%m-f{d7)XWIzx%iN{uzu5)k+o7^!&@2yvSJJ z9BuUAZhznq{Ac(TNtD?UXk^P5asfmD4P%7K3wk#GZ_GGG!qvvN6*0 z&H+XA!B4b#b30HgTEK{M_G{7b6VmaSgb`BJfs#>sOY0Ft;buEt*|uM4$fVn`_DtwW z)H}H$=$qjA42D?4ZnE20IrDafc3KOn_Ih~B6YV93Dc3_1Re)lO$2{|3JeTL3$ma#;EBnoeuS zQSsW8_ipP)|J5o|ASaGiN5~$3Myf$XE+Ez#7JgTKjie98Amd7uik`jx^wE-1pwQB?qCCGyYKT5rZ-_MC&hkVzV3Lf}w;xW_h74~<*0WX@ zSF3&8A)7=yZPT~hnu5<*q9y`WC*FO$Wb5{3b49Ahvo*6(*K8s6I~11&=?81b2mDCPsj#MhVh^Qj8v% zRtShYJZ615RC9DCQ0n17iDp@t!jJ}jq#iRzyc1ksrB--RQVQPUN&Ud)XlIwRlr}mB zV8Tgk(G2FR3eu>y!iVZ5OR$a>Ls+Jx48fp_52T}}L1smj(b&oVY?S|6{z7i!mpw#B z$CFIl_1s_ABtz)PU6WjNOn50vr_(g_Oof>idGWmS(7*mmax@g_{S|&xFYDNQcz#Kt zd6C#@X~ONE6(4bP#uiV%IZBc%mn4Nyx`ACLI-Z>J#NA^)=5E-i2KhvW0Mbu>bZFO_ zKC5#%4vRbXbfTrdCg9}S=5#{vSN(db>B<)+R4yU8P-~n)u-hs*%-}liYx-2tGU9Nu zq4eC@D)peg+mH|LlpWE)W+@+oThJ>Y#}+A~ktXS@j%Qms#=Z!TQe`x~X{%WISy>xa zBW@-whEMfq5gevGxU*C2-7H#jsnX~!r#=F$^8AcfVm++3R=t9|-&*{g<`m@%w_=NS zbc5Rb%w+|Bscr|RJH+<0d4G>g5ZujnuxI7$=vdv-kMSTuiWe zE+HEq|7PyD&udnE1^4dK@5>BqgJca#?r{v2rg=H2RTQ=E46j}zChCl8_siA+VcDVx zeg}cBJqvXvc{bRtAl(gf&$fXc(~!nuV_QQBYglPn`F6-Dj`ducJ@)0hkmKHFcj0k*4bE`4!pYwITQf;h?eww7RY5GBjv(`@l!lRd;uK|k=slVtBj?86rz3&_Pu#!JY> 
zhyF6~QVT%`)dbOC%(Ix9Q|jqNucnO~OP8axZ*~;_Zo$YZRj#;5v4?IZdd(RLWcnY@ zafeKX23DOo_0MAmG{GJHWFQ>Hcyz3GjU&!g+97QfZIz3noYbO+p8-nZxm}46?XI)+r-W3xG9@bP2 z_2w1U;F7=FQ(mMPoRfU8JaxthoUszYXBs?fSF@=UOC0P*eiD6VK)vvG?|hTiB=!B- zGg1NXG*x>Uca^eSG|W0Vzfv3VB_AmvF4|)Wo7Z`Hj`!XjAL>;t@*yP5#n1Ct4C$fs zrsfYgas0W_QMCCi*%HpC(I2LS-!NnXgsQHUTkUex_S0q9_~KYxW7X5mdMDZRShdVb zwqP!*NG?*N*!+WgM4t(JL0i1;gdBxMxWDU#Ez=h9VO+?Ft-xO!6Zp?j-vn{&27jY-|fzcl`ABQk6|fFfsI@o#a^hVFZ+yhO`V-OQucFN8Q{2{ z2t0M&xZDVbyeR6qxfE6X;Echky7&>JEe4QbLyBK}A6M+~)H?2opQ*)4rs8E|S4tpi z=*^qU^@swk#$wt2EiAgHclTn^Ph@n|s*{M_kBzD%&BfKI5biisWz_caK|ooB(Mu4* zfVXfRG)OoaHb=oipTyAYNt{Z)Y9c>h+n5uFLyl>xU8GYt z4~@@PBewZip<#!wI;4i^MK80=aV3$O8=f#ggxOEq;v8MzvQJK?02}S--03J)SH(J#6oUki((!Xht5`Mit%$7hPN?ud%K zQ=KgejiA4A`nCzV^69PAwgBG7dB20);`LsFZ1Pa5f)}!OL!ecdjcxFAsH#bGc0HA* zs%+$yFZkr=%y(7FDDdAnif~hq<2rb}e5q>T!kdfle<@RmchER-k*g#_rD1OL&plwP zk=k)Saz11}#shktyBJ);;hK%mxB~2&M$mWWYzM2)*ommA;^=2t)TyCiLFITU00Pk5 zmw!CIaWIO}U(727k&Y}9cmn0>F--1707)E{nDHRb3{8pHw`N7$VDs5?i_okPZ!IWc zzH%?!-VYDZK}k-cz&dIxto3}Gg>asWYd+iG#MUpO3G-x;>2#%(P)hGiYLZNg@o)yR70_Y%(HKn|WRj4ML7Tnt^ZjYOklLiL67m-cF zF6VyxbTD5O&zE|@SkIa+4GI*)b80PPK?JC6;$4?cT5Syob4tYMGSX&wDZey3n zK!d=Er-^USh0BhEi>p$Z&Xy}HyztCOH^bxC&f4eSj#5~)P9X`j!R6RH8PiN1Hh^#y zItqfMUQh2XrHUL&$Y!*2I&oOR0(DTKDtrGN#|xs!;=Dc9P%Co=*#d?)Dw;*~qi@f@ z|1BE%-%#{Vp#x9Tk)1+x>B`75h=U4p7WR@-(}FpbahsY*W$21T5m6f>x^dFfM-gfM zrAYpO8;ie_Nv&mY-TgvM;64n$55e?RtJ86+^njzlqwoA#8WG}T;?knm@UZMa^dDAW z`e@*J5+i79VP-tD*jcwzO2h6U^%lr$xV{uYmazD$MrVN{-vIMXYRzto`Q(dKA$b*m zoYur{tIRHqX_ymqUMfC8jQe2YJd=C|Cx9ndjE9H}eeRLU-#0yU&i}9WP2gn`)ZY1k zwVizfqyNj^ep5Tn=wSDIIS$l^Q%2ftNAQmSyAK=i?!!v60loV>tbHS>e`8G(wgY8? 
ztpR1G@}C4U+Are|h{ka8L^5?5J#?`!2}(OAefXi_<$)!RJE=}Iqh)Xm!y+uaq#0KT zW*l!InndCWX6K%=i{5#j`1p#$79r%WNhZe;F#aYu?}(hmIFk~ag-V9Pxi;wnF-vuj zg`Uq8t)qk{$U^7T0Omp76FxipM@lHEFeZw1L)0o@;m5e%{bd{4{W2P#z6mDG_-*w1 zNX|8zWs=0H_}F)Ktt7-T4;2iCcv&G1WuCQCtGnKThl$R^z}c=QMHi;j)~E}hIkjjE zb%DHW3@)_L6avwJxBvc1m#Tj>%;c5B0W{RnIv*QZlvpAylOnu>J}tNiW4Glz<@<_qgf=te z52GiTY^(Q!8=9L#ftF*N9&Uuk*BzRJQ_D+2=Tqj-V0%F;2>@bZjay%FCs!Ke3g(s&!y!miMoXC6-hgWD6Wc9`(0?sG_C z#uqaHvtz|1F1NOS&ttzFukZe@LWW05YpQlKepd#%mXQv}S{{xEb3Kej=crRgr3e=~ z?jHk84`SyatU9Bpm+#=yH3|>t;6Uz9@mX!-(Lsq-4t<@SeDb_rmgSLihyuIGFrKG63$HdS1RgMN3_TZTx zPtw9Cm%uPRRwPpiV+GLS>+zEVtokAO1CH`d?8qPT>gEPLgz^mH{m_4Z%+2u^x-Xop zL^q;%vREOTr@<{!E}@`gJK)QM+VR$fD1 zOlIg7Q$;x?N^K#(@r5V4+l;cz4rPp;vwyx}K;FDU~ z2oJZlG4yhH8acJ; zCj~~bNlwDvi@YniXBzS{yhZ!*n=xfL`JkX`tA7;ZK6!-oH{AiEYF7(!UH#m`=2(DE z(=IHedT*iLFdIvbv&>PnD!{j2%eUbHZL^ax5Q|F#+-rq8w$6@yqs9fw%zZVgmvkGH zaE(vE-+|M9?M7ECWBz!u3C=67KDOi9K0@|8r5M=1naK4Yk>LiaoRgt{RuvuedKcT` z4%W}X+q&AT;(V*aH_I3u{~!LXEm}qCFdCq420^V~a0aGI?N&PGB94twt;yVDKKg}b z-7T=}6g!QrMjZu&NZ*RRE-GNc5U!MRd1~oM5)=xag55WtIAS_k|80am#J3Zv0~R(0 zCzi3OOM{vD=h34_`|9n6@B}(^&Ix~DFG|z_cE51g@R#F_5-R^f4g#XujkKC9tSkhcRcU+-m~3%2Z&0pFisT1+8*y(bqTK-%A-?t zPcpr$<-H!V*DhaZkl2xHH?xlQU*2_<6s9< zY|1o-Ifw#VtE>d;4s2LI@I(@^VAMA5G3LX}Fbl5NR{0T5XZlZ%;2YV2szV$vMC^-T zJ&WolbX8393UYcjSYt9fqDeMqHiU=k%v*wJo&8#RV%p!;TNzxUHY$7@SKCBbR4cP$Ln? 
znY5j&^u1i&dvl9|?UFljpOJJgx{DrHW0spJ86Z?B73HnB=7ETh%s8iD?OCE zL`4!8G0WKWjORF8U7<7RTHSdImP4tlCrNCC`btqBh9c<_@P<%yk>P$M_sz|D43BV_+jS{$()eOrV2c zwWQ~ER3wR+)$05e&=G$%8}%RCzrg41;z=NRPwNJyo?*?mV!!kU8!i%f2bcH9EWT4e zjAY#puo{`tNcfss4Tfp1+c6dWWt}D4?H&y?GYhklu8wT)_VtDv{GfgPn`Wf0txL`p zY{Fh6*y@H-ae~s{1tUZLf47tAmYJw3QJ4$Xv*w<+6OZUHohPi+kxvA_NoMTiWCjZW z1B2GlMm3%MW_>@M!?>nx_yuKygG>FYNgH9)-$yLD>)G}k2Ukvoiob}w^GJd_lhK8t zqzU72qQ(>|GWZn#eiiLNZS?Y4@b?<|{S9B`bXAbBwdf$$M65kQg(URoAXQ{(X10Pi zd>Ok3gOwSWRNg^uvird?q$TPg>%6cEYv`}JMDqYD@ldLy7Egy7sA39dn;cIg13tx$ z*OuO|a+cJp<-ByZal1#uxiv(PFKfari7y|1g;cv=w^hQr>4}X9Ab1Rf# zz|TBrJ_}C8=pP%mckM6IPBKyjfcV!z7|yu6U~n->9(ga z=ohpa@%Pt|>rk&*+xx=u#~o)at6L5`MrfsLEY@cL)(*^l)AR#6at6qZDz&w!0dKTN zO;LNxs4Nt>e&Qny%`GG?!QwxjJWW=K$i04~1h1~NMtA@oNJ;b${WvDx{A@DN>oI{o zTjrj~w@8u#%JOKCyHPBYwyXrN&bOY8CcVZ(2@#$g;froF)Ooiy9uv!d)1uKD4A;wu z+)g3vMF@21bG2!R@%ljMsY(I?)=|Kej{cjjJFrkbJqpYbLsDsWu{I{YU6Q3{IeAC^7|}j z{fOH!pipY2R=X5Jnw@m>X7SJ20yG~fUU3u7o6UroIku&M0!6_*Oe8I$PFbJx>H|4d z9%wiwlE;ja(I1Z}@FxH>zfQE(JNZ8z+nr@NBjx0+P7KynejHD((TNfl>+ zwQ{{qSxHbx3lNGMT-R*PO+1O}P{gz)rkEmL8@RdYls-IV7pJEAdlk$F0jmZaH3%KG z0$oL>aa}rgdhQVP|%BGBD-+XuaqOEAF4%tn0kxqs2RHy(9#%iTFavm3^ z!lTA9?HN^IS;AYc|8KSD%MkkmB3gz&e3bh7%zgX^k3IwATFGHQ|GKBsg9e=0Ysqf) zCO^)@-vN}VU{C&Cu;Sd?BW=~n7|?O(U=$N2EhS7PBc7hTX&BCwvox>|w|rQ?Xz4&H zB2e<(_2Qh7*h>T=iUBRsLEqDF4B_;@K*w(f^<2#B0BpyOk3L|Hv~VnqTzZOU{1*17 z42ilm77YY3h6qh_l@oqc=)%p84m_qUt6g4G&Ng#b-|_i<_~b=SVTJ03tGE*AM4A2OFk23DWgf={g$f_i(_!FE7KAQ?2FRDxT zYy-?y+O6<(t#kQlp!`c1xmhnu7m@KiEelTdZp$y~ZqNC#}M zl*rsl>m&0`#c_UItwS4pzZ-;<{LGclSoS*37xM>(p@QOX9u!49^Dqr@$xxnto$usZ z*N*O^hjH6~2mjLjIQ*vp`YE;k0m;RG2#MMZZ;`HEtbOD9i0FnqH|6&%4NY3YV@P6; z6S0nDvQo_dX^eov4QPUCV0i6^n+UYZ*YflGH+b7~`&nrCJL}E=Q#dvD{Da)?6o7PR zsB=qO(^KMh+d@7EX>Yo3Ck`x|Qi+74rmla`T}jV(4Pf3EtV7rK@82CKv&rmNjwS|| zEXQs#Aq2sv)5MhYYl&O`ZX+Exv4{yi(n-WZ)B#u9`W0TKRBKWQ-x*0_kZ4XLeMj8R zuV#sH7Y&G#jwNZ~13=0qBC2?`i|oN+T{YjGgYbL4QUc%GKuda&W6XD1W*wwx45L-5 zXKvW|kGEyaFv;X{y5}&dnV7x3?45vLfMnUpXo%?pDM< 
z5A!mnuA(dn+S@bINCdD4ri9g%_v#bp9wR4?d`UM5V@-qYuaR}x}B17%>vU?TyIo)K}q)Bt^nZc53T{d=(8-hvb820p>P^ zmR|DflFBgNx)fhTQ^^&MQhd}V3Ha^r`(3B7lZBrQkkM5xq-l;QhHz*l?tKj&*@|4X zNa*MT-31jWyDJ&SMKyjN)q%yl%G|yhaB63Z3Y16Q;LM^K#M`P>skdee5cCY@V@(0U z_S%0l4d40kQAvhnbyp&dOgQa?A)GyzS)*Ig%%3gRfRB$Kepl9(kNrnyJe_l=}i zRSzwY+2t94LZ)DR**hc6iqgSlM1vk_C4U7|+kQa;iN~tH!bXhrbTu?GB+OW2K4@*I zDi0?3?;yD;z$p3Q8;~zS`T&`(M-U_6rL>j3%Q7o^vSpPKA_kJNF{KBoD4kM+pW!b|qCq?dwnalKx#PZ%9j;mwhB_j1z`~y6P%-4f;&qO?@k|w2MNW>c0p1(ewr5C3r7p zr`Me}blt+?CQJ>Q_1rzpbR5HKaMe4|Ox2|KJ5LC_c-PIs8fJIG$TxFOYRV)1c4{PU%edy#rc}S@I3scUr+$y|S4+5i5|i}FW^F_cJ)?7}sD%qTuoA6- z!2~_H%WTRrS@Xl@1A^)Su528h0)xKoJFQ=Us3rgIw@1Im8rfW+r3qHbvR<9Te{e6X zTEjd4I+4vwHFwi6!*uDb)F%4!8TPK|ySszWDyKgGQtAxQGSr}8*^c!-JPC4&kc1{2 zNU&XyY=u+ml*=uUytHBY-L2DY)=KV;WMngDcE7PR&VCDVIfQPg#R8G2?j!#o;D%7{ zd4=WFYHjN|J&l~?psSvRI7p?ygb?XMW`hpN z3D--`U>_$ZqeG{G?Q?QU6{UDo?tz@FfgHGkWj1JN?>#&|!piwhzbO^P&QIF*pTMe< z{5YRve{f2$;Oa-zdoD%{=#iw)P-2HUJB%LLFm%~o-)}0A=VkhBHgWhwgXL(M69>-x zUB5hDfYG?+1cR@xbHVPb_Riak*Y@~Gu{w!U%toRJ9-WH#6`~UA$mUZaC(EH@DBPa@@xamU`K&Vdr)pEQ8}Nw zat<1wwzWCx-GUPfDV54F9hJIS^*Q2ifixTLUaSe`>WN;!Ut5JVc^=h#?fSv$x{iQ{ z@bnkBT9ybds{_vW<|>j;Ayn&ukN3`MU14<}@V(G*Us3siK`n69Bgt1~F?EI{7ULeN zsARCS5RD%aZ(OvcgyHZ!0_ltJo{dJ_T}PLXDL#vmYK2J%+WCGH%h{_R_d2&DMFYlb zl$%?H2Q++z6AeGt0Hc6wt$2ryj%2-U8eEJ7YA0pC8WiY4At-1t8-^v^z<+xmP{xIu zL21C+bh#VKYFDOZN(J_kBPl=gq>_@R&~g4LO$ZGh@GD$n_Wngw`hTG>rn*LPB#f~4 z-Tie&)H(l03!oF^q*sD!_5X4Am2Gh@OS>y1Xpj)xA-KC+aCdiicXxMp3-0dj?hqt6 z3=9r~JDgd2@AqB%I)C6?*Xa+>%!he;x~r@2uCA`GJ7224W>2BkLb=7%$7z2zzwn}C>w)l)NRO&R-a0p zatc!p3GR{Njd$NZJ3~rTParqGfsN|)0-gqjY!@ro%vV4)DeRx%sKj3i@6=XUZM|h| zd-MlW*!&gZD&H{G#k8!5>AOg+o6L>G2$clfw-8lDMS5~|N_Y5c)A!inwl{q>9XmY!Gs?POu7X!W8u? 
z@sInsd7ELqGXWbzvW27V4)XGwV^ZK8hwM*vyG3hgW!?y(Pzj8pN=P@@p^W}|6PPA> zh=jBUQF;BS1x@xzgpdSO;O;_iC)&1ktaJQ?h}2xs{#E9@y#zD-5sQ?AdS)<gX001K+xDK!0KR)hgCNaPsorhVxf;a>l7J={tFfCfs`jvV zkPEwe*?u9R7QOl=QifdGlu1g#+Ar0ZqhRV=Z3eF+{`)L6tv#JMlx=g+N;kTdl)~zM z@j_eOs)ZbDU67uVY97dc|uTSUzjph2rXm2&9rN!l5 ziIMRvm5H7*J74?Z1W1){wGg=ziVFN`$fe+BzP4#=L{mKsQj*Fec`PqzR#D#cEtiv~ zKqcXnNDbaiG1R~$$}3s5^Q^0$z5HStbfjqO3iwf9tkv}n2HS_Np8I%?Gp}i38l`%tCf1cyN+3B7zTzBObZ5~?1oTsZ^8laH@2hOH3pYNQa+wlVkB&)Q}LO* zJ9h6TxkFFGn|o*9qsTd?sYa{)?cJj8J@eMqoqkV84p+`{Y8AVy(Kc*;b90dOyn{4j zr`C=8;>ta=ADXKdwDxJtr`>3Ez zz1H@wLhD3QOUa3ZJU-Lfk1`~z-*Oy?U;glzbU|2UQq)QjpE$0T*R5ZJ@B|p+#t(&5 z7m1>zlQ?8f4Ela4_KcNZx=U%%n0m-~UNP5zogGfG?c6RF2Wd1lGmqE%foWgIbJoS0 z@s%oDk3=&y;hrux97*JTf4aCuEqmvUBhvI9x-Zlgo|LtwE>e)1hy}&IMKa#ib+EW;o}oN@sNW4&NOk zVBCb~2aY5KYey<3t7224eGd=^?myIG<#u@M%9KF`nu*!C;#XAQ`c_3vpj$(kj$1lT z50~mWHlkL&$d$pnx-Z-uhe2gYN07T|Z_l_p=`d7tvpiJ(*8G%9JpW1=nD!|tzQ!mu zD)_ggbF%^*(S*pce;L-ok^9jLXJxBhy0F(qGp{x!Ocd7SReDqs~o29G*k&e-Ti%$3^~kNgx@5xN58b+P8CgrE+dJO*LoE`bF;YT}(E&SNUcUjtC{mFy0lU~Zqm_JequiO%z`~PI}=CZGeOOf+Sv~FC+8}?tLAYE;}XinZ7=1k?W zU`le80irUblL}PH}@j0 zx5VK^aucgi_FN=0B^>4Q4<3|HU<2lm*Q~y~KPVedS%dST?xV}KJ9k6*8qXgmFWu(e zEW+B+e@FjvB$`BcdnHa0YDOc=fB4D+T;dUC^E^90XTWqm3Q3$ZUR92VnRw+Vl8>dL z)qlE3r}U3rG9EcPi~Xbg(<$m#vq&>GGAAXIy|^HBdtEw5$=GMd%5ND&5;P-l*?j`g z*jGz*VQx=muqEQ-)qUa^#kk(kw|Ok??Emptf?@gbRB1%VpOQwyRNW*{M6mtcl*YqW zd@-{Q$S2WACX3IQ(<|nlvG~i92yd@K+5}s#L#**mXs@J=!B*y2^-Q zGAL>V>jSW0n1#qKCz#8YZz-7|WAF{K>ieLxiMkhhZ(R|TA~6Hf42LEx50SiwK>ZA+ zER(NG#?6h?;M;NQ7l6*Z)N>9xl ziLqu1n|HtUZ0A9}oZdyWTLhf<7MM?x;<)8k$w9;{QHJB^p6YZbaQ!?w3%4g5!^A!5 zJwRiJSiyyB9OpN-ux1_n6(o;&@7q?}nY=u^cfzqK#kyx3RCFdKRgb&VU#R!CP-;l+ zI{d~ekMe7;Y639dum${2)hzBZt!1#3&vVxy5-8=`*Ie_PqicOlr-8$IoFSjSO zEgQi_dc>O*bJ!zC^TE4!kg#-HTtmx73zbrIda<0ndf!K(ASNu~%73cZ2S)3b&e(1r z8gury_nL5Z>>r%6weXQUg~5Z5A_H-dp^EGoTR7qk$Uv1PtCPI-D2 z&%VNFmJwxscYKRf#(u)`S;k!dNozZ7LIg}w7rcpo_e2+SksazbI{gNjw42`ANLOhP zRiXK-#7w%>NtQg+MS1Y_VqtBqcrpCw+5h=H(RWu_VIDnYjWtM9covUYTRUL`O*_xZ 
z7{7t-uzIe+5I?LGY9F&?d>QV+}HGo@uRMHE`E5;yEccFGxmw#mO{@ z^i&XV6hC(55InMiZ@^lx0C#m^I0cG4pxF1-9L9*xU~HUEt~@0Lw0*k67ATZBdI^>l zR6u~Opu?9MqB3H%vFF791}DQRB@sp}y7CY`YL)sM6bLmvR*Gj1@z$Kw-t0dX^{wCn}WlDP3U`VODg8hl=PToXSdZrrbE0 zk?kdLW2;u9$|tfex$GxE^+&dA3W#qwVzdY)ZoW(gm8xRlGczf^4s#FR!K-tBWr9>| zcg)s=h1wTUspP-Ju*XmumJ;tAhpFa=dInY?#IDZI1ilDDQ?!a6z5M2x{GsFI#=O^p z&1h-mO>^Nn<;}ATw4FX5OKcVj)Qn+p23bpTftRlL%&boTN|QKO@`Cw_xtdOQvV8T@ zsva)u46(xy{M=-7%AN|IMw2qkmhcD+sgc&N=px};mB@Em!TCb|Bc61!NBPA9CDaY8 zMrhne9A+tSaK#`YX#U60nCjxH*GZ*Fp2DAo$x)6hxx|tJpg5fN|F(~(8AjsGLOJL# z>35eKt4QM0eyn=tI5}k4JEmM8j9s%WI)9!n>;aY)mwE0GS+KBFdx{avKD$%L3Pc@PrPFkV=N&NXl zbHkcs99pWqrW-VPLPw7s_f+;4om2PFYC91J{izM{2dLfxi(~dj=w2M`U(ZrM;srwV zKX^SY`d@%f%h%DZh2Wzq(kQF`d&{d0_bGyY&ueb+3kGRqxD6bTM%|{r!Si^~OG@BaSAQEyPe=r8AF*QCRMU`@y&v{=Xzo=g8+7CP*u&x}ZkB`rK zfXTUCraVp5pFd}=hdIY?AHE^w+Rv3Q+(P;S821qgd4<^euer z-pDo2XX@n=&uO(FZ}ypr@_YY+A`Ci-JzdIvy+=D#3QNc+jLgRR ztm*Z+C6E8`NvyMrqiPXGN3c1>_hXx*j`sbJw}d^@o*|&?h$#a6?%Nd+4npeS zX7tWcElj^D3Gb1;gK)hKjxJXLbXbkd#-aePHW4ekxbE0lLMo@_cM|EyY4WL1_UxVx zvuuwq4T9^{JCXsVyxBm1OWiv5Zlm_}zFUOS50A-aBL`v07TI(&8{@{=dEc z6gYZiC{f{z!LDWlf+~SPrv9s?Aww*K4rSW%R-UohAfnRF5Z;RV)I^Fd8=JgfCVQ z*h>1UR`yTRwNqQ7*1kq4{~7h0iYxu+pdZKg^H&e;x;8y#Et6M0qw!jy0P3HZQdjZ^ zDOCs>HFGx8&#Ph;`IpB~VQNeyYhQG%c>-SJ~j7;u1&wi=Wd3KUZc?Mv?LpNZRVcYSU2B zr_UIiwEteihxVCHYAHH@|LJ-c+6(({*T!F(mL0xP;?NW%g5q#iZ82d}jgXVp!y(Fg zA#A{wUwgr~1Bh~`V%`hBadKSrFfO3~fOba{>r`s-PIL4J99xJVq2RATs@?aO+t=_1 zrwHv@&V2FQU0F6kA-LxLwvaa3*9fR+c7TRf=KkD~FE`29W?Thejq0Rq z>X4^qaH)2;t9TY?Q@_B8&VT-Sj)k*2^O$|Jw?2 zY-MCgCNE9>2maAt8oUJsnLO-7n?Jw1e}s&SWyhp$M8j8V6T75Hc1pQ{kmK~aX8m5q zjre>Q3Xi2Wc;0u1U6Bd#@}5(N-wxr)l}Vo9UakXMTZJxCLpLOg)W~sfk1u@3pfC9W zrFm!ezpLj!kiJbZdbRcx#E~4s63gi;4_(-F?f3#U-k?sfSNyE|;nU^YZ7I)st&{dc zSp}Cxy-cIac%ki>GsnIuFF2=XTlcYIcQ&Jzaq$l1Du4Nx6M6{?>ae-sB`{=vhNlkA z%W-nzJK$*^5)FujIpNLZ7Q01^U0t)eE;v=XLY>KbcCP#VRRM4S@7y0Sx;i=mOOt4gzvabaBDo%`eGoG{aS56^w_?kz0#varJC#lP 
z3^aG5(DHgab{6SNvyDTC;6OO;&3r+Fd~tdFK>f;L*TMYVa&?W#thPxr}P3H2%{vF8L`$wQ;M<|SoCcokCQbL4mu}^~D|_QV7r9@OpO(=#w$NI#LdjxyicN!N_Y!6{4;hFOLVeJdZ z-U>f?iN*cdwsn-hDf2|n2oo0Q`*rU4lT?agBgR-)fL&Ub5NDh zsJ_9}lPvz^Vxs?~@ma+(hn5dSEL<8@NchSHiIX+M*yvO;nLXJR3)hLu{^K4yQ#`PV zPw)u_yB)#P6Get5Ful*J7AZRgoGAdiIisr)%oJ_Qwq&jqTi*SdWH!klMO-?hgiu?>!dPVhGfr}H+>Xp0)63qUFc2v*ts(G_l4DrwajszalGj@yW?vv5zexycn2 z8kLNV#R9Yb0*{Yj+z{!;U|`=$(kKrT0y=%XqisP%GV1Ic4k= zy>h4%d($6sPiY*>|Hj9G-29H;J!pjZunu3rm!C>OvVd@9`B{@Y70?yw@^TilvN3D8 z2T|p0Ph~T(jMy0l&GV;%xvWhXaaPhH%#^0B+rInT`dYu`Np&VT4A335&l&#x}3UltH(y|H_qbC6+1ZH@5sL$xWk+^(YCz0#Ax;Gd)Y_| z0wX<@*SUH8ejnEDE;#oMhpgexZ}oy$c=tcbs?}Ez%Xv<(stPT9jSiGyU?mH>;0&yA ztov^Y0@t~LzPU`gSPO#0&i;Dm@|Oe+;FLoSy@JGZ2S2Q3GkA^QN0MEh6&2~oq$0jy zAFRNvd%@%PLyLrryvL7DMjx*wYDL)p2wbL%dH)ng@UUw0_?tJRkVdiNmzA<7^jTM$ zVF7IcfKslMu|7bs4W?Ff6?cx`bEw}pD5pf~7n&xNdasbvDQDaVr`RR@v9zNp3-T^q z_cjVrOfaeWty zBrwnVqP_7;;N=LG?PTH1PN9hxO5n*yFE1#}UX!JRQ*8FfNqN+E>(81p&O?U)|xJgQcuW#Rog?(;XK{Eogy?+6oF^ zd)=hBm!HNm8nZ%-=lxYXWowIJtdP0IAY)ewI%hJ5LI>cA9PM9WS{DdetO~rNHmVyd zoFg=e?&fyN1FHcLo8go#o=1@d$1p|i7{%JM6HI{oLBwgFL4jsD4mphj2x^~Xs9j4@T;&7ih zIFM%ROiL$iO9gqIuY|*4*PM~?Z+Kw;j3ZF{W}S;`G~*~^pFNy3@dyz1UTM3Jp(gF3 zp9dV&V-y5x{*k4fi00AKL^`b#0mH+fGzuxh7+QPHuCR^g8(GdzrTNRDH3%!G1{xDS z8TF~JMFiheEQ8TL^5M?K9=IC*4vqZ}96u2LB~g6g2r3$o%i?TCwe{^o7yTrn|G7-~MwK;nQA}Ph_O?qGt`lxDR z!I74$;Ca%9jq*<0Qj}2!Oq%k6j0_ty!7;Xa-)PZfYGc1gx=rTiI&17p?U4u&!m5D7 zE&vvQaKAI}zQ$+b6I)19-zKR}KO7npR<-N+l>_db&f=rH!a{9a*8%itpS)jAL#m0_ z3fW2PVKGdK#6>{>=tqT+J~tS=r=@{vd+!Y(Vocd|;|0n+pEx)qw5m>BID-`M*ybyY z`5fF6RJs+xdjL_Zg*E4)ozLVxAdro6m^zTksk7!{?-MfpHPR&znQo+f4~p-+BIx_~ z{2$qwLWd4H6ZcApd(n9%!z)n<3j$nxx|rWYi2^D?zdM65LYyH2FBsoW&{Kla%^XBi z*Tbv`1uUBm0g%W7JIGJihr1`R#|5u+3Z)YQO>&1jfQGmB{JH~H^feKFT>gs3N4jk- zQsPZG-9yNn3AzvO^qYM>cvZDI-4Mwmg*fMo>aaem%lYRz0T7AFq$ny^k%m(_$M@+6 zGz*v_J)zdvV?NG(0yok)%fz3O0aO1I3-DW=1}-_u^)pp-_!#}H9DoQrt_)I#@2K?5 zH_=Eryn>}X7@zf$9fv3-O6gSLs*=YFzy3i$Iy0-`dK~T>T(M3QM(PUFEsXIWf~sar 
zBgPSZn?HWR-%LVN<6(&6Wy6)$OU_$hJmuV9KkD7`)<6|h`UAytf?kpHS{{F#vRCko+Qf&ZbA z>hkht3UY1&GS=Qzo>yvP6^)2V?_*Ur41c7=VmQk^sC)LF9f^Ttv(dV@UG(KOP--&K zL_i@YW?iBKi z!cSc2;JnspkG1>YHZLgpO_EJ2jYak)31I9hin*r>EkL==~BKp zGt$EM=wVMui=-v|vo?1NBy#aT$NMa^DX7cE_lDlvg;?Cey3-je9oE@8ipr@4y4`Nj zL4Xd-mC@pXX42)WvM$Wo2Mghy5?J4+5mjBILCmB!xxo&VRg+UoR#9J|{vhjG8k+?t zqMB7FmF|H44RVVer@LdHF){;D-yM(lipJd#%O?XPJa+T%)rZ!)bd;0)=~aIiIcns- ziGC3fM{WBuqF=Z3MK}kqt4_wbXU7n-`0A|?dCEA2T?Fbe2p+-AxOM;wuER^S|h(U8=GXNfhQzEOcbs9vU-?=XWpnyvq-C3d-wj z1%5M@rgT|pr$UiJG!(4E_32*ne=%GQROQOHbP%a+x&E1<<*y|28@cQp>3_i8nZW+My{ja zB%u?3lOp2&fVW*QZME?5vCvqXO>M6bz0nDq({gFj2JT>rqok3z!UV!3J-=FR?oLm{ z2P2-3^-JNdB}|H*gF380pM}_+?X^LP@wt~bNutwMY5E$qTpnwsr?!7$j6=G?4)A6tcvM;nYmpL+#I6tJWg1AGUGwcj>zkF@=D7yMx=JQ1 zY4v^H5qQ!>u=dNDt6Qk}1b%E19iWH!^8^&tP?l$_%Dee-W+K zMVLQ;;**GvZ|Xfh&2#+B!`}B5ha|_Y10H&mHN7Y(&sn_ZxFZI7gcxKP_(ikZuEU>S zTiO50)7>HtbJcU?^4Z}lzb*33!R5mw{s@mp@?>JvNe5zk_%2&TB^YO? zhHtpBXJmg{-)UHIm^=KVN%Hv{?szn&oH{h=w(M zO|IQw^PStFX1`<(`J&e$R293@K{r;4QUX0X1*6{7BEBZ$s!1e65KT)#2054zq_np5 zL?jCe`fJu;Nur;zEg%esB-ner~Ia zm~8rhQ*%qbj=bkiWHFR>$v$(EuIY0rEIeI%RjqWe>Mh-q&6M6>77m+~EJ|Op38VTl z(`wiAHzV+v)4hl!nlPQf{xzO<=%87SC9gei8pAu;!;i`pu>_(>cT+FTX*2L~w_){Tr)^EI$&2G`8to+1Y0iV_H+?AYN7d!#|x5x5f^qmPmY6De<#WTRn zfWA{#L2^$=&%9e7#zTO@>;gC#9U;t)$E}J1sr{FkM?Qg0DK4!!ZzfU=d<8R`=k{Ud zZy)paW%L3%iqUZyV5&@CuQKEpGVQbxM z+}~1T(ECxyqam~5;j)=^OK|35z&}Ek|EBmJ`&!&5wGRo3mZG*!d}OR~xQdaZzhF|2 zT@7D$H#?W1Yn|ynGwoTk{0iVayEkb3d}^bPTDKg$3y;pZe^9Ydsd`PyZ!Gt@_9=XP z@bt{@)r+sIW%BYensW+Xg_s0RNHanT6*Jjw{2?bO=oGT88s9>7E`lt&lFy~3f47Q+ z2~lUZDm3cAGv>wDHmh9q* z>3@j@tT@$oq?ScN9-M_ASdZ1gWjxxZm zM0y>h^cm8G&uoo=VT0e4Lx*8-l#NYdncmU!6!p@2INPQ}Pnw1L+6UOSQ}RV3Yx*22 zWty`EGVUTq+*PqfcGSHlYF7kWOfJmcStzmDWtl$D$djRvpG&`C!0%; z312L^+J()nZ*uMoSkm7it1sB5(VnogV2A^bP1 zH+>SA5(r~V-~1Utk$%@%lQ-vVtKW&`WpIUQQdJ1!K67=EH+zyB+%x-7r#4q4ufuVw z$BI*xpCx@#zI9X+hG_10dAN+(sZ-WTT$@Cj?lH_-r57pt7C$O=PPcgLbU%&=*p0G1}VnKQt~4Hg0ITpmF0aoZQ^Tn&l*cr25`qY%+R- 
zST~z>ytqYAn_>TBT?GUwUR?83aUmHua=wkCh$caNScmOTQF3jD|8|0HI{z0pwy-Yc ze}>E6!5%LNvA5SBG7=6^)EkiQZPVC3+GAe{{Zgt|}P1h4n*P3YDzv2rRCEcy~2_WB&g|T9!F}65ljy!AB=>FMLKACsM zo8~9p_KO`fBZmZt1y|FqvF%%@%eUfWUqA)tdPt}!%B>u=rPm3}tr^;k9vJ4tSIPa! zRimNmyRr`oE@~WJ8-Xfa&bl>>8`GJ7EgyXJP*>T8v1zTue691%sTMOlc@_73Xd z|4-)_J@@E-kW1+Z`9ZMLN~5p78?)xAk%y>4- z*;GGTX!0x6h>?93*-my-Ua?|N0i-zb)^&|g-1MCSdJ}&^@gL90ay5CVtW#d^agGee zthFHv_l->2&MCzj^?h~djwi8BaMpE=+Hbu0%Xecaj&-HtU;OQ362JcbUt(x&3~x8q zGyOZfnQ{}_sT=Us_Zw1Ncr_4?kih z10{Uj*7pfzsy{a{7TAn+Tx{jQ3u7eARb zRJ^(^_x|#nUXt7QYaOKjH%|U*8Tp$3ce@Byssd~7&{+iS_39bM9SE=Mtq_jb2tj(T zQU6=1@IL)7A_S5Brz;+g=1?C1ysUi(V`s@lGwxiHtj z&9i|eC3_3#mKD{H8aVoYq=+K+FDZ_E+79|1Af+`YQMU+514ki6p|=npn%fE2nrP4J zrdA7^|8O$RNcL|y`unLD1woJO;frggqJZWeZ#(cbowoVof2&1)!e^rI{PfSy!QZw+ z!EgLm;hYECdjHc4@aI?r7_I&hjIgjQ3s^M&RU8Zc!*c(hN)k|Hz-|Apl5Gwa#ebE3 z;lG*Z|ErK^{{PSfkH00WH>@e8kZk-X(Icb#4ESF{==y$L@T-Oz_raufe+jI{x*E%x z*9H1LV=$XLChTJWh-W{gP8+D~5^_j&^&AaB?@`Q&$PpXl!17%3Pi-(&xmiIs4Ov%` z3Ol5=G@}k6-aHPth;GpTRnxcMd>4WtKm5Q`va11iwEuy(fP&{nMSlF#67bag|8u1NAL#3)3HwjsA{tk&&8epwO77kxOHFF-seF$oew5)O+VNSypG9Vxl1McH12AIp4wj z(Y~(fj2`zMBQ?xvGU&HGo5N}(yp>#Kx8iI+vt;i-n$u$k#*i^7)>;}bjp=TS>1KuD zYK!6B1*RzjO-&#gxKa%vFdp~1+kE=Xsyu8-F{FSf*ATg$Tg+a~iO2ogc7ClnXivu& zTvJHT`@gE1a7o1bf|h_WvZkVG5>OCY6}TxfJqpN-6GiLgqtj+jL>B_*ZWo{SzHV7v1TCu|n3o2B(1d>R`s{aXla9ic&0z~v_3bK72! 
z!F0FAaG}R^S3~msEImMjrml+i4YwwSD%4|otH9K4_u6#wAO={mCRK>@&+6pOPXYR# zfH47qmKs!iu<1K&7fbzaBk!2(H<^4Yx2*7+p~`@xqMbJ+?CYe0?G?PXEoR`Qwaq^` zc-;V2%mfUw+Fg5%8-3NA_d4L4!tk5NaKZD%Kn=apvV9ZSye_x!L`!gJOUI^T)y95z z(zzQqvnu;)sa4;E?{SXOh8XZ^rR!{DeA4g~TkGJrioikJZ?YlGAT!(z0{w%cW^^sI z-v^1Kw@B#G6bjh_o|FE&oinDNl7>NfSKlUlW7U|zZoKQQiDW$Wlj4rut zs1x>$fdZ6hC;Rs@=+@j!Lp3R2+Rj}4q_6H^q+Or2Bt3E1`@WQ|TRC_7-Zg?5&WE!$ z7ck@mbF2Iuw_5RQX4JTm5nzzlZ#=`YW;)L`dmJitq`BcJnQ(^{3&cfqVltF=$`LK> zcV+&CM)}=Kp{lz6Ae-wHoW+9kj2uyLO~ zgDHdNPotP4XJPI;dxTepNHVqc!DTcPHUmyPrjs}h9&9BsnF1vYbC=ItvN7X_sD+<_ zi?DC)CZmn@z2MGyWnh6 z4rkV|vu4ZCeM%Oi--uAjik40nPS_%Ng^H-Q%^H>sqrhS{=uu}#Pccr3TRHnSLRsAx z|BQGc#ap6^cHdG%fq^msrnFr!&;+})g*#KeLE-1#+!_Skl-5?4PbPY^k>yQiEPYbK zE{c$AMgH~$`0^G%P%{I$npiQF5o<;R%eiu{DOhnJCLGiDK-M)@<#W1`xyV}$w6Krh4bbrdDvTGcXYK$=DR10b1mtQ z>&UvikBn*Rk-pL8B+?pRDmGv$%DQf!~f+Dl&X@8Osc}+67}K)UM{xTzWnP ze^1@RnwB{|&5LPETWKmj?7tz^cqz(8%)}smB^gC()XBYC0fob!jIjgu1*K(*LhJ#P zw`WoeoO2fNV$|@JfAnddEd^!ahWhou2wiRq03=6xy1PS$KKrKqjD|t4yMI~JgzDZc z-cm9XOA+TcTd3vsG>Y`Rf$J7t`-*USSaY36SNv*DJ@696pSTnxqeZ8VyJ`9{gJ8RH z;RH~S@P2oxDdNulYV;jx?-FFwy=Hl0DZ%DGwDUgd?dcS^y_vpilLNT+JF_pJ|KV!Y zHI59x={3E)p!I!d+ZWg@yaMVuos9ZTpPo|kUto5r?K#wic*5Q#&jaM1_QCvv7Ziqd zPl&tit&ehQN((kZAju|`Ty8BT`dIagp2@*ZxyR8{hI_WQ>nMU7Ah+781giU+MoWq2 zE#*rq=j!I+TNv|w6j{ty{t~xmT(uRR84tP(DgF{mYn^xke!~W@u&hzKheo>TX_+nW z8{tZI4jvImy&Axu>kAI;P2kFVe9O?Jk&CVELwO)P=E z*~-CtnS_&35c}4uDKRMAT^7mT8iB1a9f`BeCycz)hiuwr;>OOwwYop#&>I(QA+y(a zv=Tfx+4}MT3f}^e(&`?HbLQ2yHziKyZ9TKM5!>6)?2AwBDP2IlQC|ZqLaoR9ef#Y$ zRe3VGyY6ClEf+`GzxhNhis@)vJB)zLXYAs z2}w86y0!eMF6~8_e1@+_TyA__@>hOy97N>)y-v@zD zyF#(H-Ls{a1XrEhp|)EjZz8g-O{n)_&S&b@-Nu|7M)nWbZZ*97dRx;GbO9fQd2^Ne zl{FEmDCl0T96e8T?0nc# zS5~i&)-gUNYgDsoIw7n#pZ;u(p7}Y3xqYw}5$FwM^NyCcS}r%!^9*9B@E;fs?SVdt zAze70E>b>3(aD%a;o0TrerrtXc)V~bYkM4#sVPSnN*zV=xbC|iw-+R5uFBaYv>kcG zuM+it4^mw#O_7IqYV@tS{hjJ(6~GdsptnPKLz(Ejq-H0j~Z_Z zK71#W5~V}t>*=H1U_a~j>{1pusG#%M;1ao>WJW+>`=UkKtvpzRA${cXIhiW z<0Pn1yEH(J=XEVrW@t1%9wTQagC3jsDLs_vqd|F{%8Vn6L6_H%yWbekSBAS2FFO~L 
z3&SqV+HQd+-_T}*8QE@;+C@|9d-G>t@rtYW%(A>`{M|UZQ7n3D1Z+4L+mr$uTtY1Yldko&c}G zQ}!LkekY0SC~Pw>M>XWaJ(Ut8dVh@~FpQHqtQd46dKAv@-ik`@VW*nEM1c{GnFCRG zKaovnrj(EW7;A=2)yd3&R>g*LGV{2|I8{aZ8H-?A(zG#rEvCH#XisK;74Z6<4LwZx(FwauTWD=+j~WQ4HCOc1g(# z+=`nkQI>5@+Zk)zS!y+WV}4F>e8WC^M8v?Mvui~#GYng-CvsFHFV$$yOgr7GB4a{Z zUY3>$ds^Y(MvPSPnkhj93=~)NA4NZKYN00=e$x1eA2%N0K#H)G%2L@e$tnFL)h4>I zs_!$c$c;DPD#(5X!(%grG2vh@?T_LdzJTDlnP}50vmm=F?)~HA&yCBgM`honD(4^j z0b{>D=Q@%}V#!IQrt4Bo1TY~v(&f;mEbBzm&P^t4Xozt&@qtcAZM;Ol(YS5b9Eip-q3Pgut*Ws;GdCy2t* z&Jw-q#wk45@V@mJOQf2>$EHb-pw(O=bI~14623T=jU0;?xU%#XI?oDT!G2gkBu(m2v9nR%y77^M{%r&d9i(;B@qYJ zG>^cW(Ts!ff)c$d!3wjSE^jk5M;l?pL#4rk&|ODMjy&wO9-&D$P%dKSV%;194XvNb zlM7ePA2{lZ8LE5c$zP8rcc`q=lHl!wM_b+55Nr2b{;p3a%H5$5mV%-&GjKF4IyROA zf0h%QR$VpK(M2Ty_WOct+{_4Dx2z=nm1+uyKW$p_4bdOtJ_WzAUHt*~TxNQjW|#ge zm_-#hNr@WKIoTKWh77u^*4Q ztWJ4dTRNKxBN@D=oG(%M+FrsRE~DGMx{T}Vl9B8jZ?itqPBG|^v)A_=c?@kh_4!Vj zG4DB<5DF|tJXIBw!MhBi_0GW~`@B)3NrzqJl20+JKXXiILQ$>@OfB$WupcerR7)rC zRnu&_$?kBc9H*{9$VrZue~}G+L0GuQKHuW%Zbt0>U3{!nCzW~w zI~wQ4xyoD63rhb@iDSmb`LTvrJw)DsHU(uI_Q1)9Z*QmG0MZK1GPAI|urJwdKt|mM z2%#4eBgTp0-kM1_Mw%eVK~oKb?`Ou~PrHWY@p ziG7yhng(HzPBs*2(`1~VGjVU2*lAzupL}N*TFZ&=7#kN=YN*oJ2y!v%GLA8A9lci# z(;e(T-Zu68z;3)5E|b74 zZITv^qD+E>XUc199e!?KYyxBg_xKil^Io8_f{KIEh8`8db7adg(27_3Nvy1G-8h}7 zNUKIxuZRZU<2MT_3q1rBgzlOoC+|=OfY=U|gZG|EN?EW=?|8sCe&E(Lj3GdH)H(^eZ~kJwR$SLw_QA zzDqw>%aXkB3+42*4ObSobrC*Jx!4g-Rdsn9^Ta%g=^{}Q(=TbQriJk8P*Mlc{X$r1 zF|pe?F?cd8K-F`>Lk`YvC?!~(x~pxts*`oCVa|g+@6(S#@mifpY}O88gta2FkX}_DyW?R+>o@pL1%ND z^-fAJaf$7W;_xp0<0Bpq3y#YGckLX>))_9g19ln)&(xIpg4zBa!$%M3PjU|O5VyU~ z=A{d)tu^WRHBq%78y+wj9CJLF(Vr}s=M{^}f*auYF==a!*4hTWPLuU^ND^2)yI?*X zuz&XvM+Za3PD*A%QZFNLmMja)yzmHn+cZyKV7uk8=FM4TGxi5LM{}1tZ4Qg=*! 
z^f90A4LIBzQKnsp5@MD_)r>sP8D$lvMdgw^G3i>5or`B#Yo|25)tq6k;HA#LW39!O8P(#5`E0_$+-2f) zXlal|HLj1Hv+-%WpMs#8aH%M{Q z9pY^!LE5}U7 zQ-;G4lWE2*uUJ+Vi^26H{J2TdYSU}SbW#_uUQjP*4El2pjx$Wsq_ML_8U-|chtRCH zZCxy=rUy)C3l0`B3!Bnz$DG6iS z=K&wyT@pD@&wXBqAGb&wO?qo7%{0I_7E|Ud`-jX9_8A8~=Fu9hX2Mp}BMw$uyw1ne z*)gNZlHFmz(nf6WwCSWZW;tRxIbtvfc{plQx=lL0lnTClm~UG_RZwILro$nV;h0fY zFwHHc^>7@EU+xhu=NwE!7NsTN&sB5Vr|-WapAA_~M~o(O29qW8yjpdRaXfGXqNG8i z)1}#L(2fj|Q^4Yw#dOTkEZ|_#pw~;--b{!B7uVL9I)`$~cs%3D{(_u(Fo= zVoIvzgnT|>IGQk;ESN1<`&^9&CL~NhTW>-qa!CU)l$80B`FO-^IAS?Siti;{V6 zsqk@}njn8dHCxc1$K;j$gU@aC8CBVwVm4$toiG?J7|-+7HIHM!jqt;SW~W1|)1n!A zq`rgaI;-o1BBNXknT{v)M{_2#oV*lZYfQbQv!7*^NGC*XGF2SI_C z$SclT%kgr^oyh~P^*`lW{}Te^bGdbnH@mNqcoC_ee9Qh6&qr0m;c``h^(3>53&`uQ zxcPo6Fb>u@ZclvUBAYSd;46w-)EfH%&md2P0-w6 z>(U!s{;Qwy)K^1)+9ySMq|<6B(2^?>__Q;wz;MIC@iXzyHM`_kLI z{>BB~y1Ipz1Q@rb9v?8d@mubH_yPa?u;Sl_F_*4(SzixX939htbiin`WL$!2ohI3O zn>XHmlV5lt7rRS3{SO#Ee3##SdY}Jwd&=RcAS)}DcdoNM-lKZ-zjJWqXZ-xsfS)y< zFdFaiq^;_QB1L zsE>co!|S*BAHUt_#&H|>(tqL9<$(YCPK$OD;Q1M4cFf@64enn1fRFF)^XcOOgGE7R z9qOP(&^X1;r8l_n=8t&u^#tDyp9WrYvdJzFuKkwx|M^31_9Gq^+x+-%|BC;-)8l;P z;o2NK+GX;|KXL1$Px$|CEjehMr}LA)<-$*T{6{xn-Ob33?{IwoV?MlohYvnI;88zg zTtaDGz$fgSrM3MAXD(miSHJSOnueIC^VdB)@3Pn`qsot1Om;aq+~d)4MWa8(ndZ#% zkYYZhy!(6hKKK`YdvC!%?E3W1bvS>iO*xs+zrV+zKV@7(74?WWUgyk}H~GcSuJHOP z4|D4pqxb)w8#i}(?|9CWdCkHOS>C(OV!X$bCx64EGGXL52uw*(_8C3A#>3mUc<;t8 zAK&XUSk}z&@R~cM>z8=_?YDXB?T{;{0wUk}BJj#57U%sw=h@}K5!@a%=)U<=-u}CUi@lPwNB_d!%^Upp_Xpe@7^2M$8od_P!#&D= z#yq-Au=P4W`NeDehxRs|zZ;oBOB~79}gK<5jJSkPGWYFilpwd9Ng!@hu8V!<2!u%c*yQH z2XxUIL2H{cSKi|FpN3pH7Z7atxMiR6;5`oST;qTK_K4r!UedX8g=ViqG(N^Xc+7A- z=U`D|8m9<5XSn?4kNN4(Bd!Dub`mhl5&6+Io_zWN|M+kF{O;C*N{L|=V!ci$>Ytf0Jl75e*1rM@1yJd`tFpE=52yE{xjP@>G6MFGHfOu zo++sEF~j>Ga_67^Cm%nUaBbYezw|ag`M=w|o-{B{iC>JE?A_qu%^Tdfwacduj(IZ8 z>1Q>BZQP{8#`!n6@cNH=^R;ul9eQ|PiDSo%4{r0xKmQXSzI%%YW}AW6VmltvbjJ+m zH3zeZ_8ULr?0@L;%L@^HIwi~IJQ)=nWG;2DL)?jooj&E>F4z9yI=}gs`()c!u-z@z z8PZ+uGo4Pk*UwluDI`7C&%eeSKmQxvcr7LLe41tni(MvrpYZ-af5dOzd&DSQqv~$b 
z$c{;;2h56)u~}#P>RbFOY;!e8@RL>5_p5iM^CfJJEoM~nL#DG4`v3j#gD5dL z2M-xfhRkNC*`Jm)!+@6Sd{MPujj3n%*uU53qgykMhf5l*n%Y!M78T3fVk>Zi2I-kA zbau{h^>mBSHMk6^XOB31ION^;M$Geqv{6$#s|OQVZlQMYqBhC)1=i1==e5&@MigPZ z6esG@Zlvs_mTZ}E?_fedX|wG4R88-XdcoSNpqTVo3=fztVeWNUOIn<5d-VJaZ#krY z@Q}MVAMkMRnEgr4XjxO^u1cv&mUV;=Tq|I+Z^p3@!{bb2b|*4g>}}}W8B3t`SBxm@9%Q^!4XgTbNb7goDkfY zxUMIG|jXEUb@{v}vrLW$W}=-g@0|I$U+Eaq}6oM|aqN|6T4i z-{N5H4bGkRXolvG0(tMOeEE$ zQHkkSLnfX|lnJVQAMrvU%A9mNWiz_s`hLdShb;>)eP57ZA?Y^t-ah+Jo^g0!a4KnN zN=&)FWiz_v)y(0`#gLCb>{DqNXu5)ofFg!u#%40%>z^<1B!~V-&)9eBD29Y7<>bMF zVDZ5H)dTNt7fiP?+f0xN6G^XA?;kKcJLhm;;c3^Tq5&cyPqw&|YepBBTuxnXmmzTp zg{-649qjfYdj|uaD;h&pLy;upJf;X{go`oPZ|}GsEtz>4+f<^I4Nz?QPoC31J>#ff zL(^om_g#-tnL+LoZztTpeaH3nn5mbr2y${%6r)O|HKaK>=KQ3OR?%o^yLl&z9FYek z+cC3?OYZKcTrWdrfgqJE6y2m<@u-?1GhZhu_;-R=NuIa_iyNl*cf7e-ay#`13P{k9 zj0SdN!2an8Cr>O6nhLrq?GBBEphyFf%`LOLdtP5oxtsdNUa3Jwf zUB*!bd6W{YB3_Sm{>N7E2{L;PnHs-;Z6gXn7N)Fb3BR~V`Q<3*E)hXFiwiEnlzS@@n&rYyTcwwXigU zex*xVen%VAe=(VWMN)DTmMl~8zRL1HA_0ah(bOe|ro<-=g^%kppVk$QDl)l%c?{dU zWSxlLR18`w)YaYR+Qu%6D-%1pX_e+o6G4;dn2~@Z@GT+f?vIO&Fcr3SCfcB21%93g^QSN@FIwXjZ&0UwIP~j;^32Q9(Z{7 znwvMT`RdILFRrIdgABirC<+Owl-PzDn;6nECzk}7kT~mTm}y9|8RLyEc(}@VHM0oC zknX02mWJd(gzs(fJ(px1kgcIKOq@X%fy~o5MdTS-o|9w+S-JC6mhy}uj7g)EB+JRN zlp+twl7KJ@@rfwnoH#8hMTsa?|@-~U8eLilvRQx zX&@Pp=Ty`dQdPzvBv?N%yL`**U%ueQ*EhVpSrSSWGNX!-D#WpmC}w!ch?|AXjo)Oi zc1X`pXgN7ue~P!b;_iCEt1;+{HPv~ zW#;8bx(WjFD22s}xu3HLQmRdZ{&5E@%*oS~G|x#gL0*&y0cDYq#0hbnk`x75Sx}@g zBJxSXlt=`GNlKIzWJOAuXSlO_7Pqgtc=MKDfBlws4{OG5L6{amfut_MFPLs3N=c?f zqd4xMbrtGzf?jy|>k0Rl@3?Sv{6>J;s6s8oks`csOXS6D{3XeFLnt?i)gD#R<*?aD zHzd?@m)Ydc7x>$lL~0^iE$Ws=gOGf7!|eLkT)uqC%h&f@Epuk6iX<{LDI+aivYCsY z>7>ShQfPE54NN7bj21-e8%DPiUcCybCxUvfOHIT$QAq5&#J*4HCggLU6a&R+QDR^m zRZvLCqb0%WhSBWdpwp z?-;o&B3ICSy2qhkpvVfgmLqv9!to`GtJl1Hd&_0Kk7}nlc}|?UWNysl@`C$UFZlY^ zn3p#z=1EQ@sk`s=VnK>ZB3Z~H!&Lf|W{#SNB;k_fXvWQaMX5YvQ(N&xOCjl3P>O^+ zS+kixaC33V+v|u|(LVKR#IvlR%tLZ_hP%Gv^6g7reEphNS4*xYK3-OkNHV$Df-=L4 
za-s~%GDlWVIqj+JSuts}Wi`6z;q4nP*)UViQlP!$v4EM&c2A3#Q{4UM5rN*yxsyBI}rD33-kX5z%(eQqicH z;5ZshQ=_8TI8~cYTW7H-D7I^YwL_3tB&CibfgMg9AO-Lmhq=$(7o=!t`mV!7DnpX(oCOLsTf(q9^wa{qD99s^XfYv|O~g7vwJLN~ z<^9QdPLW3V%LS|HoVjPPGI9oKghVGFucZat$}i z$qE@Yi^*e`AShV55q^}Dm*V$SF2B#?%n~Ah%62+pG+A&x5837trKX`GqsTc$ma zY*slB(;9>Ql_30J*-&xIUzczU`P^(h%%V7nVDRxCTh{bRBR-pM9m{2cfs^- z%>Bbo;PXlip-33AjG9G=G$e^5@?4;l?8fuRBab}t$PXS#+I3*5%B~1iSHHagm#_>8 zQ-zauLe`QP%?h@gjM+wIohSr3NO_LGO?fj;xfrKhZUxIiqENtAL83&8GG+^f7rG3o zg4KXTl29}WLzB?t-TTARC2Ui|R3#KqlK2_RP0H0Y<(G?+akvw?EEO~Xc_GNdjO|kJ zW~@<43JnGNbwQDrWJ%5@P#C)A-FjWi_Np!F--_GfueLmfWWhND>uO zqCilHg3!yj9m_b1#9>{=G@vPi@<*L{9{FFzqu}+(BR^1zvSb;rxeG@8a`}ocS1*`G z3!*&6R;~Z)%llpwLa;~$w_(XT736;)=tq{Ip~EM2nNR96pEp#_9GQkDkqUU@6@0ZR zcfwRylDyyjIvO-InWt5mpSDzbro>TYXBRSxb~cLR7&e*sgQ1sxzWaBRs3gk&Cv3}v zpoo0J+ZRk<{J(tt>nnbCy<(bJ__ZFxrcBoMLI%p#TnY$6rw$1Mis&IV1q?=;JE z!F*Ye$T}x`T@Lq7ki7@O``1i1K9}3KjP4u!^5%?8>CmfpX*P~HYEPK7m&l@E9BC-^ zA?DzmgOejZIIMEqbg@hMd$etf-(1IPlam`7$HNX!e*BDbeUE?ln(@--&b?%Muk+RQ zF;cBVw<1yhi^8EwAR`O61eafN`-^|(tBWcB`fkapbV!^5j-}AB3d%TPHC+?lz2@58 z61y8@T>kwoJEC;!l0t=^e}0GGF-xd6Lj*T4=gNs~oW&zvIO(-te=3zTsk-u+5uPhkJCI zH5@I)$QStAH7{2&E8iskj(Jj~1>rnI#5&zpo%*ni+Bl*_z`yhW03ZNKL_t*6?a-?x zRQw4K*RT1vFK>AD5HT%UXuT@O0|!S<2*)>U*ISm`3)W@C#bFm`{wdG&KK=b?w3mXx zU4rZ8%nB9VZqhk<%JKO#&U;M`D+!JY{_lHyWr#>OjNiTF#XtWWuP*O-IrUlQDpF%e zQiCU}q~fZ(cC^)t9_^yWnzB z650b=M|-qXpQf~8IrDj#Z}1-OcrCv}5h~|<2KA=+oeFPJlmyc)a&A%CYh#`s&?`4s z{yq28h@1I@>1BmnZjq)cZRe0mQ=stM4T&fO;oL=JE9`?V*4Y6);d1JYn9n`ljP6<9 zNPMZZNit~tU5%4Un|90QxGfmPYl39V+*|WzonfmA13O2ox473Mw)YFds6ejTbWS=9 zj%@1YhIDbwi(kIr7eBk;##2~ibqYI{%Oi`o~Sa-%44O5%Xn;U2%vjCRhR?QnD;&yNcOt3WRA=X;rA0DMoO|yW0oezMivH zs^py``pp{qEd#6c5y6tp%I9`^%R1b0X=y~d#(AxzD#Ux6znr+RS*IzuKusI zek;i$kLcl=>D>c23z<97XYZuTd9Q|}W+;ircJjdZa!wg#i0>1mAqvQS%5cVfw&7}= z;%jXhPYyY*J9I6Oq6Nu%#9|w9x0-UFC=6>VHBG|&1I20I@BiJ91rr|b$6Q^lScod3 zf68I2%CM!OrCajVi1|9^etOF!jA_&~WK`_M70K*@+eyy5uts;U$4A2!j+$eNfM7mn ze!n2tl-%65Nz;P*K}s4TX3r2SyWtfBY+q+sO9=UtTdNStC)r$L#hTc17|7n|^ 
z4YnLpxHG)gyb}JfULmp^#!HQ@r_zw--}JzC>IRpdK^2j6qKS4r~WvY%szb(@UO2jlL40GH}!%a)%AYnF*`PZA27fXTE zG&nxeIIv}g24r4}w@P>uq`Y}2$RnM)p>t5z_~Mxciom-Ph)Cu~&ntZX+~T9Q!k#8z zk}|oDd9@P!BG>8lRsQKfrBjj6%93cCF&#zRxdrd08Bs2CSTT53ll}zZLqL%eMFm&) z881f#GtEFh()m$c=EJ&#l4hhq!t1%FLCLLOl89Zau%kjlmsn>CT|=U(D(qKebV=sK zmU&~zxOst_ed}=TTL>U3ciz8?L=cza&p&Vw@3Xr^K@kUpqiYs#UvhOh=gm|mskSkC zM|Acz&gvQA_yOJbm@Zc=J%wqn$7~zWk7OG01L5Q~7jG~4)vGDXqKc?>*=zI}9yO@s z3zTKR!*s)HvSt#itkM>;)#j5YHZ}DdALE^{Q3`<@$&`_eQaxb*iG%Gaq~ZZNpK-ky zv06M(j9sioo1oriSlwsPta8{;SqB-jR7I_KsO&#yus`H%U^6sTO#l1L!1DcdK#)Kl zSe)2<>_0!mSZM_12;w!P?S%Qlp_pu_?#29Q&qU~d?gM#0evyzxYc_Z9xcmB7T--&x zo+-%fE{*<>X3OB9np18TNb4oDUnQc?TWLGg>q$x$dtN8w%PDx4G9U3P& zqAY)tBKO@CKuVNA;hS{!dvs5F^rbqfG{MR@%-jWwRfU`B8o6I`-fdwHPG~mfbd3>f zCC4vyDzz5flc($*>~q+t&|!wOyk>Fpn%9?OUXCQn=72{3knXX@lU~9yzhpd_@Guc9 zeFdXlX~EME;I> zQf7iQEkH=KZeh6RsEBjXh?bL00e4?)N!5tJBqd*5#a0JYzlc z@#2)xe1W}zzSiWZcTA(bqFZ@jriQFa6}?iYy?@N!(J_ZDhoPLIsS?4z|3wicm0vu(CY*GCr{ZoR`i9+O_s2kx@?zoR@n%r-eMK(6V?Ux?+jaz zdMSxsBIzw^2ge*5YX(7yoZqmTEV-TDDa}mEXB{g4D~9M22+Uq&+4tA$c&7RWZsTE6# z#U=C0uX%m3Rl?88V$2RuIrRlxK1M6g@-q&66Y4S z;CI9nQ5J|Yqm(TQyNlU5n)pQhiIEoH#GJP6~nIJv~4QO6fNCQ237nxA;?SW zs*I2dASTZOTrXj>EhsuBX1jr1)v-jxcC%!5J7OBzc+F!P?GyITdz|+aY%#|l-SEzx z<6b|oTI{j1_vzXeou-XdF=LL}sAjO(R%fq<%x-7v6p7de`+ zo-Md}2q^kJ8m&XRr$atGw5b}hHaM&*=#j^EI%Y9lGtCak8$F&Kobc?lK}&W~HY%@!C94~OQWi+!w+nsupHTY9Bab}t$X@}wZZAm^R4NjUx=ht75lKN37X zX@TaZELRCHXMz{L%+mv%gQE&R>B@X;3!)iJwUpUK!7rx*Sygy)Br&Y3oDNmOsO0Op zV2i@@1B0JDGdPk3PM9-~3cPjBJXDw}8iyT&AAex-v<8);z@J52BneWG@$+@gyil1P zl*B^D{Jz5!yTYX;Eto8G?iMBMp^DZvIX=+%$DxE3#Uz^&HO&}LbJ8GZn+UcAq>rz? 
z9~6&**CUVoz(|UeZR#-&X8dybl3y%-%|kGu6eTs?p>K8guw+)W1=qnhU1Gb> zm+UgQw%INt?EhXABqeC?tq~6@5+BrL7D>r0-q{-ddjt>{u*r5^VCjG8Y5o%tIim1M z;x*IRhPzSB#x>FPHpA2Boc;7CeAL%Dwlea?i1pziv(2vhxp8{VS{jS%Inw%;Rafxp-I%X$Q>L-Y z=x&8`vF6#coEm@mVbnK5R3S==I3(XL@s<@vQ{pJTapK6p=hvvySdwUi3 zt0g3BR-;>%*H_$m8PoW^=p`v&v`{NOTKa+`H6{KdIeEWD!5;KzfAT5EPxm-Kci0yy zxjaB&!-czHI`Vef9xopMJzgdnQk7A)cW!RXl!vo$)GMvI!PktV1ehj-B1&tw$`AfQ{uq{}bBB zAM){Mdwg_Mp`jj8YGpai|ejdN@^!{klMpr`T(Ubggr@DIhCJ^PDirDXTTi#*kL)hGA7_Y-rqO zHm2RCdvwI%$vy`yn@+SQQU2orsi33?mZbhmR;w}hQNdjAArC%aaD2-D@!1haU6Yy| zQf@{ZSpUM!#U-!34a@kJ$@+}9^Al_}r)$5TostA<1KHf8+I`N4&(HbvqaH^_9ciu; zUCjt;6H+%}Tm+Qvy9zelMape_rZr5akZ zO}*cwl3D0(jz8Jr=o^eMBS`~95|C#(dGWi4n3AHQ_0DOYe!|)FQ-1Wk&weGOmUST2 zU=g5?eD0$Ml69B4cg{2v^zv`bYgs{WpHM&ijMHc5{P_8RVI{%}nmCe9e81#!5pfxh zNH#qd{xM5oV|Tl>P6yQIW3*{bF!yv1(rdD!U0QU}`^aMG-BY-Q-) zgw5!hP2l3`I@-}0o%7H6;KM#YK2oVfcNBXC&MzO>eB~2)9<#eG56v$ASqt0g)2lfg zSu=uU!D@Ah=j}5OyGZE<79(75!Aj)Bj)isDrLkAX(GrBWVsgFW@{PxKtz%SL44-_+ z@rTcOwpZhMS0i6`31)p>y}ahDmviPDm*C=>r3I_~I&v=FZ^C52#4x)YJbljj#~*Xn zt8-R2X!kob9UaH_5Zf7nyTvOjlzNM{ZPB+CZ0*}C4jIgS^!7Q2XCL$9&!2JH)j70| z=qn1gJEyMA__w)BkSw`h2eg+N4XvR1&4NJEK<^?~_Srvu&X0cboWpLHrf$<~)#%zf z7SdmdAHIMxqRd_VFl4-iZDCO9Rd(HRmX5BO*xe?z#KpRck=-ew@32m47^a5)?VXuy zASn*E-C!`Rb2z9_t*EFfNM#kxYS5{hgiUxzQ`~uor=;X{K`H47xk}Y(&>Pk{7}z+L ziXwxg)zK}VcHP9SZFxvimZJb)SIKJzrddU;^>Lhnwl*OyTvmR<)GuhN5{{fB;ti?4 zU=90fWJm$X$?>5%bxC zpxYy^s~FV^%|V6vR*)?=gieLv&>+qQYP=y0x2$3rx3n=T6`Hn8MTCTl34W>(n)|ex zLymh5j#?JBrlKnjqBf-7Njd2*kVu$#A)Ap;U_xf>vIvDFqdN}Oy)KRJF3+rL>Zp=} zD$B^Ro+nQwFEfsWMZ&>J4nZZ)Wx29_!zmj$g(izla5vYEkVD}QHh*>A=#$sj8zQi)W? 
zMBawQWQw6l)P+q$wo#g=4AcRC+trCBhr@oIx}G7+JI9AV6;T#s(F!&*f?Oss+BmHa z$Ac<^hDk+L(TzGv=Y)RV;$t--mUOyp1w)Dmyo9wcQB;~VTgS9}4cZkITa(e1I_8wjuaX_p{ zf7Ua7{nz=O`z5^ z#Gb{`BH?gZK$dgo3!2W(vE)CVx9nabiYim>m>fNo_`6i0De0VcR0byKLQv)YBr`v&YA+r~IV*IX$zD zDl7lhm;47Q*<}$e6G2dj-|4dYrnBqRR`{pB$|p6IgNj63-&M~}-!T+<&`D#;e z6O^R??jS!cAuHk9FNunr5_n?E3`~UsOXh=`!V9-Vk^b;0LQ=vy6KpbmlMVJ~^NrvY zZ5U6t+&;u?wiUESpQFz`5U6U2NrLy z*=$N8y+iB#1CIXT|Ky9OeSSJnscQjzyEoVEJ?Bn56ca&<}wrd(Efz84?pK0|M5Ae{VFw)p{!mL50})fkX10{e!S)3 zZiP}?v+il^SHExP)E~Jd*mY~g0oitmyR;cMs;EW}=kSPwy)K`3S~RLvYKDfTfuY1C z>kaFxUvv3C{xAM@67u!7+=;ldj$Av#?0(LZVTyJd6ASTsI~so%84%e00fR3-<>+j{ zlRcRuQBs(F(qu;cDnfDvS&)$i1?V-*j!L^#XW(eu86}#qaq3-$XQvz-Hrcacn0ZX^ z7tF6Vgua8;>Tvk%IiLUCPx-W0;hEu+RD5R2729;g)#YL*cr7CwHRLe8!`+Nn#Xf7Z zhJE-6?T`O~PrhjK_vbndD+=s^v~Ua+ zMUqjZ5}AEW=aj+mhNx6Y6bGry(9<>XJVl>5Y^Mg6o*-ofX`T}#85j;$dyi(j#lX>N zn+jTJVcIQvhesTo9I)q<)E6-SJ%ShId%??l#d0;`Hp-YAN7#oyVegYq_=msm^WnaQ zA?KvCYtFI}m+>vh&4y(tFFb*lX)wq2#_m^h#OJ+1Q}G4yhxRG=E5 z89B*X!5hcSoH12R!4w&O;#0=qPVg!P<+r|DB~{1hpV4^oH=I7-=ZoichP51x6ZG{4 z^ZJ5xF<~7&5O2H8-H=&iVrRb<&Ll-a>zz=2`cqDy@A1X+I)iF~l)+@h^Uc;1ptk(1Oc>W97G3SRRyBpb* zH~;igJ|5P1ZVTkFOSJ4@YclHxpY6TJ^6d@Fio)_U1zqJ^f>#CI=yG)aoX`LMZ~1W0 z;LL)GrXVQ_*~Z8BrUaf#P*@aJlXk^oP*HItiS##i{xVqm=mfTIvw!j_fBTaUc`~d~lO#-8Mp0y> z@9e1gAt9iQDKZZ~3>a-;Yv@?53aze%Woc-Nj@)aK6du-nfaFdH{VuB{$IhXl?7F;m zBi4~+2ivT3IIM9vw5jRu?Hv^d)ojvfm=sNzCMoVb!Zk8dTOiOts#2}K|7_pJF?M0% zXf^aoxD(52Fpd*eqX4&Mk(D}_HMHsg+xBRymn?FZl^;{_a!wotl$O#-XUBB+TXY?RnkgZbDr!C8aKAyB1-yOj5p3r~p-ouS|Ge&|KMChSY_BWsYnv4T0AAiqjU!pvY>mnWxdGQ{L$RIM;>|Pkw^ZjkakOVMVF8j zcAK;JdGBFdlFkd}OF^vZ7NJU9PP zr?-TnDb%_KMoptBBtDi@Y#q9$Kq^X-T#y$66oIVn1h4#Cf|v0pjd543q069^C9#*W zkYw&OcxOq-9Tlrt;j9YJPe7GHQ6!Z2g*%ULy&n#bg4ZLD{3lX~k~oh^%M49cu~Z91 z`u$tR^nGX7oj>8t_KL4ouelAzq-91`cW4_8p45){y!|0hY9}=G8ix88`Mv$8BLpM` z`1wv)l7NZ=jw-QlNj#}4eA-ZW>d15&IcBv003ZNKL_t(d2}{{+$ua?c0h45x|Fuka zmFd4v`1qzvYY~^s<6Re6R0vE3I);R$e0RlYAs{IPN%4nszP>-Z%&ekJh|&T-fkfy? 
zx`Sy}s5=%;#l%u{WT}IuD#(o-Ei;fzld25kw(rmXO3TL8uABkYM{9TDH%DYtx?nc<0ZyP(1E+;ZaW;_3N4q4GYSu zWaFTYGi3LGv@>LwWen1U#A#%wOSrR3zu%`D&4{d_-jtNfoVrSBI`FznzjypCe>78t zf^b9-9rN(fB_DsVz`a~@zFtw!&bWS6Qm?OBP6k|@j_CJB40m?ffB2Zay&eyGhQQR+ z>m{4p8)mn88~N^O6#&(UNUqt*JXP&}WWNbO5d`hLNRyZ-^n^}t=S7H?ZLJdX_PkIU zDO*yyt=-nn6&(a zP~y!zS2{pzjh5zhU;Poh$JQ0)deIhmcOH?%#G@hE-iS^&BTGX9=?S`B`nws!!zg;H|-)wN@lp7;J9mLLKx`z+h z>BJ1=2A5aV^Ab}Vtf|PazUB7wgx}2v*nuPSw~)W&vu6_~c||D=tSZW-!I&;O=`lL& zb9kK4D=XY`%y>2B#qAO^pV56aVR>8An2a&2_64x}8 zWkZoSn9RPJvqC|TB9b9d(xn$Cbd!h#_+tylGw>NxvJ0Au8yQr74G_fTnU>%r`71Yx1F_QZ@B-&gOPb z?o&)>K-B3oh$X2qxFV;qIfdC!POs2ke@;2O;6_8H0KGs$zuNuKR!i*Sg5{*(dY03e zf~GMvUO>_z8SRmdH>73?Mart!XS%ZVCR6g+oWjPKctnyW^aD@gEN0W-yhem6I*N${ zO{BMVMuh}IR2Y(^0ZF1zmBH0D&MF)pM{8RtrHSK!P`9eC?;7iOVrxHzK}ej2q;Y^& z0won*fY1R!5)mc=T3V((a){c2IA-^hlvA$y5Nl25JZl_Du9nk4^ z*y+b4c>K@x5P6TS8Ymj9)p(T>=$J%nB6auN3LzrGSkWDXI9H*j2jn3ImPTWMM!O zwR2Uy9y?7Cg~Vw@6u0(?#33+PYbYy2xh|+Vr*MX^^(o~|yzM935|zb*>(g7V%Zj=- z=&Gi)HT9;(Rvs@SR1gvAfavaB!7B(Nf+!}50y?3hRD$~ZNADi@xW_&2@oxzpoCQ;R ztObNXD1j0lVLespS#K;k7313TU%sxXXOd1JkRF7$IBWRwq~`05WfOSf+Ougq=8p{j zx3%RSoWoRx+0t>kF4-85aqY)jPhB)rGsnvu*2cfKoBU6)n}k3JK^SQsJ`7n|n6Dr* zhHC8iP1*43LNVwlx`|?xDfWAshXX}F62w1Kt$vR`5AF%Cd)(vC2NI=Qoot`0eBiX7vIg1f!_W`@LiSX7o$`VfSx%m>rVnpA@I?7l8vR z*Ah_8mhkFmc#sJGeyI50_B6lfD0V|ZqFeH$vTfSJW#hT69G68~|LgnGz~2#Mvj%3h zXJa6gf?*(tr9cY7yG}Xhp|P#H>rYQL-#QDOGnB?sSVwNUXMz*+QV=4njxt zohNk)uN;9ktT5QhuqnWp5E&(OlY~(gl0~Y00VeiZGNdsU!qQ**eCR-a705+|ei!AzGp=3M9dHypaMSbxRxw@q@+S-;LG_bVoU9 z0N)Z`)~~5-MI|IQN|D_mQJj%TN!UIaQbz>YkSHCH=!nD_OjTm@oW_JSKT0(PZ#+_O z=Wx>y?h&Md_ujw8t)>`Lv3Wk@cCp|pzhZn5@-her!;~QG(La2Tr~l#q;G@6#1^O`@ zmL^SQ?j8EG~HY&uea;{f0zKB4P3M3RA3DZW=bE1dXNGG!$`-X=+?)@YW%e zAnIu1et?SP)@i8`#3VYP9|_VBRQc|w5?T^=L*h|F5bM?{boacj#+e1{+gDtD@f*JU z=A2JX=3FfcDrYdR0<)&7O6Ix6>VLTqd=HqWq1xosb&dBP6={-TMlwhV0);{#kw~qH zhY9IHO7OA*ps5XIUeQz%_x9!`CFlqdgalHe@2-JF3qi15?{6N&L6hV131z-yQb_V% zLU)ug7$roJ!kG>A;*8npmpuFAIiEkfA#<l+GV$&G7YX6~+mKt-Cc8xur#W2$f>K;2y<6i8)pzNRvk 
z!Ux#wfN1!H{lPOHMpw+|V{SHIvpCmWT^P=0*PK-iv#5i9_>}I$V}@Bo*IQP3gIP4V z%3kPuP7qVRhT!&4_L?;OTzG@oqp888)_|XdIhb(S#XM)Fp``dVwTXZI2)$M3@o9 zG2KYgQ4Pv&a3-gAhKjq|juK>yP+cM&k_3|QwF0rNw^*$pwG z)y;Eeub%P6#fGnLYnJto-TQtzg25xY$A8Pihnl~CoY0HkaZ-7YEtb@?D>jR3X2p{0 z)e^Z`avMdQ1cDF)roc2c%Z;T8dYHu;f19(LCD`FxuVzvql}2k#pakLXy+pVBq0$l+ zNQ8Gd+u&?l2iD6iS*A7N*7j)ov~?cH?QgXft@EL7u%=bYdl4X1N+(Tuup6Z(T1rPDbE=ioy4?sFcv9UNW z5n+hZ8ZFi zDu*i^)&(H{ct2`qe@hTc8Gx4vDztDICupn!sffaM+}~)0Hs5el;~~mwhh+^84?AWER?LZ zz}!3j?s1QM+~a>3cn{Wing)ykuLM#;C_K`88so`xOJ0>+yNdrf7W|h`pyi!4)&{Sw zH;%$UoO#y3wf{?^Eby4|Em_fr(55N{;td}_xOw8 zp76TIJ^qE^eJkm$T*bQCaK5=_Tu$kR8MSTbhaFO#5~`q;+B!q!8Yb0@SL<`WU7m2V zKI68WAQ1G!E|0Q9KIlE+z3wqb={|#~_m^(wk3%aWz&MDcpck|?yFTbB{;IF|RZr0m zBpd6=YfoqE&@o%pRc>1H>WAtYc-qv4mGxAvRldZct>^Va5f*Kd54Oe7|MZk~-+c$a zofmINHE#{RX(J~$O=~O!iJ&wN`?GWS{b6Ib#YMLc9IXWM?xk3?dUPS))Mfbd9?Y#% zyM2yr#PsVNL2XpE_)&FQwtdN41f_R)YjD=$ocoi4pWi$!-`OwX&eP-_ek6Z(1g|^P-Ia_FGT2Y@K$m|NM0P zzCX_4oxz(L<0VphJRv?BkwuV2J<|Oh`kjbjSPx|=I)4*0kB;yVEl-%WnBx!G;I z27%ZA-CpDEeW2a)G$QB`raSEPyBrM_C#w~HcFz1-adKk0yj?P{1TM=M9`3TUKO~Do z!dkSco?h?GZFr-14aRtC1Xc*}2|=1LN+hDYPq^3Rs2`Gr0b!I9j)wFPMszz%LYK3d zPq{k3CcK_fUzN-^0alMlvmyJvn7xi9Qi9D7PKNC>z5RLDCI%oiLIxVSIp2<{xEqnTRe9TE=ewRqX$T^e3fi<- z-|+TN=dmhGsFRC=ym-&3axi zpEuYfCLRn(Iw^6W2%5L<+g|Lo$?^7}3aJr6hcJ*R*(d2`^wN+pP-q>alL1LGW2g%% zyJoY_89R%ZY^c^XHi*%^9#I;Ts0t^b_=6F7ColB9gwZxgYU{xHxihhiE841r{%DWB zTMY2sjM_U6H3!~q_s&0yz4|@R`Qv^7NQ4ZCv)10pYsaSXc(Ddike9Zt7rBu5po7pM zJEM?DL-O|ix^19;Yc6?wtK4_)t$qKe^n=G+i>)fGEAY}#8;^Bw&Y(ZsFFn2up0YK@ zdP*axD~qxIH9>lJwziwx+kGL>aR(hs;&edRj~QixV5@rafEVrhkbk&7ghc2BsS`5M zCsAF7y_h7Bxa!9Y$@f_HzWsY~_uS(5ZcLk&Ga~AQgc+p$9=$XmbK>{Zj{dFx|Na=f zO)=l&9{0G%J^sf)yVHB`scO%9<0)$JS`vhcSP5c(mzJBYi|6iq{6k36RFWw{cc3{) z1v>#me^meK%}H;JW3x6~jvG#=mXoz(TtVqT>ek*0v>-yzIR7TN^JfMLDJ4iU&2Zp& zw1%PqFD)gOS?yUhj?Kz&>1(n`anH^GSa_>1G7@Vdu6{tRfG;kKA?wz=ka zvu}90I;9(Q_(ktA?{$y)u>T&R3UGLGQ*cvE_;z)|e;a?s7xQOa=VJtdop`{5?2x}3 zeazqOe9VLFfJDcC>E``pC;^AD;De6julsEp!}l`DPAIvl9KV@aJn(T(@i39C>&X9f 
z#nqkfpw+JYylebvkbpu;O{^7(asV{%`!sU3$Hxq@)hU^?4 z5pN_$%T~3eYn(4w6$R5x&8D)rOwziO3#z(gxi)MHfiWS0jOd1%Zs>@V*gAg%ZTg}% z<=S5+ukhV#Z>uhoik}uW{TdeUE!I1X<+YOnYjIVDZECE0E6QIeWRRig5UGgBJ5-fp zRx_${L*X1XpzvTS%w|TF&nZkrAp?kF!Zan&nozW=1LZx^S#UMpH8{JiPb6ALlqn3R zXmIXr)^f}IdVxrGhz~yGI5W2UzkoSsj4aj{{M<aW#d)XC?eY6O z8tu~m_GEGxd3B))SzD)oEv2)wPS=jW_X|Cy8LU+~|aUvQFlsgM7b z6o}91|K}-Z$EyI!yfGFlB+L1=lO5G;N>k`e&79^qh6Kd(+$PC zqB0Q{hoq!2HS5BVmmcdBfe6U7BGZykiq}M%_nvB1vKg-_R$XcvVeklF(U^*LZpaId zH3}s{(m>M*LP8~oq5=DdeI7qa_-4IA&MvsQ5`6Ys&t&Q-%82Zs$I~Z09zV)RlK^Qo zDh|+TfYc2#2uUA*#O{+{@{9c*@AoB%FkmXm##1y3l^v3f-s9-uE)PaANt_@?M|2OL zGUzNA+7;#1CD$UtO)pv8SSHPox_eAKIAkwN*a;g|U?05etW{YN}aIy(n$l!$g#axOp+x8( zqw*EVFeTc5pJ6=Wh^Ryo@+@=;2 z!XrF5Q_?J6v0N^ARjiqhuUYOyESdpIK4!tr@>+K~WJd>d@&(z&l4dPf zuS0GWOs0nEQloYfc8)sq1`(n12&?cqM5u%?=(Bq?;_$;K?C$pID@gB#-dnJyRfqZv zf-XmocGyp1QmxR@fKHt7uw%IuIm_9Ca%nL)71SVh6SBh&X*VKJ1{X?#Od;0}SJzl$ zXq?AzH}BhaJ7Y0sLVV4i~pK#nXRCP_Uo>MHROs5;J=GV+m8_xGLe2{V2wD*wP zv_-E2#kPIoox|B0XKIXdSkZQokf0)oN+e1)&=go(zgFhA&uziiRGSsc>oG-LVf!KT z0<3dwKX^-J9L73chgtYG<*&)7EON)yDyCWGWOzi{L1le1-RD;y?egfTLl&wxiYLI< z6|OP3u!m1~7{ob)IHq34XxX9HAF}u1Qw|;<(wETPru@V^a0YKJwU4M=#_q1*U}#uY z1&MO3YKt+Jx^~#Djrn>QjKMSw)~ap%)%N{Fw1Gci)+g#eW~X<^#}8tTN8wvjzTI1z z#$YOo-Z>;34C!pM;V!Gfw!fUAwvNI&YWHL2%{}gMk9*wX-v%7k zS(axJ$WleOpYR|J`Jav?e>;+Nw0M2ac<-qzM`H!Lqsc}A z4^qd-!_6PrU2TwIRW-c2s`>4Uny)I!beQn4r};mJl2I&)BXMpvFNwwhB>YUG}U-7%yH)J|tTFtTEvX_jAb%=A0MZM;9bIDh;=X^T-is!3S ztTUuR%0aTnNB#Hso6#@%+tEj4LHd_&+RuU#a1aary07>TJBoi8DSBE`d$?J6{%K}# z4hC8<3I$rciMIT)>D0vbKDrIeZ4)!SR^WX9>(;;Ad9f=P6Hu9*2RXA4Q~HO*k?5dc$h7Vmw_j+eul+8f`Y< zXDrt%CTqi@)HEc7Va6a1=|?q@f%7@gGN#es8acw?25hG1#i4X#(oTZ6jQN z?5pfn#U*50&{O`%vAv59+A?+CTTIhXloe%JQCF6tC@GdX<+{R{HwrHyTf!@lec~`C zld!5c)T?Xq^&zv(K7(A~T!UJ#C?{hUiv_Eyp;ifDCnoJ@#8J?u*%D$~o1rcG-qaOE zQBxHal`OG&$!1-VZ`zs=ulvUPZThAfBV&BJPd`gZqArQA=ruFe<5yh06hyvaHd`?t zub7=(an(Iwx;Uneh73M9;Nb_q=9k~mP2Sd+g`+x)Ir&k2yCp)!c3#MzMnb-sMr|6c 
zrmm=)j7Cck0dW+N#Tu<*!eod2C%@vC|L}KwxEu2{voxzE<$B4+MAWKB+#504852D+3B1~y2I$BzvAfce$7ArdXJC0Q^d;+&%!Y;&euF&-u?KyZrG|koHLlF zrmkz6$Yb7>E>#Fr7!h}RL~(+a1->dNr!zLUQ`W11P0tZYL%mwD7;l(el@x0SLK7ts zSvMk1T)RHrx^5eBzOBc?_os*R&eG%?irIwCDrZH9Zq#R#MeHS(*sUoyH@tfGiZ6e6 z!OMD&Vzkf0$G_&+|NB4ktDT0&ddgRFgMayo#k69)ZSZ$R;Ct(-mTT7IDa(1rGS~DY zL*!Si@-_2K!?KbX9}(!7P8iS)Tf!@U-Tj_swI;uvu$pHqa!n_+=xV__-!NS_EJ|p6 zj8-vO5YP)kA{i2feRdCqJbKb$bTLCQXL9Yy7lO+6XvBd1&XA`MdmJBTBr&LRTX&}u zqk0H9G6j-`Qh0?6H?AVwoTZ2Xyw18FgQ= zBVKZPd&zv$P_A#8=Z31&$Mug%M~93$5ra?>nutIqbdrF6;)ymnd9`TIg=Nkx)9mAs z`I2HeVYOMYs4O`VF72VSF0l@Xx8!Gg2uQ>>z3uM>LAVOclJzF%c4?U#O)%=w-S3hm z0g)(w|HZ+3s1~^O75VA`v-Jap8$qZoY(g&MWhS_`QO(+=Um0iCI( z<1bikDpsomOUTvSvf6}n_{upz001BWNkljq&oK1fiyBGQh|&I2C* z>aThF^oXNSFwg=iJeV3^7u03J%7hd?qML?fi6Ybz=euNS%u&}<)+HD7IaTekw+&Gs zk)4?Cenyf-1ggY^iXaP+!a$wVR3$}YD2*df0_k1b?z*N}H5BVMy`Bt1q*8D8$v5#i zKhHSa5*5Z6Ol_&H!y?<-bKY(1Pt-J(!HN!q30emPh_=R;@CfJd#^8*>T8DKGfx{X@ zRkmYpoX7ugWXm5zh;2H$fG~haTC8chb`;b!&()sN{|NJUGy2y1Y*ZKrdt z4I;7zQx?=!Nnsqoh zA#qfZ1q=LSN~R$YhNf;Ps)pJ~j8`bv{xyDkLy{n3ji1Fd>kzdp5x&ILIi+bRtV1ISgacEzwMmlNziMsza0vI6CO__+i3HQ*m8gbMo!y@E=n) z`yrQUg}Hvg^yRm_y3F}z(WC44IBY--JM^ABz^{(!&BsJ%Ir+C=aDwDFSBDfw9eQGc zDld5X*_ZrooO7!CRKxceJbaI%VTZ$HMi{{IPicz;83<$~ky_wP!{+*e@u$DxMgEZA z6$75iiui}}P5wy`NEHxdG4XDOzO8V}lKJI1=fC?M&#GIF9!liRmyAEZ!UWzyK&NaCFlA?IU zSD&8q<%>Dvri0D)*c)X$*$dH?qizLKTP{jN+(9Hgw2sjH7}eIF8{1SjQX-{8$_nq6 zEXL=2`{}3X>Iv?_h=E8+4~HB)?GfjOs+w_jdP>ku@ka@6=-6CcusFM9Tm)>?4*jPe z@ZR5dIPPQ|4Ia}SjTt2c!)8Tw@dkMUOhZ#Fsg{OCal`D{70hdX+x}=t+y)`mY*tsCeEkL8 zR3eV|IcO}-d+A_6d7?4C08MfDlLRZQE??an^#Z z@ut95E7sE~H|0ytug^JM7u+_2T39@GTUqZ92rpM-rr)r70nd`hWM-cSSxL{{^8B+e z`08@aGt*(yIp$z=%uX+5H}NDJSll(!S?v5d`3udn-V-|Rkb|@$t**KJ>PtSo%{i@8 zmf0hAcAhZmb=XTG4M2$yF*>Ak^ge@r!Jb&Mww9`vRN)S?`#$OL5f8dChY7?QR1}fy zAJ9L3LjUWRWX&3LdBHUKf-^T~R3>!H9404RO>@o`0=fGhqYr=0lcy1XuVd0IfN(&V z?r}Iw`Q?sdQ_Yw!E6Qqvz!MBJ2Jh{$^Kd{n35i62R9%wpkcW?k-2Nit)vKCme#wj9 
z{TAm+77uz%_5#f08JnBueEG#o&Q>+`V2}PUp0fY`F}wW^y<$fA{ZcGJbiZEj|=yEXZ(9INaq<$|Qm{S%#+@Y7R=$%dIm^syY z!OLllN-bUMfd=PtHtRX3=MzMf5+5c6sUvJ=L*!#tlXK3;OJ1!6)hK58phK@8y-E8j1gIFI5lOn_D08UIoW-@~ zcAl^(16+K_LE2$I)AS&1rR zWWrD=@=li~@F-VM<_pGGOKz_#Owc7xcgeCY-9TewMUyG81=W1QYLT%hLej7ybUDk} zl#BBfXIB+@;h^(VpG~xOkyJ!c%Kk837c7;v+EIBDO5Kg zYSx(51(U05o}W!w`H;b0KpH7_1~GB9qFOJweSXSR?=$TT=?9L`tjQ9?2p>G6_vna`Oy~{v=&ok;-IQ__a&m@wwYND{FnqRWGddclXGJGN19UO2`ZPt$1?>Xx6VhLgkjj@rkLU8Kmeuyp}(|=YHKEG!E>r4?>5=IrYeZl;!n$w(g$IDWFjhaU~u*=dM3 zF7Q;m+jQ~$V}9|=CzQW_#BOAXuP6rdEu9c0o)2W|27zpQ__9S=~nHQ_*r8s}Eb zC#O97?2}gX6r{W#r|cgM*`N1_Pi`o$rktLhP=ZaZy$ic$X zJpPDyOL%pLH|!)=44VtCFFs+mIN`JADYqvb`pp zu9EAW$2JX3v7%fqnVEB*KfB`Dvl-KAg(Yn*W^1?k!P@?iswKQ&O}=OeFBOFd9ZvR0%OU|y&Ia?Rp)(LgkQe6)J$EpBZ<0fCTtZ#T`&sZ-Wa1gH9YfgE2GUn^+6}Rpf zl|3dL9<$TU*iAR2`mH_MHQ3oRHpMm1)CHU6L-xWAgX)Z%%UeFZ*)aD>tVww9HbH0hA2`yT1=F-P5mgQTqospA;EcS!%yWBQ#j znVGS^JZDl%PW+0*_Q>)}IQ^98=Tp8Md;IagXJ72I>>TmlFyuieYzePykAp#n4|g0V z7c-tO=aklC(=LNy#^AkOb{-7rBq5Q|NYx|h3^_U;GQUar=f#5Z^qd!8)ZAQNF@Dr% ze4J67f6MCfgsbtI>pDU{?z8v7Q}*6}#LmFdYsB{xUU)DCY$j~-oN*=DL@~SlnB8GW z6w2>7Q#_bk%GHMF&nm|Gj0Z!_FZ%`S%S&E-IpKCDSgQe}XpaZIjHknZVWV*Wn(E>G zsqh|OEos)5%vU)#xyD8rhl3smqYm8!q7c+uZT8y3V27$$(~qX4&5UZj;MKII=NwNW z->PNon$5;joKEmzm;Fr8Qx>4c+>qs$k)xGousE@in( z8ts#&9S$-@FBB-D5Ofjph%A|~8&u?*8P{iH8r5Z#2K3VaO-{AGVRG^{&wlruZ}qR3 z^#Vj3(jUg8X+R`G;w)kGpi5d#vFFz;#*#v$xNet6al(E_(+wpWjY_+y-9tLzE7J0Y zd=_$gGeTtvJDEh=HEwyyVsgRD+nm!)Nceur;Gjd+Z3wLR*`ltuCIWY==X|l@_SJ;F zB&K+r(0GTiYwCQ-BWk(>xvwU{>g+FfZnRGGY$qJ#msQAnsB>*$)vBCk3xc|Ak3%C#xtJ3Sa7lQ6y7r6 zM<~3^8e3d)b9TwsUl+KY$Lz;_nlSnQ*}Jo5Ns{Zp?|*lX@YuK9x4Yk}du!=#EF>Wg zF`U7Ww2@5PgJhDKzMCHOpf^!w(#uE`HJXvhNQokb8W6pA*Iswua$jQ8L*8mM2y{0H zG`b2v{LJd9DzhRpB0N3(_&J9}YdmS7wSl$`q_NI8GNnQ*1JlqLrhzOcFdH+QG`Scr z39TGgj>(7DoIO9|{OF3w+yZ~ygI6d6(~Ysi2A+C~wY+3<9rFCNgX7x7jzBEOEH9sP zadOV1mop|t%-+64)ux?%#pgNZfwo5B9n2tw0y*Oc#8eGJh zR&1b+F~;Dwr%;uSsMjX__-%WW0E`;L(c(|L<|hk0*jKF!-Z^ z!M(Pk<_H`iX*3k|vA{S@nO&8IX^loxXepzrz9)+ zTzTVP8p>V@n(@u+;ibY6wi@r@ALp-T=RO 
zE9jNVZvZ6>>Q=;N(Boz`WRWd+Ha+HYamDp=$kWktrmHEQ=`vbPIA2^-Qj%CP1HZ%F zXot6I2W)uV-^iX_uL!Zu?n=yc37ea=(JInQSWldQy&@M2c*cv{f1 z*L8mP1Ih2z6sCZWC+pzV;&bs8Ex}Q+;Y!|)74O87x)v;plIOFM55@&o>AD8*7qJwC zCWNLy|0)??pEayjC?~+*yi0pF84GSyj5HvTb#W_uV#`)}u`RS)zMRlIXO`4>%bn$E5yD7DU)ff=%yhHEg z5nC5094&_|uby#2z`L1?E^@L~14|g#XHzm>EsP5g7M9nbwRgy!$%xsf$IL&y;3Azf zy?oB)2vRGjSf$+L4zg9JSKFX}aF2&OZSKStiKVcMfVkJAf3QnGdqy|CX1us$`1F0A z2R4OUU}h#k)1zagtS<7Ol) zvf3nr7PW&-?mXzwtGl>r-9zm4&Y{+g%)Mqpve_b;WCXJXSK}LQo;~3-Y7lmJNO}>S z)&cD-=k4G%C0>LvjOD3fXkY_-x92BOntdI*y13Es~2Hg6j$Ce9YwN1!qy6sJDp| zgO!#)TLn=_Fnom5qq}p!+uxgDHD6#K54l)QS)4uNdstDU_vANyl`uvLJaluJ; z!PV3E`0-zlc`m28%L%8~IZBoIR!UY($W~J(tBmOi!q3OWrNj&atSCUSAisFdWj^M5 ze!<05vCwU5ja_>8_IUVUK)2@M|HJGHAu$3UJ4`TGkX=0EvY2o=zhXEkSy)|yWS7q2 zJ`dj7pjY$ol4HlL7PA2FNFnU)UN5w*ag{tP;5_pvk~+P?19tA+XTOusw;?ED@>}shc-s%F$XSlY zOpY&@Obcd4jOEpcJxy#&^z8%i=VN?ADlh}I6I1hSx^BVgYR1)zE3We?vsOWhq(C9^ z73T7U#l?zGbDK-6z*yd}oJ_fv3A*2--Dxw3Et<|0yyMI9@tvDpb}L=KLrQ0rR8 zNZZ9sS~x+5&p8eTg&RZE#C07aQ(;Si6ehw7QT~8Vcfp-;#$`EUdHyNKi&KhY3%i(7 z%!f>dD<*CiFN*1OYjo=tbvMVDeO(u%M3e%fnBnAS%q~}a{N6biR*uZ3EXEVAW?D{k!3sVbZQX|;hrY4|%c1?Ei37`Dq%amH#xJ!|Ykgf9 zPDs!j&?*G|qm0GPDbwdEAIy$;YUCK%jOpcwvCc@l4M4=zT~F7 zV)X2N9$i0Y<{9X6O0m4=W>k{;E#kOFs~*z!EkapCr9GOp6ZRSxi1{(6?@Jb*!3(uQ ztR~Dehl!P-j0{t)D>Mk95LS#C^yu{GEHgOICoE1r;@Rva(`P1TJ|Q1paWhqjMu%G1 zq?LrUBZW8EV4)Sgqb1|>3l>KOAEsx#FmmjC&SE%XxQbZ$H9DP`PA4EpT(lN!Z!Iv4 z8%F6Flcy^_xpBF;&F~WQ8;-6DCUy%eX;G_tbYmOaGBLsio$XChbxtvza&!I>#~iUb zvvI@%mQyB|Bd+HjxmTmti0L$ZeA~exg8nA6(qeF&bG16==E;KhZaiF-VWeYjE~m^4 zlibuuA#W=L2$bz()wifmEq2CN=$k3&#iu;^(IrDy;>eU@e#LA!<}|f2TQNzaK|8Rh zxfYga6V!5ccP3aj8N=~&u09&_!I8n4U64(#m|hDeONFH*!jNCk^RQB>R4Q+b67ogP z>}ttnTJV3(B`*VkEr5KT;k8=jOhy^wnIH`eHtQyveT%p2ifvcmDS@FCc4E<2*wxZ!=gf|5*UoKe6l0{F^ zu_gG5?WUy2GM*pL`FOtMe>#&yqM%4~M#G$!B@~`SPu5vsMGmt9yz&HEJ*a%u!+9%wW$JLmCCPyAtf~{ zMheL?U$V+m^0MG&Ibxd4IG&eWuSS@{U{z#{(+Q?DX*dZx!GL@5J`a*RH0}E9{wTi^ 
z3AhR(b6wu;Vp%dw3+7qLhvR}rlbo|f!K#3H0Y}Sqpz1-i&hpyvB$G@r%}Op-sk2a2D&S`_dwv92HrN&0#casM$s^9D4YH&`Gw|pQeB3bQaz17LD&8ec zq}#=|O?LKHlxc>cUhu)Q8?G*|c|3dpWr-{@N+Y0jTGYCG+<&mi{d-M%H3vtE*ZhOM z&hV0k#;qkJ8*SpV3I5TNo5_Uf$r0C`3(kj2M!g2q?$Xc(Z=EmEh>ciuub4kIz5k=+a^K%p!0N99NRNF{PDaETNdL z7@tp=Ph3(Q+$bX2=#ZRT5I&kS9?w`@oN|43%Ek4JL_4&_5EEgSQX4yVaoSC>(x;iFLW6$Y)IE3$7+PtKpP0jOiJZ$(ZZ$oTbsEw0pD$J3M%}#e>5Z?a0P01Q|aW(}a|m zae~w8VCN&u$tm;MlwcSd>|k>nyb=Z&A*S7+83pV&6_=^t zaW0UNM?9z#cN1L8M8EoOr44omw~=vhc!5>U(I20YKfB@T;+p4|iZWkk3tIIy{@?*S zdxyNe)#R{k6WeNC;sO(8Is~}$H%>`e<}Alk#;4az=OIfS;(9TGV-lGBB#88R zLW1F9SRt`z({f>$E*KqMF}70X^>x6j#6XoPc78^7ea+L;f+tx)S*`=Iy0(Ge?a*yE z*$6G_N@Aq1noyY-MTVW7k=|VK==8R{Y6;Tx&~b;jJ)qZWupLMeYu!WjC)U9c)3ynk zF8)Zv*@ASw!fAOp?HaD@;D}cnG6V<{q{L4e4E8q3^$Ck-*PNao^Xy#kXq`%nz@QW! z%InkXv>5Ev=+qrNQ{Gng6$tGhq9(RGAyDUdhG6bT7(oNicJPf?8(OXVSWZT8HsZyLYc4av^V_KJXXl|dpw{lut_8FLi%`@l@)h;kg8kMN$I~-TKfK~d zsCCaVSK}oIO?#mkvEF&Z#PC&nAV{0({g!aY&lw|hkjOE!m=R?5@xV=9F zrEtNl)2MaW-EFeb4hbEJO^DIkq=95({t|h5&eiiXPM#fAUNq zw2zr=5G@qj(-g%SN7qky{<7e?T=$VHijq>97-0{0b3nV>W)Q72C9P5-lANvW8^nt< zRwpkxKYhZL5R_73Y72>`NG)7dA`DqA{8cKI${VjFTjk8=3yy{>Ru_W5H`nbHL(oxZkBv;*ygpxRpV5hIiDY$%*adn#V;Su~;{-o8bG8LX{ zvRl_YY$)zTl34$0RhBAWuPWeGseH|Z5E#NB&^}se(jq4>a+dj$>(!96`4!W2%5b&r zu%v{-QYH;MVK><1Fy7;Cw8NI)NB>fFy51ayfLO1~7-j`Lot7-}f?>MO@_IQh7~N(j zEV7cLdBG<-=jB||vII3#a6eQ`(vo=&r;CEC%iL4yx{4q z;OVqr^m9syeV(HPbuFly>vZH_^#@7_fzUR(-XoHV?O6(Gj&@HG_7$U5!K_qZ#ps|- zd-ssNckXhq(`67_B({kXHU=h}dpUt)g0{eR@XjY(OmfmfgLH|y`*e37a(KAS!~Hfp zy#QC`LQ*i;9dm!ULPbM@QO*<> z^5pgs1DF!j^hDGFPv~3au8`!Qzj)lwx+B-$RyykkIvM2?G6{6!F z{k=nW@7?F#ogH=>7A<>S#;?RhHhVPFdu)&4X0GwC=lHV{;nr|!eVR?lUMD7B+^`fA z#@2wO8PWALzUQNTkKWc4OO}*UBlYaPWpEr#vo&a0EQ^_CF*CEp%oa1#h?$w0WwFH; zGc$~snOU-!nRcFgzZYWn|3+-Y=AVg~=&q@*%sf?nDl3zDilJC%5^h!)j6!Xm0$?+< zv3YRbPw3WUl#vra{~SrE_w_d*d4sIfrndHgCHkagBV4`$mOg}q-E6t%^lVm7w$+;E zWP?I?ii`uIIR3j$aMhl;k0)MD37y&m&qS)+uBGhUj|JX&*ULKQ#dTe>mL(ZrdT_16 zZhA{o96_sSz~MzN`r5ptPw=9;4kKJ~D+GbzlZ_|z<4U_<&qg)}Uag!icnF0XX})XN 
z#RDcDC!~r_=&#;c_?dfzz(T1#Pa$1CQ|7RG+4b1Yul)^79UMZ&VKL|FgB>w*Qhy;M zd(d;gWng35B9yc$czFSP529xU#Bvy0HKpTmfAhb&hF20hpN@x8k9mS0%LrF8!Nt(w@^_q)qsdl~kvY(P9UA;|&#Y$rVmaCYKxstd$&s?qr0OTOvfK7VJ?5&A zMqA_Wp(Xpf`I~$B;G>^+T@ZUZ3P55UFiEcn<~wUx)2_f_U02Hf*G0!$ zg4nL)VAR?E#AdhB2dZkseZgS5Dxrl#EWXCVMp>PHz%_d!R>7e9wK7@wcQ#aZrOk~q zQ1v*0TRlrox!B**ovK<4wRM0#ZKZ+?-^2Yqcwj+;11Wt&7v;e4=qo)bQGyhJYUe zc3Y=TEt&;}w@DwY!T~#mEiNaW2p8+!V{#n^?lwm2D}{|qCgpg@%W$^~*2z#swZhBG zok1nv)QT1U#CiB~A9?ojY?Bcr#)!1N+BsZKVm^ApYS#h}7ldm?X=3o6gR`;Mh)4$U zqmd69A_Ae_hkqa7Xq>nPC?`{miD-fw^_$r>3d=;8679IBWyz}P(8JZBCTo|pI`uqs zLi4Q?*)PXfc;c8@vO17hZGgSMV{n6~He#L+46^Qf)=N->=GUJBLKFGT82MU0P_;7s zT93fDU;b(Fd1&xhQ>a6o6a{s*YJ4?T=#h<7Uypt+_Ylze=i>0}wzuc_4imB2a6QEyGf@A0cbuk3-76dSfl=e9p4l$TE) zn&n}bZ0xNqj=oOCL!<`C8fA$Go@y;i8%QpGmazTMB%46gs*nsZ$Tt%MpkDS z$y_MD!l3*dToqu6O*JO*>XJWDl>15+5K@HBMJQh>w7=Y76xPF8BNsA@SZs>|C+1d{ zmoDe-C}7T`Q#fb7Qc z_Z}B8&yO1r0!f9Ezs4UQc{%>l^Q2f$I5$)0uJGBopi!Dqc+G`Qg#l=ga;85sK+CbOpE!Z?}?D~X5!`OF*GLEb>;1D!y)nL*}8E5IacEFDtu5&Pf zjltf*d7|jv?)(X&L{jWxf2>8879USSt~m*m<0cl9=WZ#`KybwhVgCZ$(_J94J>o)T zUbL~GeUT=H7t(=4#N^Hi)=Xg0Ih0LfngkQclMS-XZ)L6F2H>1QCZ;kd76lNc1DaSE zKw0jYbJE9==@a%*Z{jr@frLLy?7E`folGMsZhISa}3 zF1hx9y(U<-H6}=YLMR#FSzd61$d!lFlXncI9Vf)g~F-I1z3Us{VEvh~aVjE}L zfs8<6*V{(F6@sl;g>d%mc;Do*J` z(tpsimDeO_#-`0IBNM3{z*N(L&1gGYD#2#-%0WevU>LQ1*)r#EqriEp`Vp^mg(kEW&19gwXw_&2Y!Sk>GI9!#M}bu~%B_tecuV*fI2PTm~g( z+t{$4M{CaS99v%Lv)M3N7Zg%U1sBxb^`fO9ejv}KWQeF0?IM=Eb*Vq%6wpjSr}Di3 zA<-t&YZGkbBYCFIAf!i{=P4U8rmgrQIo&kI6Eavx{C{Yo+zmTa6jSLK^!H4AR zD+CStw~iD%zjY;fVK;*Lu9?xYDOTIosYf}v1_2kf*PFGS3FSS!a_Wa6V8Dyybo2|* zO_I2eSxRcOkzH1f+k%_K%Mr^;mRXypH&BvHC*dOCte!UHNS4Tsy@|Lx;|MW3f}$4A z&|L8|Ha&aQPLTW2<~hL$HNZWgRl3-9>xM_yq#8pW(C`I|XJkvEp#s39alWJ0kiUcH z5@EMTE2oXy?k=4G771j@;9?%u*oZ#R#(@mivNx6MMXgFDn}}3F=^jo|t@fJpFJuuS z8$Rsc;R+9oTo5fa<+|o@>$$U7WkBWVW}TVnu(T^`M!7B3STr-z4PzeUlN57|5Dc$Z zI!*H4FnvG4+U1hm0?9EUlNv0Nt(up4{&qDr}UY{$>w7`i+2A39sggpt{xAAa2S%D zclmyMeKb5mXcQKZjfeguLMRVX@>>?6(9&+U-)u>%ps_6*)f*o}`~G%i*?z2-RK|?MGt_ 
zY0}21b1^dZT+yg|{KJw>D*`sf&=6D2r+}D+YbhRqSnGV>!*s~e${jX@7gxMyUUR#I z+VN4BFT&~AO+i>oSKwh8#s<;>JwokrZC(_%EZ-lEOL+$-+fbg>^`YPjqYS_C@WSz) z)zQho*~DX!i)AF451Qv{b2Hl_DK<$n5e2@^NY~(Bp3v5O8yOuR!4k#6$4_&}1^R4WitQM*t~FDT;MD z-MFz~MC4Du<#w?xsFC}6YQDZLQiNg{+0h-#v8s`y&-T>NWlf8u#3Q|=7l)`QT+sy- z{4g+ZNANbR{gVmN$vtN(pOJ>%_XC@wi3oWBHo$!M^j;h%IK)saxRanT33*)YjA3yo zib*jw>Ot6htLX69rAEe|NoPDy#Ng>{)d!b%?EcaS?7*f*;}y0E3l$PE!RD=yt&yb{ zt`==%yNB}cS#2if_}p4ofAeFr;j5^W|?d5y=jQZ%6GP3?J z{R6eVvx#hT`ynAjo@}}?AwP@^R+)XB!m2iUF6fqXicBE)Vd9}?YifgF7-6Koq8)Ll z$iTEtA|gPuagdtS$l0}-X#R-otI>h)7d?`JR|+H&0VHvumPES$4_71lmE6#%_7gd< zGB5}qsK1<}YhW2+TnWxEl~<~LL&R5I@tJK%(-+zwTJY@KxqVS#kkMZj49b(2I1rGJ zNrRh1w)mBtJU^JLuC={7JQMOUjY&OFTtx*5-GRE+8=Q0W*cL;UCB455HxzwY3z_?L zLVAxrUV0jU+khWYf-fs`4P9$e^K)KAY%5J871&9%eU#Y6apfbmlmeq_)Rv*8a$_Sh zhnE~Zo9N)s-b4ivTM5G_M$7bf`D+x-@_jLi=`*V?IZeva30>!IH~%xybLMs2^Hd_y z6%lZL5hTH{L|1?ZhQi$<~|yuM`EJg8RqB%r)?Dny(wv|B=Y_I-U^{? zUa^Q21ht)sY#ERmi0aNm|5P3o-Q6|(v{s(=*!_fsr@>g!K_KNj&GD?Kd(HIInIJ;R zC9~qEUfcr0xSpf8<6sG6Nftb$+B~u3iL*B0{qO*Dnn^!e&S$3IVEyNw-_8~v=9~Ru z$z!MBZtiEAAF$tR?TlNIwH#P$7r3whJ)Zjp8MpP?{e1ax6a0BD65jo(U;o)DxTAoi z{k9S-v-W!Pu6{g0YNqfJ`1tv*e=Yd7miBh~sK({@q(Af7|C#=Irx)bcdOG_O`RGnR z>JmpenwUR45|!xfE9CjXsl)$j{qg$wZ2YnC=`_Li!qB@ZxTo-O%>NNZbhl#JZt--2 zYP8ib$Q`|T^eo{wl>7O-b=CcGs?cKjPQ7?RbW8Le<@0e%bhixP7DU@GeJ_H)iZ3sgX5ft*vg}VOPn@o`| zy5YUpLi|h>?0%7vPPpA-`$YTTdYL>GC|tTg&><-O>HA7$iG1vTZqUP`pOAvXNI^*# zNiNq8DqmdXhY*k$UDr8#sW%)_k$I+Q)K zBjInT|H%RLcEx?^%VJ`>G_4f$As;8dM#)i}zDJtleG{l4ug<5vpt)*OkthDwLipP_ zr>j!)4R47gF#!uGc%%d0$-#5rzW$NR=O(#?QM`~Ut4+bFM7$Dj*J4kmE|4Z=%Zywx z-W+GpN{px7gx$CI-~i)@Hmo(9$(~JDVfR;lo5Vk(^gn|&I;9HP0eX4yM@{UxV=j_X z!b~=mDg>`mC)IL|fgaJl5MR?6eW`?V$3eC7B*-bV3~*_u)r9G)ncVL1r<@DiDciPE zb!=4T3*Y`8KhGvw{8Vuli+bwISDtUuLc^7V<;^$pY@);TdE<^HU4dsuQJi-v+150m zx;wEdr~l_|awIRE6)(r(=2*h}s(A`~G@b-d=iw9fkteSmcSXVIes>p+#HbuAomQki zR=KA1KZP_H|X!A`$+UWazI<6o6#U&Sor}KbcdeQw-&7wx|^&y)82NGfFg(lYIoj5 z?`p%mls+~4TAJq_m)@-n^eo1)YIs+$dAZ&oNr!D2)J{ZK)%FnDl%;X|-t#19;JTl^ 
zTx-Ygx9#XVi{UfC;M3@KIzHNPOgNpzY+!H$xgEsb3~sA4nF(Q#xY z>WJ_8iJP&i1Qea->Xv)B+1yfJKgHYWe=fQ6^Ee`9OX}1yRIJIw)noiQ%x`LM4uwbu z%S1}JW}~=sf#ShN=eexV<$t~5AE$V$u#ziPeuk1Fu0{B^R@39TA#r<38T@aEYVzLq>f5s|4J+c`rG0G!V#oSF?nR7q{vi)tdo>l9~~pNc9qO`U#3wHK5FbD zeZq@bR61CGi7O`MvzN$eok!^MpET9K=JSl-v~qhd(n++LQ;28V_dPY4e{{DAhA2u{?SCNY(+UdJ9%BER$cuEm**ewhnz@l7)32Id{p#Kw z&DL|p)JZY%LBR_hC{-DC@!DV%V^i@;)7fjj#~Cya!`g$OazYD9jd5%{|_nVG7q<9|Db~$g(X4)n!801uLpw-6z0U5^s!I;tN zY*fXiKwSmLws`yra!7KC+r$-h6VG1H1fWvQWi@O2W(3As4jJwCyV7KL_oN4C2c?)? zAM;Y#Xal_B2<8GRvR{$Ko;;^4^@t{gGi<=#x+NRjQ!PmFZ}bY$#4mGlO9$<$pH#^Y zicK<|3;COOlK3I28)J6(mXv=Nn=MWRKIyWY%puwuMW(}##c&_7hAk3#ggH0!3wYk< zOn@Iu66{UaS|~x1-MyMhUz5Qfphk+vn)f-fgP7|($Wm}Ak9W!GGuYDx)GRiy3=$lB zj?wM_UH3!aT5jTW=M3W0zqb%1@e4d_o_I=;k8S2wY>k23t#uKngk4&8%g=q*qEt-A zg(Vg?u1`MJ5qDpA2L{#iw*E%Re1V#EzFy6AE(~HF`&&p6inYy2lK*3;v-Z*R-*)Mt z@^h*vMfzY#9!|&zwlH|EXm_Mx+f&uCcD$Qqn_MFew*ei{nkZO-hYt~%f$7b|;LfIL z?v~BqSiFK|9^K1hkC_n|P`-Rk=9QCgt*#C7JI12%G z72#S09|o6cUPzXZ7N_xM@9~EpOJ3gHYP%fUMfkgVX)Vx?dXLQSG|33^&{M}(c?d{) zkJoQaEIx(Db2oX7*KzLIY(K=QSo3#;3O=oQ`}D!VsFHWK*!a=(g&Q% z7*}OKX|}NGA;2ZBpxHyH7mMNu<-Rebl8@!0{Zam5&?Eny8ye)Ht z_Jiu^y1^jhI;68bS)*HFEwzJywo9Fp3x>>>{p)|A{9iMiU5Hg{v+^HrQdoq5 zRM1*Gxu7~KQ0jfIGx@l!>mT6h zQCkX3rC&xue-s%EQUpao#iCNxR&lBRKmILc3!vHq3hwqUfG~g&T1s7Z5z>{xh&K!*}&`ZL$CX29yo2tH=y2O*(+y++N0FK&T`cg-8 zjEeFvd7e?;CMRJGsa|}r3bXIDw1fyPM;Kd^$uT_s^)u?EYSstM9g?oELfqi5XbnC+ zO$P_+tBY6~d(@}4=)c9c1KmAhSf)2)kVkJ3+cgtxEk>+G)vNSvSN*aZw%q{51m{*& z<6(kFPst!0UR2F6?LUvD5CGDQ$IiG|yHLN_mFe%_rPBXySG0@tHJ|gxf4+-~?!PL6 z`uks!mI1o|zoc~@)R%uZA09qjRNN!;-z_k(L+bt4{UnZmsIUGr9w;aYGIF4S{x8w_ zN2mWK`^qZ+AM65V*IxwWXI;MStb#Hc=`yTN?*AGD#*kI+jzT?mm-w zGYe7}BfT}e1NtnWu%QnawTb^9btR+;%gV;np1sJyb?g_JGJLIOi?I|P`M+PIEVkV9 z^+L>UR7NBqtz$mA7DDI;Hq{@@|1<Lj{L^laC(f+cWGh127Z5XvZ?!zFLglt4^RxgiY=?ALKa-7kGP9! 
zj+g3!bpaI_KQ_dsAydCgh9^=?h-cb1i`1`p|9<^%aMk+Q8_i4r=XDkLlF_yQ_ z{$*K#jLbi~m5wmJ5*w};&z#MiE)-fu-Z1>zI1+L(K`wvO=W+SxG=pzyIxNPysE0>W-6imPLq>4$|gQqau)1899Y)w9(FyShc7x%*8GZ!?TLIu4)dG_ zYXH60fLh`hHu5IS|AweH3lRP!V3n28jMkY5Lu8r{WjiN0#tq# zyRVpp{$GE;8M>Kdq-X=*#IAw*mLA2JAQ=1)QvU|^k_CsAFq;{huV8%Y*o8O-@26=m zfNk$>2i6>E%D(<<9Ie6*1n3w3FViJl(2}58}a|?u!Q60o^%-a$@Fd|G1OP*a%w-WfAnsC)cN`H zsH(}5Et?+wI$lzOEm$~rGGjX!9f}d;(YtLia2;f}<>G>{!EUM+QiKcbbi z=$KSw3j<`qM#RoRvS?1VWNQmHY|xk*Tg7TQN(*~L)CnnvD>D+-Gl@QCs)Hqqhsg04 z6O&P0Ku;MmCJu)Q^yg{_7ObLtXJo&8H;Um^S6W~fYb+gR6$)=8RbnzFk52fDIHzz-Ar#iO5t>G^axub3)$bA$0v z$Q|>|#9KN}9ZoG}Jqzrr=hOu0Ok~lhfj!9BFnY5QC{eeLQo8ENDJ5mSYsh%PqF6`k zJsEbB&@NI3d}6SKBFJH)O2GkxfRQvO&d&ca_;s?#TLcf-U0q^?3VKy#IOhbFgj2Ch}C!)aDCi(&;z?why*U<7722 zZ*P6@`5tk2AkA|f{jru%^^i4y&4;m$o>UiXi86lKb$4zo0Pfa61d!Kkx$;GZ4N-&NYsCT910-dN)JdWKj9VQcwR4DeqZ=PYL zbP_0-9ZY(?MnNi!b*`K^(azkbRJoa>ZoEw=`>ty{yJ9h?bGa=nqsd6fHBf8#2W?MB zE+N;+757Ul*CrXf%jzqGcZyQXBThD9&EuW_kObP{sh#5G2ZkMk1Y3J-4WG%TUr#}& zv8~`Q7K0S~Q@N%pot@5e3iY&ItWG>Zg3X;e)-q55?_8prm44>eTaJ+jZD&qb_e<%~ z3rAU0-`w8JepRQsIsTV01&UTCoUP05Bebz}tq!uqNr!!u*>Q3OvgnDaRvSWd%q+X0%yI-7UeB4em$hd{Y4*0Mm1{z`9C#1+vp=4k z7CJs*ZdGjVrtD_a2w22Dcj0ZH{_p+7-M;(3!uVacJVMGj-8A6x2z1u@KIU5^9j}s~ zuY%v_n7xiLZg<;@g~8IHH~_WvYsRNmVP}7%N3Q)z0DX%LoqAyt^V6~8-=33~>RBUu zStlo<`FN8QCdjaq97b}9_R7m=U_A_S5Ba-#5%)AP#G9~c!{C$n|Y(9<@wvH4|N zBE-waCJfG{!%Ql;X71=YT3^wx*Ektd$+6U&;f%YpWaItue!4Wjk~}ge0sn#~Um#bv zheie=pV~Lq~s%XPJc$!UXuY-4nfqDmVG!qgwo4L63UF}R-HF! zT8Ra3HSu=^I*JN_jzB-BmrPMKG*{KswTo2$nuR6ym5OaAVwBJwAC3i?VUFXOCq8s0 zpt`xG33E;!Yp5VR{0*D#B1-%nly(qD;e|a;hU+uc&?p0n4G%1nmRQ{rWzA+x&6xT6 z+N<1}N;+1a)>g2Zdjx(-UutCqQG*#7=i{$+d^N!VnK{e&GXVR_fUvF+_hzxJC_!B| z#~eR3l+vEO0^iOXEuMs|ER8Kj|%&J9w!yKtCkn6G(l-G)13A?5N9aJFE<1_1+j#LWOvvUEf zb1f~jn&o82I8;I4=oabXs#+hOYAc}dnqy+QZmm?({B<1hBgCuwCGhh`P~OAJcE+7? 
zmvPg&b|nJC{hj+6H!W8=+h@3bN^s}%{Z&MOZ>#WLomhQKD?bSW;6|D#_)>NW7FK;Y zE>CEGH^}&OUqbn|+g0!?-@CQ$p4qZ@JQxbfciXIIOLz%)^Fe6-2r9@L_xM!Zot1Rf zHKK2`{(W=35NknnPeok+gVihdBeJ_&-G0rk_J&AC&+_bwC@{Tz!hc`v67I@nTQv^* zY-=a#^TY4@*fOZ#^pkf)&fTQ1YL7b@*t^>-W`7cJGoSRo*ueleuTOYNDbgjL9R?e`lt}uyW+vHJjrB*dV`zu)M z-CpCbNJm+IM~YNXYoyXkPvxJcX$Brqy-CpcTfU=gn&91ijWu$#YztD9>^uqbZKKF`$+`6d%FUAdhu|M`9NoGH1qC(^ z1tGz*8!41kk*qB6e2?)IecakVGvuY>2I11NpMf-i~O1gI^>k%%-s~qk_ zOFFZr0b*O5vBT<$2*aUM?v9uKkAhrgd*+vp)}{oB$!uR@sQe_OpmP~=S$W+DjyHI{ zHFfu*xb_*hU^I?J*;FcjlaSo()wVPf-8)_QaO~>mpug?m6Mn=R<}-)NbDG(ZA7@Y^q2*4g$`h5b>6v3_Sv~ald z`S={nTUO=1{lE$81fR@B;<}_v_mH~4Z#+T*K)S9hJn>KxAz<_K?!T>|nES|BcN*v| z{PcgsvYAMPYMP<2MQ78AGRvBV3mWveEZX@!nfEtI?iC-YIBUxIWEqtc!PXO4@d-Z)OG>1qOe&N+6TdvJ^`O zHW4t%$#m zIY#E^JH7?ja2>o}ae1pMV{nV9G7l*&5`rqqAfr4pJ2sZk5QK#B#8|k1yYhxMn#{Ov zxnrJ(W)Ce8dXL;e2+$G?(gkEPFY$tvqcofmEp@Fq&A3kU){Qbx;`fiot8Q%?UGuUU z?8O&i)HmW^n<3j*8e2dTs(6+4B^|{|9sgQVJWhNp`2D~Bz+8R^AH^(%j0Vjtxc|05 zl=zqf5ZJrhhf%+`r3#_;AkSb=BViz@vn$EmMd zFSL*C?-!V}j79i*Q`hBAE|M6W-m|x!)teVbNhYTPsn;D=E*<$nUB3q&8U*%ELgJc( zhy{*h%xsv%q+8J0FqjhaWt?WjG3j1S_9anG^34J#sFs>TuV`h($cLZ`$e%%L!?A`k zF?{omYelcc)yiY;D`3*#H~QoZq|-0n>$2>8pHtq`k>|i?_4FP6>po0(iQGC^FW&(s zEIa@GtYS`M%g|KEu}II{#v>xeOq8=>tNZdfH_|PthgOhuEHrD9VDEeJhILIHu95u9 zxI7|uw8UK$Mezfjep&pj(R;#o3XAIaD%<)FZd2N@$`lNa4CZX6Z7-Mix8J{z@g5|# z)HO660Q%HaOQ{xa#%-2AEDV)AG*re9qYJ?8)*)vg%Zk=7 zlMxu_iZ&pag{-p|t@>P1Rhrk%DmW}c4%gO>-e>R?m1L*>3bu~b4^6fB0G7wJ z0q1~PU(=MvqGVg62ydu^O^JErw zbJzT?1(;5&0Jy3RPRSak3r9^V1Aw1+iX-y!(5NUnv4o2 z_wz0n^>_wx6&@oVmm?=xmfU;xpo#V@olSQ5G_a^ehSv&t?J0yV^dro1Qc0CuEP**2 z@z>M~gjuA`ViY1h=(pIiGU}+vx>6=6VQWh=;>8~cSo3UnzcTrWGG&8Bkx$Q7WRsE^Y|Gfze zC@yN5)UZ&Tz#tq^O(W1zZf`2CYa(loQ(y6&(Ry@skJ*4uC>gbc9(N+y3^!o_EZ|zv zX? 
zKiYEV50ts=!-W$%;3^nXRS{LK+lJ%Xh*CF2CH8fH4z}RyVIz4scp_PkDu$|>;!+M= z4-mp3iA>J2S!I@tqzOvJbiSVX*}bQfY1~_K`6Rw+tsya8&7#IOmXT7n1SiO0qyvuW z7*%Pta0$+WGPP$hs;rTYG*kZ95&10$#6C!RzA^AGT zBd_$0a|i+xZva9nh|6;N7&Je|PiKhH=3Q$3J}A#J;wa9N!#-l~??VNGS>iIRdLBTk zdMN!d6#>kDm%?jUMaZZf8dvy5AaJ;U75<)tQnHumF=N2Pv$sDYk`u;05kuIOG>gbz z%lM}b)+3cUn=$rt(5M9pAc|bXmLZoh0VRc>7^5;!C}uF2 zSqM8JJtH_PpKaV)X4zs!>m*S$rx0Ix+#J3b<95}cKmI!$G2>Or&Bne z$#5P+XnKHEf8lqOx3PDEs*i#00R-@v=V0xN$+;-WMZnfL1W`#@vf;61QcD>`RP?u@ zdIR0rjHh!G+rMA|mwOU3zN&x%eZ#;Nz6goVS|v5{G#H7n!oh&or)#R4wyfZ+c=wgU zQ;EosX3TO7-MyBWgn`S4;>>yY#IB?n;Y|VGEAt+Y8<5-je6&>{z^+u>2}|m7(5~-k<2KJNp__rM!@GFQktT?JHpuO zGr-f`RZCODosl~m)G~^76ez$(OrKz(NGXCt!`)yoJf}L@azRtw>ns>&xfx0KP-B{) ziHy1@-WY6MS`IIr`7V{&kilF@Mh0`Xbe_}oUf`)juc?kc2m9gNcVU-PcgygJ-y=jvA2IqYzYlf>|0u}-+puDE4&qG8?auqOl;Q}g% zJHUYH?~8+D44a5ZVO5jtnZbXx2^L$IoAAp#I+S}vGl@qlzqzS*agt+9>PP<%15Ckd zs&u2W=wbhBME!<jDvPam|aXnO*-|AVXk=g;HxyF-k%s1khL`SjjY1$;9aHHAh|3RmBxQ7Qx1iKZ}NF7gp|oIL3KiClU6C~%QO zyAz7w+YE@;vJw{jO&G&Xk6;Q3hOxMm%+RwIUzm`{fl8u}I*%i!#5{(Tdu&(chd==X z+m}ogVNr+@vnRg08_%F1m?~0oE}ab)wz~}emHx8m(VzDEJ{;gZ`8+}`F$5ku z2+pmJPn}~0blTKf!zfpX+YtLFS-lE{$E1Vb0D#MYZ44)Ey77uqlVO_IJ6q|9 zv{+>5cqiS`!ahgrzzrL15H`k47(X-^6I9jfAUdDwS29Ygj0GoY#)N;5R)rh3dH`ek zgmiH|99D2)C6Gk+Er?K8cr7mxh^`^RP#j2aD`Xw-rkOjv~wl3p^R~3=*BLoDs z$H64INhr#w+u8k3QC{EPc2~_s^2vzh&~~{<*pRWqdiPE;Sf;4zqbqd0FYvdb!I;1} zqQhrFfH4=2AXTFgHnGJ1>eG72TLB`GUT0YzQ@tVS(o>O;FhYWJG>}b%10BaFCj8x& z=xn}mDQNQ6<(m;mG*=x@S;dNz_ut09mSE8SIk2+OjG7osH&J3wP0NwG_?4Q|Cp4J< zl@11eVt!VdKIRK5pNHDq zjx(1VsCkWU;hW=zlZdpVOowAx7k@BUwq!d!jVh()C>^F_#`7v%FIeM<;A_t=&qg9- zFC#-A@@{?`&5ioXn2?s$@05eIl8f)?_7u+;h9ni3j(1+ZexkQ`U!+}8lvYsR>BOEi z67Kt%e7p~_{>9FG{D?gi?vhW*0LJErKPUlAmtL|=OPvwNB$pr|b8ZS-U0~5YhpC$K z9(5hu5q>3G;hHpdw3&&fRQWGtEd|rV{SafGYXiRX2h`3@7O6Lp3h|wQKu1^k&tq^z zLjL^`q1>=Ak>VoCA>nxvz=Ouhrs>fFXG?7-(3a7ts1@eH`&l39d8Ck(T zzBo}dZ*DWHDR|KHnXvI~Z$G8`B+V{o)w0o#l)X|+;IoccnN12C#{fqe)T{LNVmPZDZR*DytzgW+i;)BlpKkSUPV4<822nSbHVt1Z 
zn_F5Nfu}Yi8QHr~`Zb>hxk$FTx9;ULM0)eRnd(vqaZf9k-FycD?I?Q3dcw>b?&& ze)qXsW=_;ar!sHb{>S}s?r%@j-R^2k7COJ*SM}@eU$i@#aj&IGjeQ@&cXo^k`8+4z zOOCQ+rHe7EssrlQF>L%`SZa7t3#vB6M`}_XpL5Yit z5Z#(gw8o>)(cYITlZ5Ms4??{|lwdNM1XX7ZrRJ=_u##vAoFrVG4$;|mAaFr<(|Zqh zY3b|PoC1!|vw+*h#?f9qcia)iTw9#q4Gz)AmWur5JNFcbQoA~gvDrM~Sv+A4ow~5^ z=$`5XWtZi~W~Cv(0TXgcakk7nZMJo|U376>eWuSKn}dh z^n<*2}G>tCTq?pt84p*O;rKa`<@K*xp1w<-``{dqD zn^;2r?c*8HW{EMBrq~4n1Hd4OVN82z#pf2pk5t#MMXkWl>ac&$C3N)l?YZrbB*C2} zz=*EP7;PO4`mh>x7TC+vcT5avDI%QElb$2mLn69<4vTxf`QWZ!glcGao=Dd709qtE za7|JNAr{Y=dU&O6aBdp8c_rb=fYI*YXA&{-c=t&V3G64Wh5hVwv2d|KH%V?4T z#8oco?M-xle5Eeqq^=~?r4W$hGC{mLS$am%;{ zERw9WPFi#7AEm@FEU9TbXCL6y8&ar*cP}LTJktu zAh*>!otCBp^2+iD>_+c(3OruKmCiC`+N~QD>CO@In|m)Fjr78aEdEBRpGW96O}!Iy zS>I1yIVK*09^&Kw<;cQ7?Gug!Khe~$=;$s$S>9!xX(Ew5g(LWp`0+vbDip?=v^^Th zl)I-Zcg(-ZadnN_dGF#{WmnsTT8l2+&#+ikaYMaVATs&z=;14HV8KONtr?9pk+00F zt7UpT>cX*<+FF}ax5}iUuRK8s|3K*E=|0|TFnZiTR?eTo860D}s}^@Zfl#H_-sf38 zkr+^#p&}QDlbwC_bc{2l0F)ig(1hFsLRe-l-r&mMX`fZNwRSBQ;y`2bb&Z!&P{PZ{ zt*i;|vy8Z@BbkZ(DJn0$m>gmQfSC%c{m zKd>Py?n(68bL!f09SR^b0}3+IWT%eS`6Hg}(XXu_W$0x_?d&rDS}U;AbK#+Tcj-G` zHXL#V&7sm5UnQ8@aPOX&^_7qq* zx&=NHhBoyD^VlERi~sKg(!w+zya zQ8Bs0Z)$!W%jd~2qq>QFrZeWtV9mz-31lTOHvy$`_V1u#Ui`m!ddKic+HhO=9mJY= zVs>Y{??zWx~iUAYu#&sgug_Q>uuq?t88VG z!SXbYbQv45h$VSu+&ZnS^FPxpZPcac>y8hJhGg)3WOx$!3{gI!JnaY6^?&m<5M{vEzH^{R30tI8TrHDa4stp1EWJHmOU0%tQAy26xKWib;FIeNJC zkVvrxJ2rv=BH9Xv=-?Rs?PnO@NeF_@hs%1Tt+_q$Yc%1VW#W?Gm~r?hc}qjp+~z$} z(^mD#Ci&kgX%y|T)zIGXA^yjy?{;qyQ|A!7dbViW`YOo&Y5 zeoPFqD%_^d{(({E_FnBu{kvJLnm(go^$yFz_! z>HKsWxV^wDPyE$Z^G2fyoUE|xx!WTVd*s`>jk&n)J{Nj;;oKdOb4Q>jzF5y`XEAyl z4>xN78a4y1kR+a758&O~ew9RgxjlwEd^bdv{l?YHIb3g`vj8c3o;R>FyzX4R987zB z-7)IyEi)moWMNk1F~7&a^YLFF6-BUR0p6v<`#Gp(o*XfqT$YIW`9_G47thW$#WBf~ z2k^X|(S6MqK%-axLeo9MwJI;l&A)RuY|7K-ov!=lku7_XZLhO1CM~?QoZAekMwxkvER#~NKqip7y3Ulg0@dh*k0gpx98U;(oQ?-#_2s#w<1q{3C)g{GEO|5a3&bD7NZ z>DPsqL-ObKGz%doljVd;&$I~WewEOjse65PP2xJ~_`n=e zM0#n;gHOmZBsK7BG1b~N^yTpTVF;ouf{gcHBB0s?4cIv97BXScgOn=Uy$sK9$wZE? 
z7myoBb7a$hRgb5gUUQF_U;q8w9CLL}=*PsGj@d}4AK{HmW20dDoE!7s?sp+4+jXTRf!9U3rGUB+JcF|nvahXI+1W_qZ6ne*Vljbre zJbXL{w}&{8(Z<}>8`JYK)2)5qwpR!{K=WQv=E8TN^KHHApQjecc7r$?X|zsg=Q%L{ z1z!0P`hJC3ezKOwBeeBXitlxg@bl^vB7iSx(ok6Z4G{@cXq{y95xKZg61D=w?I7lD z>wUZ6zd1#bdGaO1o5_aKa$dBPk4TsL0x{INW~sg9MdtgUxaPf-Ty#=_wxsNxcg-;E}II`$CtgchgLrK5K_&3bFcj_Av34 zwa+81ZHJy?zyOz}+{F@X{i`#`9zaUIqSyv*Jdg$)lN#G={4+4} z3wo{DpBMgj<)6Dfv5c_Q>|NhU^f6FHQ0+0$RwJ^Ts=C$JM4p1GiVqwQiaaAv({8&V zb^`;&PJ03oIgW`J1IWs0XwI#Dxd3*;U3f1h69=EJ?toUFCH~#xTr&a!2b$31E`6w{IRkzj^ zWdCoKL`Rn-Oja!mQu-}zU&Q~dSSS2lhMbtX11Se$AZm0Iyo3aZ9O=oCj&|Rd{z+L{ zkF2W13v7Q$dUw|gl4BF1xxjelV6ve?vo|3K%Go>VcXqx9}Y z1{6w4QiLapAJ3ky!aDtKQurCIEZv!1^-Jsev+fZr3({0gIz&O?#=rP`S;K(S3`&qx zQg*(-i(FPqALn1!HSw?-XwfJv>|=Wz8&c!-(%o9R-tf92P6UZ#s*{!#lyK-D5cTw+ zqI7-R@P1tLoXB9M`w29-F_@sCOEHEYH{bcbYDIvBk%f{JwT~C-1?2M{eA@{>#OCJF zww(JKQ9(ciGbJzAAB&W#qkVH%hX>y&yKwxMWGWvIbc~d&3emetTm2$d`Vd86iRW2~ z20)*gPkG+gq}R#1xpU}$rlnM*P{WFf4%t;Nhyazp&~NDC-KZQeosKRdc0MIwdxWr5 zAd7^>QxYSorB&l|BAiozjgYK0@^_CM_TtYa06#O@7 zl&U&0ZWqotEZ`=w-FMMcIdVmC@+F6-^T_EoXwVqYz9cH)DqzVw1^A^8~i!(KgIQ@5b>pjgo@!)DTQ^4#;Ff@{S?jWb?B3yQdeQHqNYHU-Fsy7j zd8FA4a!wJv(xYB_&+prMJNYkQs~~U1fKf=3YJw0v94HHxeRg_@teXc&BpRIJB^^{r zSmc$`lrk65EEZe%S%U+S0!kHx%lvE08SlpPR1*La3RVRLwV-QqFcqMr5;e=I1QP(Z z%(K7>SWZ)*hA&}cRqN+$NsLp5&pj0{kyF@S;Q~{K?)dLp2xl!*DZgD&mSQN^lD2>N zZ{!7>*;_e-NJ?rm_?h=)CDcoAX*vx2|D< z%w89-Q>1%_isN+q1c3Ab)Jj*5;^yJiA@|u>2yIBRbbRRY2~w|A$fRr&s?RrB$L06B zv0U~{jDC-Pr>lhY!_NJ>L({jH`Pk70;`^qnZ$}#N>Vg<_p$u7$8sOr4d=h?ArpBe3 zy~l*h=U#n`Sm1CO!bXQr?_sv??VG5f2@x88^O~i5%06xbK9j5=xJ(ss5<|D`ocQfO z%hi0slp}t%=Ba@g1WA)Jf7eB5V5iupKzhYG-1Io-Er%&6i>bBrdcTz}p>Fb9oz%a_ z*o&uxiu5naDrow8A=(gMGnm0gj^n4ZP{7L`=I?-_x0_jPuV;%9^F2@yOucthFRl@) z(KOWTA$@i-8V+w)L$o|yR0}RkyH9`FCeLA0C*S_O`a~O|J(u-~&$mgo>pM7g*4tn|(@0(~-+r>2{>#+@XvD^qc z$=cyia_u-;9cJE1_Xgb}UM`c-6ZuVVaZhlav!h78Oo}R(XobYort#(8mq!m07`~@q z(HG#O07YyCPMgoj>LmYuf0<&38~1?+o!mCEKDw0xD1+qaVOfNkd_PXS=HZyBbwQnky58Ww>*#2z|+# 
zMNnU}hDCYL*M04cTSfk{inHw0zq20dUexWeNnFJ|X3*!syKj|rP4A$t{+hB4cb+rs zqCHiucOAk&rz}qgV-%4*w!yQzSViy77vyZ0U41@t!{)^cG3b#{L8u+Z6JppViHO zdFS3mCD9j2>TtA$nlQZjWB#RleTl%4p`CpM`Oq;7 zh*Y~m(w;TyXT24QmTKBpmXOzG;-P;V#JbJcb?nZeD}ndHVcI7e%u*ytW3-)Pdp>Qp zcx;BuHksR5F}oLR#yfa9XQQF(f%_C=;H z#I(Y`4AU5wNs?v_kp}@WQGlHiNIH-@p|w!_OgNqtqQ9@dpp(r)Q29@O z^(S`-4bi<&k1ibUmWmjF^)jKSmQTTgZ|eF-6@iWA#h|Z+e)DIuC)w4eegMf@ zDhh1?#`ND&RX*dq>S#fyTg?JuUkoav7E;|dLnrs~veA@L@;+XUt!tr>v|L7Inbe8U zq+ig+{RzN?n{}ZoV7cxdNa5$)s<#W5^H5Dhv#$yceQ#4mO#Kd@9=y<*j8II7Blw$Q2G5nj0o{G}01p_B( zAlo(23>yx7TFfO}N8GDC7HUt?e})2S!qhf|$H1{ay$0!9A(;=^^dy%C45#4hU4VHRm}uUlIZ&PROLN;iG+hTw?YAdLT2hH$8(IfEou&VnXwQT~Vyp!gr>9CmJXI+L;rryf1A()R ze)~sfKlM&ry%u%h|No?2o>IV~L$`5#yob|?<2z9sI>j2*W%1^Ta_MQfyLr#uGwOiQ zlOvEsqar0DuapjNj5O0`keSqHE|P|Uq1;C^94DCX-g$d}Aa#rzU@j~#8LmL$6s4^7 zH%kEn4G3GHKt+AcgWk(pQX08!Cz(GsSl_vx;CB zLH%OXKW^34dP|aTJo4OQp#E~^1jN5iMEU!bxwO=Pqe9Ja_TsY6w<@^FOX|U6;&c=z z{}iB#O|pC}me4qu-%c3P9Ud^DIbfa1icMP>Mrb9^{Y3347Q(Y^X{VH~AXR^;eC9OFK>sWJh25o0mZ5&RBzb5*a`YK+3vu*|_(d@bb;qUDPU!Njhu?Itie|`W zC||YB`#EXTi)#yr3vxkjcC`)3<~5rhf>BbFZrafFlVm-DO>-jQ zUIQe7$8c10dqs&)kZm_pQddFuW0wJ^?G3Z@?JiPwLK4jtrKzs}qNCfM!Sv+@ls?TF ze~8m7FGCHrI_UkD7Obev(&{lj>+3VY@HxmmAr-QOv~0ea-!xCGM0U5&J+a`pu~8sg z@+W1}Ct#$!K-{pjLB!2B;_B$XCTH=w^;}im9Z_31@0Up3_lE>k!+7KOzc!td*An83 z(dr%1&CUPz&Ygyx+V}21PzS`XP*h?bHomRo2c9pon1~K&q%HZxZ%mUR%M#kpSk{`- zT}wy3#n@!~(+C&~RqFB|#(y4WKixPDjjEB^rw4j!vU*1}vWrPkv~~Q`XZ%@>ZyX5? 
z0Nc!*dvc6F8r38S8viO~ru@ySA{Jw2>&jVzQk^H2v4ga1HgjEbP z(_T8X2se5PHH{&eFT~43ZiZ{sM9Fj14zN5na+d(5{dg$3^zHhX{z63XgkR5MVW#UE zH^ftv5+zqZtO--RLC9;H>_cWWn7J#w*yi<^w&~^5udZaceQxL5I{Kau-Qd-AHt(O_ z9om!+CG=&Io=E?3)vDq3Y+e?&2el&&6iJ3 zgY|M6F5T)C9-^X3w1MI-#X-xzdob?kb9(}stq<~b>Ur2;C$c&{OH~w=_3hspM`9?l z{VBp()vE0e^H^iS)@Vg=sOx50LQB%_)_oWMna^Ru za%3X2k>rd2My|9X}Eut#pihx!cvY*x51-ZDYAiKP1EM#?ygY57!J4BlAp}A{i+;1Vp1?1 zE^ojXj||fx#qV@wn0fNJWElnmJbMW``3xA9+vgL`ZY=L_?@~N^d}Sc2ZRyy%_4OE8 zw(D_PBeA{s;fAc4^^#9t0bI<=8*!L94mZk*AN_HL+@YGk565$^DLokkmnV>as%Uw> z_8|j9GSv`%67!3sgOZ(4fq5d1OYNTToHB;f3h(R{s;p_U+2aLJ74l7IoHr>SwZ(W#-H9NX8s1T9&4 z0EJ5aXF&2Kbekr3hVB?Ollu}=aUSPyQ=!UW>gk7XYXKoVD=uzJ2}h@j2LXy%ohjGL z5l}3H`gUG_O_To3r|chvt1?WwHBH4nMw?#{-MBSP=xZAY9||wpKHc^En-&?;Bc;<= zkyu5^^$FK8nNDsxG_t0!67HN4LLzF{d$eGBr@oK(z~Qg%`$X~@%tVEz3rk4u|G#ZL ztW&Pp-1qnBoQ86_01yYgI##$b{d6);uYM21hO$@3)RMd2?Y4JJw(EDIIgDXOeK*wW z%?bFA!#^M+WKDwj>_)ne0|CDu;bltl!d#!vHG{s-C8N1e0ve2&#)un*JlZF3bxu!D zT?d^Xa|b>`y=63c@*}IJfX-&xXWQSLy-K_Cn$;I!mlwOv>DiXMSB%tzS{IWH41W|JF&0Z;7;$l%?#x^rqQt8zazH)K1x?b8}>SV`zXQyS5ssJZ+GfA>^SR^Tp@>xs* z*msr1XH`N^RCG|M5DALb3+?TsKkc%6zooy>#z8HJVsULwfk6%ohE~0ob4(0 zTw!6V=#`a+f!M!fkR809566;Ln@J$dF5+Q_8FD}jdMyW~Bxbbo;5lKPwaarcO3iwq zW7VkqD!CIzsag%LY}kMy*nBwe>7>Q#Z7XnpcNl7%wMcHWT;v^C?H~KnjheZu{eU9W z$V4^5o|ks*g2^=GxT7qJp*-Y5FuF+!i|eEnkgLyFf!`*Ye(~`|lbfIsm&*q?4rx$& zvWWH&Gw1;Q0@9j(^2Iz@U;IYLLgPC2H~kHl{2Y4hFxF};SRMP+k2!77#i7BR#3gbY z3lUe!hY4BRyoD3N<7{Gb7iy4hrq_Qj0_Ps*sB5a7lRaFVU+rBJHt4}@+ScN!J;{jL z7Hv(7v44z^mt4-Ae9yR}d$Z^<(jm)6pvUG^PK`CD^ip`E zNaog;we3y)E=SYU$>P^YG_H`?#VPU}z3kMrZ!li%MD0)DU*Vm_!)a1vj2@0@VGq>y z9N}g&l9L`v!bSv(WsE-*Yl4#QH_qm?So5P4H;r9wV_s2S@IC2Yq^r3ISX_gR?8jJj zVvVRJ-NNq9@>0$FHE=_?<2|athU(F?m^*IvOw%HRdG4DE2 z(M^358&b~}SU;`X^s?J`A^Tq31*i4L?rbhiUij6$ z&_kSZ^=T1iNR2c1g@bZf1)V%YT=FM;fbv6}W&LcY?#ah3qhsp1Sjob5cqZzSMruFX zDWcBl?_0#i#n}(WeC_x#@RsS+DH22kH4@0x{lpi^B_tIVicd%y&R@?sNK{VHSG`{z z8#&EVSfjoQRi%iD?Eq+I{s+xRI0}&vxiOl{^{A|ADYoHtR}{a!W@p 
z??9+gJ9)c6kE)gRq16eOzH;gN#U|H!V%~`1jhQ4`RTxdkObNOhGJ))%_oleiY&Rm~ zJ1^He86gkHPC>rC*6Gdw^?^f}&X`+sR;@-KWPSG@+(ia=D04RINTd#8jJ)r74=QUt0!S~ZQv64%e2n8SM% zPXp*9_Lg~KGuESqZ!1}%dYEb$7#Q_j;qB-Mwyq&IO#+?UGj@PV)?o+q*(s)yAGJB9 zG$2ViIFxndVr~jxtC6}f?Ey}ihM$vDDG!qS-k45{E)vuJB;=^xz7?SH~ z0wN-Dq6OD!&I)AI`1twJ=yyj%;80?~c|*;&h~Pr+Q!Qnyg6guiJlg$=vCN=cJYCmC z?_0K29>n>YNsjRNe`aQIiCnj-j;5UKzFgj4=TCwKa?Y%gGh#!X4?MNyF)sFt_5$ zNWwUcJjt)p(#-gayNtYqWJ&Ja!0#G}B^E;HhdL(&V%q)KYMcN|V_=jm!xb!uWy2 zq)}7lY(1t*iC^LNDsJN!W5BS3h{NgSeI{pdRY3%X9CfK+964HZr5eD$c+NZxx}od9 zY;fyP(%;rW4ZYsE`VworlWGlB2U0`m{;Z`j%#d-f(U#!C!EF zDt?ajJ^F}TU%^7Edr+;xK@}us^;3|;z%&Ozs0ysqDbj|e^<9cNsvuUn(Af;CVRbNP zZNd?nQ^^m>xUXUVl^{52Z@~$K2G=6qXvsHorUIzwGR-n~m#9w*h+%m^O7aQtB|eCm z(P*WZ>Rg75T6R8h%$jN`6Id{VG0&@???i4hjhxl+duM6$?NS4y{$*``tD6Tohv+Kk zH@8w03>!?@1AR#$uAiu{M4zS~2i;giwwkPWu;Hk-C!Jk*@Cj<)XS40@+|ps*!-=DdGw2S?BOht}1VL2}wQLC<==nO+mQZIQ^i^$$ zhSqv)noom|$LBEkcg`No5{++_8p>bS>q{=rs_*M!I6P`cdwX$h9J@76>q04>k}=Bx zA_?&o%;9Z$4xf>$8?sPQxBeWv?#ULN!6m~> zY~=!n$HvtFpE!}){+@B``+<}#tFR%Aw}U+{Ty}22c3HQmb!|^cnYbxZD*nyM;+;>E z{}m2~a&?Wreo0j>O>@Yhr$`mbpq|NUJP+QO&2FW&K8-}PqdU>sA#=VMO1n`udYS*I z2G1;|40G|&Fxb|#FD`7~Wy$v{H0C3@b3eah^qVd4ED7=6a%A!_62{Qv6s`&k*O53) zH=#aJnF_W%kRhn~BsmxJ9Tg*TeSVG;GGiD%)C^RR$6L=ay}FS72{cQJnRfqkzIW;V zG)wm}D%(1@hMT0ntx3l-y`NPhR`H%Wr`qM;Nmn2^b$5&plhFDmnR@+4QvzwQW_50KP!oW>Wq9ZoDXxJB`n{F|po7ls zS>L}6xK$X(yb+SRw7E$C7BzI;u_;>~EOZ3RG&1FeW7=PA@MOYs7@FgMLx3WznLijd z(8gQWXi?0W3@(-< z1v6Eq3f-@m*2SPv+MsoKi6P`kcI^|BaOLrTUatwVAJ>i(!Hx%@Rin0O9H;Ati)~Jd zi&3~6R)dZ!rbxb47?h`0-zUv1d>0Non#ldo`w!%NkpW}WoDfZXs}Lu|pjY7D@uH)o zyb0>E!4Oxt+A!`6qPZusCL4il$D@e8Uwg>xbLhJ#(N*@x!Iuz%h{~+l;bfD;)V775i8|C^AA5{42#_Ar*vuv-S&CjCOvq!;|o;-QSYfCJO&*`yrHdcMT z`P1*7p7kz`<}hr6s)N(cS6^F@Ldq*yL~EoS!dbyoEC;74xANUB^1b{V1$C=M=yR~v z=!&!QKH{dX=}P9`>q3W(#L;Y7M#;0jM8gfl;QW9`FNX%t=c2POS-RIF+9BZy!6U94 z9u`OFNY$JmowVaRYxaT#u>c})#tqjY%&`dyamoG)b3a7mt4(HaX_!+PQ^{#9aExbj z#}sCk1Kk=Zrkjd0_$qf%Q!u0d?!`onP6Y-}VJcB8vl?)+;UGd~t^0QsUR>7R7D#j| 
zh!;iM@p@?vzM)6vf*^~3#WX=PNUXg*7%z*CZ<~Bri~sqVaQ)oIl5rC+TQsD+GIQrj zqMz=?vhWR2XD;xmqllms9{zHx)m!Xi)nPe{Q>-MDMNSL-YauSof$OQA;6*>vXS`<` zDNqOPfg9q$Q??(u(vKT|=?G+FUGH(ybnB)UU@17qc>)Jl!qsa;tt?=D>_9)B`!iz+ z50gZc`swm_HLb3pAf`2oL5wZ;;iVJvA?E-<&GxP2_^9-cL>MHve<%Kt7EFb{hl{n# zr1}k|0hL-WKs}?w}HVFA>XI(1P=6w(I8p2L6JyTESOTW#KR2f4E8?wf1vm#w4m8ha3l3z z^LZ#p6152Aro@CRg*MxbY!$tNMFh>&S{a-lFwe;391>tTysg|_OVu1oE&w##d;|Y8 zs{4uqmT5P1)H_y7+!ALwnuF3rR--barltv^__G#WR!Uw7!tjbgX812Rml{d)FE!?H zsh&CP1YJMg;7&Ek(F^^cSoj9k5xC11mIu%4nvUSp^~trHVB0x_`zpH3K2wqd|2|Q_2F`0iw7}JX`gBud~pc_-43${l4 zkr>DtUycQ*T$85;$w1$0!=AUI%b*ai} zIjqhqGNQ*Rds!0ODQ0QG`mieY0*0w1fgI=Unc;jT-5Xu;_bk;nLX+GMLh(Q&GU>hM6;VCHJmNK2Ndvue=wo z&R>5)dzh&%mJ5a)+l>L4N!1Zkh2a#X^pkX*-OOQo`z)KxF3zVmE~c#uS$F@I!ozOP zR|5&?RF%v{QmG5Be#iv0oT7OQ45SMl9qAyo&gK%fu$}_$8w8#+M}~h(;OMJy=!-l0 zCO=GItv8052A9Xjn#}e#VpS_Csf(m;P$WK8!r?f--fq|`g^e8SJ*Svs*khX0R#0fX z6TWAn6Y~EXwBIbEK)6#EvFR{iB7)%0n9>GVyO_PFfN7zSwHJEwnToSnqA0b$7o_FT z_o4wVvxXqB^bOAS+|G++juLhx{c4uT;Ptho^GQ~et)-fWh;n(D%>)kLI~_wMI-j~Y zJQOTmMa{r+Df*>Ebi2R*r2<=rI|^qM^!5jRW;Q2{T+v zz_HM{W3eVzWo14B!stC5mvv9IXs^0r65OeJpBF~Fv2#LMt|ev*Efq`VzY*#Yc;o*_ zg_HsIEju2mwZ?pY01i!k;EL55VPmEmk(CsX8$;d?SD}goKR||i%WZX=R)Rx!{Rf@RzZBYH083x16gU8`Oer^m ztD=H}`;|n$w+n0Kx2K*tZrVPnh4q?8gbc3%hSb7!!o4t~LwA!?sSU!m2ggM4Rq3LZ z1vnV!r&6I7N$+;sZ_2}s$CU76z2o!jg3!m}2Q)Bk5%qzJQj#ukF>ih2;OQwgKA(_V zG>9MsqXPHcS*GbK)+Mw0e3nt=XtO0wfl67S&vGH2`qWsGRwG3weag|C`zz(_(+abA zik>Uu>1%yUEB^WYl5OkLAM3T=OejQXt_f#BmGV?k>iA;v2h8!R)ettu^q&phE9*kh zC@7p8H*o*mYG#Q6q>7oty%a;yDEDJd(a8{5*SqU!Pdb9EkB`zG2-+w=8>aruGT~!a z;V9U$U-1%5qWu&ss@3*dB(;MAD-5+2&@hiMlgO|PjDixCA#Mhc?T®cpL zyu7?2gHl%GOcPp1iB);=0HX8q|9()U9UDS2ZI;+<-PL!sG~r}iL6(w5=~ymIOb1T= z67q=JoDs5Gr(vVixC4L!4pm7pIn}s8lNYXYx=Rj=PIBKYKz<9P{57tCmO@IU4FC8= zEs9-dJlCWz#dppxFW3C*x_u2ENka>%8U@C1%D$X^3EGH@_-vKDBdyltfHOY+Y8P`~ z6KEKZY6?Tr^V9b_^8i#%Gp9NkwZzk>i}~ZTdCCdB*HW*k$pX52Z#yL35DBf_=KR*CxQuMiDhF0*BSiDgEglQLJw> z{YvoYX9JhY&z)yBi^ga}GgwyE(E((^j1{yHl2b2{jD2B+B8Sryu!y$OkZ_(c!a)E| 
zDJ|7)id8$Y24=Z}gt~+iKpwt;5!L1IJ{z1Ri$9fEzoN+^7im=S0twwFNipYAw9tA@ z4X_$<$Qql!SkbnRLqxjvpT5n$mXakXNp|Y$3Lj>$YP$uilDc>#!KB=$UasTAeLH5Q+QJEDQkMN|XGOh?Ce68K#Y+r5U0rF33}m^Fg`C zv%xHo7Wx6r)&A$l9_YSBk4wQT%wTzD85z+_bIOkGBsxdmoMVSyBW_cd?|+8QVu4Da z;!ugw?_y1GW{A_G^+-$xY4!Z8BM*y_5!*F=^SVFm{#R>r|a0m8*4YiWyyB!vs7z7!%9n9JdO zE&MJ+Z-0YUqyP(7f-EIvdY(4a5^_fnxRhdvObG|hMgLB+E`vgil<<=4>VQTSTT4s_ zkf5ew!;{ADt+u-dMl}BTx?W;v&LvRMQ!dR26n(Ls|JPyS1CGn4@ohrSC2@kq>`3*kotbdZ_^{z>(ibpr{#RyEye+aSsO~NEUaTIlNLMi^t^6QM|$ApH22 zg+!VsGEkjSi|eV?Xwp^u8zK6?S>r0;SEE+rm zS#&Nv?08-P-|ijS`(ZBy7p_wqtF+qAlgu=;g)hLW5{2;ch~gEFg-4% z!vT#XFIQ1HbRaYl!Zg|^@JkX@Ox)lYPAX;$C{2LX?k&aP@C(yNU?utQ8xqVSBP_v{ z^pEAH7|EYsdw<(&B7+TcHa#lLsA4%G18%}BCb#E=CTv3;WJ3yhN%caEV~~||uNNVU zy6OaZMzG-)tL49OTyV4d2Afl$xn2me&17#Il^(Q+3J8EBQu$6F z2cQ0IqGkpTBKb~%`?cm7ko-0xIlu=Qpng9n9*pYH+w9%&JHe z5AAx_b{X*R_e41gbS%}XX0QA3-`I*eF=z|wlImci;RGWlBX$NgFxtMiw_KMXY*h88 z!J&RKV;ad$Xun0zY8h%8d!7gH?h7-e1fE+aJJ5ka5`^y>fN(JNEt(Pb2ZGz?bF?;6N^nf&1KS{fleLD zE=K7}HpeB(q8(XZK+6hmm2jMkhm}Onei{JIT_zN*7HOwIP5crZbiR}_A8xDLD48%f zl$SCO6t0&`aDgeB?tRq4rBH+FBx{@Ms_t@{4hkwU*Bu3%H2TRmF?t~YCu0-y#-zgA zgjFtQgK$&W)6NZ3*c-v`qY@UWWV2tU_tI2ti`tqaLlK5!wu_sOeav&3HJv=P`P`;V zica-US7KVe%=|m2Vfr3ixjw*ui6XTcONfal#0~*|YDz)%3BtQyrm;Ks`b)-#7K*(A zBI?F+m0|gT>GlHB0nX|*BLMY6xsyTd{^{>&HYA96LW@jJ8>b(!RH<=vwrW|KOj?DA zD%7Wf@+AL-(bLqSsRGx9E{CvjX=PR46Ec_H z9K!X{B!~f99wkNFn3LLb(&XI^m(%tYGX{g`RNCU{3#er~=bGv9wc9K086HuYO= zvVAN$m0JpmH6bFpIub-aw#Ku1lf4%sAOiD7!}ChTNBv8ssluV^%1BJH$xQ6Knag6W zpozt&OEGWNyJ@Pf<|#TBq*2C~dZ%flqbH{;sPpeL=EpN4@B$FV`&_)OCMk~|jxH{0 z=N~v>rSUr@UY}Cd9Mp8h%x>Y1?*o5-i*ij0I`-i3iO`rb^epP_)7`)Z|NJap`%JPIb6ej=?qocx zfbUWh0y`c&`@7fV9S{8LA+hsHu#;0*@U=W;es7fiw}SZWb)6A&n0-C`<|wJSO`W`M z*0uZK&3DFg(A4gV6WC6)5!cBCOY#G@RI(aK8^4`&J1O=(BG8cRD$!u~mrA9<32bAJ zpggIRHuiKu!Oi#PZ%Z0csEJto^H1(U<*ccH3n|r&_|%|8Nea4frIHSQ8gg(V|3nlF zMEtQS!1v)bC#xI)W=&dymE8V&BPZ#dT1E1G2I6_ zB1|?r9-_-ej$dD>35=HdkZ_+y`dOe#X_?%kD_LI}S;X6FG-?|=Hg!LT9Mb3S*({_v zww&$p!s9(vL(dbOL5#~Ldl8t!UfmuRKXC&V=m!S+UQyi3PHOFN5pfCQiX&17-~)^6 
z{@_aiwcx`T=XBpvZmHY~QA;L6`W(M2EWLbHmH~Q%sj?2o^jqtX;xbzg*9)v>@MJQ2 zH0s^lNLat$T3FGleP_N3O$eZ(qgY(3?ZLo}Y#;B5`xhtJf)>%I5!BVK96p5UcJhjy z1i4xhvH4){eR$8LZpo+|J^`i`E6VVFG39f280gHJz_F36N6VqDRMZEGD5ym~j$*+& zE1RY(83)y1?bEc0{$0G1kIu_tYio~L%+G``-g!kk7+`%C~rh*?f>Dv!kuBW-jrUooco1x!i z2g#Fe&(}mxvqi~o+xsA8^C?MUf`HRClQ*S@C`-BjkOevGT|CctSMnXRwzBBdD~mF; zk*E8ALVSn4Du+jq<=aLjI){7x#1_ls`dyXl05mHkq1rubH{0!arN9AFO*wk(f_TpK zxPhNhBJ%DbecZMzv^>&PF&M!Dy?UrWbE_rgXY1G!3bKmo!BbfJIcUg&?o|6;62kC) zPP~B@=(rJvITYW?RWw;2G%v0-XEt$ffITNBy(V-_-N-tY$)Z1J>}WP3uSyH4W!CM8 zEgdvhJXU)Wi>CyjW!iTWMO2{`Ey_Puc(**Z#5kfu7|01X`C-%*4I(&~cQQt?lfB-I zMwn;$PtjflV1r;?e$b7!kC}c}56kfPy(_OWoMIc3oE}!>?^)-Mr28s-BB`vQ>D-TY zvSS7{7OZlfo~I>ZAiBr@t!df>sAcESO~XoBPY?KYPVa7_Q)&wcyLn9%137AKkYznYgT`EUDj+mmE)5Q?7j z;Vk&vtlKJ+Wi?5OS@a3$AqF?dUH~EtC>CI_n&#u{c0)eBf$BHuc5BqHU8*jjErhqn zZS4z4h(dMCOtAr@ng;<{tQw75v+Nr?4iwrosM{2fR$h!Gh;qq+hW3%r+6#SUcJe&} z8MtDac zWDZr*p`GlV{AKb{aL3*M{TtC>$Q;DKd@CNZ=+msP(Y^8~|G1_JopAz6ezg16_+5is zV2xMclfv$Qa?4kmGW9*Ff#b-cvSckAG1j#dm?$#j9RlQhUIQHcngT9U^>U6)o!jXh z1U}s?<_QznoYRJ%f!Z6E{5!oD+!?IMSDzCJ+Esezcz&A_VRVRDazj&Ok6Hzn+k<>` zstO61BW7MW=&i^#muPPEB}}xV;nq3|7??CpaOosjm`e(~;u8E$X2JAab|v^BtiOgM z;7?n`2aQduN8e#=Yg$`FPCYuWiI9CxC$&nYSQY6(VT$)xh7auaDVTz)E8Ty<=s3g-cO z4Ecd`?Q+WPPIKcU8{nXQO$=jQGR*3WJ)MdwP*==ifneVl(F%H^ojD7a&EkD$i_8_=GqJlC#wEAzkqj0a( zI2o4G`md)uZpc1++a9`#v2!qpH&Q(}uW+zZi2#gK9CJ`L6Uji-u`vDmA2f~!Q#75g zO4@T6C!_Dx;DVIwx|XNFy1GN2W*r9+JkLv1losFv42da`0<(HQR8q#L4H+@QfC!!qO5vuDy%SBGScdF!%fdRVb}Mf6!VIqcM%#1JCWkJQ$U zY&LFk%*5NkVeD6d7G67{Fv|K-i^3B!TQ_=q^H!)U8(LRT8jKU;?mS@N^$8DCj?#oA z%}A1z?IxpSMK+yt74~U64!xi*_v$j~KlivMMCm;Dn&R!&_}d>sxQ;pE~{)U21euTS0%-+u4Awu-ag(fZOMDpd_9W zRN|G|`L+S?bpubG$ziJ!tt%>H>dxz*w7ajGYaFChc&i1vM- zgGPYsx_FMnmkF+(GaaXVu`XWQXc}uUMS;;8<96}7k^^TzV?QNHbGB(tvRad-895ua z!y#iwGVdu|C7_gO)u6S1pL^keL6VUq32C;y{eMXE8oN49W*#P zYSL@nD!guw)mnqCDy)^*Xdk!H=dih=5k<_U!Lb@?DvU9dhOelGEugiftZr?^?<@`P zaECkG;SR51FvhS+4AZ4ynbxZ9AarOril)`96oi2!4m|cehesXB{iYyz^Igzds>-6R 
zAn16s_Z=R$ENuto-&jZ0jm>#caXl&d?4sf{kt|vPdwt1!fh6`Nfdih__-V#)ZJ2&f zzV{EqVyq>r1Qm+zz~`~&(m6CdP7KRTvq}xi43es3Ggo{vaWSqWY73g`yWIlr@b8N| z!s`xq_!Ur@swTUZFZq}8pZRS0oQ4qAwTaybYb|M&a-H1p+2T3BAAiK> z%Tu;hf3D5?^-Gg9|9-H>P~{cnrhr@%2nBIS+76MekX9=u zbQHy6jbB!@n-PO{gKj&b8Tu@?IoHz_quGl2VuKU7go6&g=TVj#$5HGy0zBWLG=`+q zOy+Br+8}&I5H{G`JK*g{@6u|u@cmz!JiL9e7aoC9gv#LBjI7*}7dfi}G7VZ(*!2y` zMcF>$@V&=$j396nQ3#GxpiROiOPM7Fo7|v{;7!77wb5)-fz~dL@@RSv zO;6(~LGilSP%GW!Uo}MYR&9nvNnYn({4`jsHR#*!6m1y4Rdr^=>WuI5ah zza$PjjNk8)J~*QLL5JN3AM&WWwKIG@v|deaB+qbXA=C38qsn@9T}Y(5Rs8(L3$t2% z)aaBn^dB*(Z?|Jt8fsI(&NHVB&zp;N0h{u;2lpjwUCj-GKj%=l>Ja`5n&4}SAD$9pY$z9e!Agk3W} zKjY)+jEm(hZ~7I&tA*Pb5cwNgW`s^M##6y&Q8Lb4!Z_w$!)L$g5PJfde_JNQ?K+{T zYQ?aM@%DE)II8C|r8BBz&U$>!)#p#R8qGKxO*kui^cw*uyFLNxYu9nTesO%Cpw}Xp zEb&zW$&^i!Fis7TkTkzdl4MITYgRWedHK<&Ty7ML`yR~)P1>$c{0p$J)`BSwRjz9l z=9{dPF;LZ3q?-D<5GY0L2wWvF;uYCsFt;R=c{PrvLRT4DmsI8ltcJc5wVkz+iolV$ zrG;Emr4?0Gf8LiC!xgrAMR@TeY@}b008p#^bzV{=C1qhSw+c1^^|s5bw)@jiq3{)W zMM83MPJVsGRWjhlxz9g7I^Z|&4ru!hp{Q{4A$s|o=iZd*)so_N0yNrSsvM&$jMu~K z?{M!>5qcJ@GfbW^eeo&d^HZKrmOQ->Tv^NHVGB2iaB_uEZMsK$^xkrD(a0(zUyoUg zuDCe8;`y^{R^yOm_W^jCmQtv=iPJcs8z1u9e?H>Ty%w?hQeA6}!DK1AEGeB3?KlKR z&@5tHmEn*gOhIKVmEkMT6Kidqmsx7GHg|;A9qw?4JKW(7Sd1~Owwlq@ut*d#aEKyB z%ae2}!BPq0P|;`v9CQQz>qD3S=|I(uS=7&aYb|MN$a0JEByOyTWkolGm*4uahCHu$ zIm-F{i-PB_$80C!9}hkLmlMTNB=N1qZgTRW=8u;JA8!S7@pEfmbpudZKqVA|J&&E^ zx`rijPLME;*RjT!ySGFSZm2m$+k*a zmTSiOjCrv{Sa5|y>_jY!T^ykpXEQ#TKjq{37hEPoQk~%|kG|in39|lsJZPTK^Sgx3 z->n`ol-Cw4iUp(W}Mb0%bdGmmF|A?Jt#7?*&Qn0Oq3u>#FHyi|i)S(N3 za0J4wJNJ#L$mcVbFP<`Fmopns=@R>W(weVs@OEEr>w7Lrzc6akc@4TvEO~ z<;BH{=QG152lJ}vWGhU*BwviUJR5O#y<)Cos<1~pR=81&*KE>khcsdb*Qq<13JrNF z(5B8!@uCKeMu*n7jFZ&`)iNO8sGpF>_N4{Rk2@drITWI{Ka`@v+Jx?I_i*c z;1P(BsMnys7vm>jY{@1mSZoT?SYZQ0v0bqpPgoQ#smf@WfMCbc2xHvNK2du}Ge~Ka zTg>Pg%V#mC&0ShqNY7bP&CdArTdDZ+x?0z;Rh5q8mlYvY>ODL*se8&zeC*U(+vW;t{}Lzg%Xaa)jg^vt+K_G<=G2{ zUcn35#yBPJ*XIL4t?GPp2du$n3FUf0vRSiA4Qb@ydQJQ|rWt#Lg=Lu(Wa|Q5>f1~{ 
zdn>`BR;+q0oOqu>x6S>&%Q9axNvCWJgDwR@FQ#=kVCUWr?M6T->qT$%U4vl_dOIau zpR-xccyhjBZ3F!NK4Iw5R3TBTMJoK) zE!ZXrlSGi<)9gibdjWy}E%Sn@pqh}cPPsY{`Qt|}%N?IXzhw2r8N=rbo{bFKA|`0J z*=dICHC_XD=+778#QZ%Q^{J(ngy}0$&)abjx-&XS+_xWQ+|vM1fDshtOA4jzF3elg&ueF6*=) zQW|Aa^lHuadd70LAHE2Q>N(!U@Q9PBy+_c_5f|6b;AV>6BuzU~w~L zwc3(YhVmy7Z1p%)>jgPCtap57TZQjxVrkIp73<-c`D{xfVocZ}3S44ULY9ziQnE@= zHX8WN4*hWXZ3n5#oGP1;rc17uF&o=pcdvurY7#pU0TC=? z^g?mHs4zuInHA_NBTq_}!yBs1V0#C6{RVrnqSJ~neu-PISdSDw1?ZBjH0UTm`7Oc{ zLRDc*PGuCOit!sREmxp)hE6vuS6i0z6-m&=Hrg0Bq#5=If{d=aBrVr0rz@_;8-k8Y z6oDw$WSa@|)tcd|U?B}!{ff;9ceukH?r?{{9oSbrGM2nFq$LO=5SH4dNgL9nV7@4L zGAj9K1v4o*-g9^`aOnDySW3DbNp~&qG?3SJ+fJb9#)7s2TWE?*^CGPnE-WTgG+c%9 z1=b3b6-cYGT2PdhZEne>!Kn<}%rZ|b>xMx4im2&u+*O?TAWO|Uu`E){d}YZBi&j4) z!t1sPD9;U7Hw8CqOW73!UByvDaz9e^JxgC1ro)0S1dOyL(Uz^YRQ8)@+jscu;g0aS z!yWz|pp9m$6JFR0gs8o5PvS#bZp2MCwmz1xBENV zUK3aT{obxxYbKK^FV4=nna`+{!fD2MvO-Bq6nnIr5xsT;M@g3JgfuTn@`^$yAOyx( z)@jZrE2$9pzE2PZc%Dm|7u?L&SYvUdAoQKOKokcowi!uY;b?>Jf~|5^tJSSXy+Wy9 z(%i@e*}xCG?C-TXISToFv*g)q#M$#Ftc+!S;4*9$WW!4)r{`QxDz5TAyTLK{D~oD0 zXx%?VZck`mp5YHyY@a{jY_maL9WXy?)3kG(%{dn@F8OS-Wu}g)25)h6^pL}Dg9C4i zc46`t9ElJhJ%#io!V%c2VmrFT{`n7_W{>HW`;<}6PWdy{A3q8TsT|zcC+J7Gms|8W zV==r$eEbpZ?G<}ds7BAZ`tudfE>_G}243;P%7b8lALktO+dS@DlJSV^Rl@TpTZ(MN zQO{>TD6oqmqo>a~zut1SbZ~neP9F4m=iMItfhH(d#DPOAG=%ONDRRb>m;C8ZPuO-W z3pFOYe#w*PbDoV2^IRfd@m;pWY%fX6kNND6FZh2xnlj9qnC1Z|_x5=2QIDq3wQ=<; z8H`lm$B3|rny?c%j}X!|}bd3!qvf%GKOS4c--s*+@M&G?f)^SmnP=m*ftIsCCERNBJj` zh+}q7x*XnX(`@*!^ZLHa;ldhCxx8dE|5t8azU2S-XvJ00#@~CF!A_sI4}6XSD9x9* zsZijBE>X)Tj9eV~J@=__K*qR!o88?uj~+DmC{57UH(b5^j6!Obr%guP3Ol=D^7JX! 
zHzk+F4lTXIy}SbFHVOIL>^{l_o6OlJTV}Z+4FkHJkbc)A@V_OC#gq6opOFHSyo+I{(z%)%yHYJ?aMoxj$afKWPnm0twzZ4K#^t}E>6$5 zk+2)=(h4QrKtnobb@2t)S5rP2x{OVq4_t-lHj(o7^7tzpL9MhlnCdoRPC7{Cpd5u# zfT$?5HQVtu^Whbf^@ds1L|KS=RfG_rLX_K}6^9&lEa`m9i_?^q*vGg>h}I4_k2DSd z03ZNKL_t)s@6%S`e`k}1#hQ|QIc9zK1(yrKXH@i$G_Knx3_L=|LwNzpaZpO4P$*X+ zl===5t%h(yWV}z@-g4Bvp;#?B8@}Z8t$~M!=;Hx<-H1-0@Jt59hUxHvtJ5!d`f|%f z5?FP*94xTS?WJ$hRu^Nt8&QlWAL&ZBZ zqJVZ-5f&b)ClS>KlUTN!9cG&WkrX&~O*)-&b2?-?%Gl-(9^a;@`#uOjA%uq)g|zmY zMClfJHD+CUjE^Hay$~5!P_9{y#*AKGGaYAabgg9n$%NNtOqTh~1_7gkh{$vCy^?e> zWq5kQXjHKlJBa2EQ54Yf5>hNiTda^M$E&mVq(#~iTW;CRXAIA77!8-KGwS>=3p!8G z>npa?YfjHYZmJ#|eIF48>@*a<7odC>FHpF?#c|v^N7Lq%>y+usr%Y!B$$=#(0@#mf zdJ4~RaeWuhb#SG^l@iyH_)$z44p2o!Y!+-+71!fFvKiyb20;v7zNDC4arOM1XD=qK zI&UEl-{#(~Pq({66s_3_3P#0}@%4-{=+Kre+O9xsZ`jPQ7*1xqn3PN+LlJ!?-t7)| zxWgUp@ZTU_nbBx%DJsLZ6s&WwQh>Hpl_AeHqhZFyFymj&H6O19rt9$DvBP`&4&6`? zNy%WK7;FU2c|o$VoDU?=x{5$a;t+IFu%2f8`HPbO>&jw#9`}Ug{l1`MUil?igf?t* z!!j|Xz9rC>!a!jJRtTh$xJuv)fzp~PEtxG!KEAB@)0JVCS%hoRxB2cre*IEgI%kRI z>^$X@kzuxL2@VARwC^woBn?Lr27hr?k_o=@=mlLKG>`b8_crgg-{L{@ z_^*99KO6Q{AyaGevLG*Vkdmm?L`aSFH0TQD)&a?Z@8P?OM&z?eb2eE;UTW4!j*x<) z(g-OCd>7+6c)p9lQdF9#y0q{H3_HB@zU< zVTtfp-Oy~N#IG2dFovWwce$L?DXdGOXT2c3vcCD^=S-S)E%M=6}R zLD1?DMO#9#qFCIpe*OtpUO+pDXbf^1^4EF|zE$M79$u$MI?E{w1jN$o`Wcrd9*_uVHxCu&3vSD<& zC9ML8cR9HC7H{9*;hm!f-DXL!jcIk-4E8&8mnqFOp;%q<^7AKDO+o6dFzYEdg=VW9 zDgo}Ra$Ftoh1Eq}^jqFAnaoI>4Q0MYh(o&mh9KKA&pzwss%+=?g;`%Xucb}l&q_cm*UOMCG@&(f>XT7;${KE1w z3t(nhpItD0amBQ3QnZ?QS%tP1;YBF-i1uL2@xh3<24kKtZzjlZ zHenn(bi@jgt+|{OOsfvT&V3FZyv3c3yR!-X*+k$w{|?$T&%=mZ-| zy`@@QF}qyy5{o~-;^HP_xDn(fP#JVuQYK|>wFDUDBfS>gUYFyO4yTJX;dqP0!>ATs z++lye#qmKzGn7AK!TX8;%&GD~uXm=X)$9g#w{)f)<_qLyq2gi=*`gd)qmSpv|IxBTf)l(AT+v4*vv-^Pc_wzVH+e&uV&ng zDz>_d-@DJj(S7a@+8j3=8VJ(6ce!5>wH2&z6mhe~{?UNx2)yx_Y@`?kC8~^B2O7J+ zVR3fFXtZLqYO(U$WZM43T%}*(2!ud7J}PJscm++jL=-EQlOfk8WGhR=cFcA(<7SqS z79|=3T2mDoQz@)aAYG&r5;r0SeV^;iigdUoi!?#EP1x%ag&u*b3nJfp*hYiSD7OWx z%{3Kv_!e 
zFvP3{TUC-=PnnMsR=I=M8PM1n&~5}Y{2V`Q;m2*7!Ge}pP%KA`E_`SR)@;cq3#Kd0 zDz6J+KVCilCL{<4Cv4#E?$cR~=uQ*z?1tq{lXI`2j4L9&Vl|vGoTaRi3T@nUO70aL*|wwH)F0x35z_!?euBv?b2z6w7e48h}bkjg52O1E0Vdx=rU(5 z3uLxpJ)AR~=FBz~l>rAsscUO(V=Pja6ziPzHs>aGaD7eE@Yn>9&Jxy{q)<_<5N?u=0r)O)tIOeTgK}=T+|HO?x?DMJWFnB;YtreM zo7I+OX~=$ZX4@U^aECkG;qMX_Ol8QEl6)q49w~xM5DC6Qc#VfS!)e83CfEip`vaE` z_8i{sN?M-4N7C-P>})hgW6f$+vY8irekHJpWEDZR%2`iyKDp66TL_{M`WlP?9EIly z8V>kYGo6<_y;ekB=vlCipcM+1!jdHwiz$5JNM=$~B?Z@`lIJVKD1*F!z|^_#`E8Ho zk6OdLZdkUMN|U7(!>OXM0$*!drJ`juGS^(Ekh_W~RJ3BrK`02FJAK#R67C4EJKW({ zfF~Vx!#)y8X-W{5PZ!U5wmM^7ZaLpxF)x<*(j(Iu%Y2QmT=v2NkJ}IUP46B4q4ypK z(e7XSQ2rv+;l~=SHH|pt;NXBp++e%eu$;NfZgSFOLta*O0Yn>|z@rfbbW4rn7Gy<* zHip$U!}nZz%@|`XRi!c3uu5|BqN3N1ag?MH2S_1E^V*_jxk=fiMIBThg6BF!VTkAX zw}jX)3?WcKlj!~j>{`KZbd$n9$RSuQgA=-by@!S8v`@egS#|Isb zn*lBJh~Px>yMGFZ!zNEouXr|GuuN~5T+FfBptL|n`*cqpa{Tx`9>4#H58v5gzwP7A z#T%s0&x3RWg8nhB{Rz9>j01m#PUp<0hQ-YhSN&s-ypnqj^AmCwe#U-y5#G)NI*04bebXCXH&F(<<~3#7ccG+KK>0t399~c2%d8~ z%^9voY}STc84M1&*P(TMpWXZK@b(8E@Vf_lJZ?JlT!A07=$_o? 
z>+?^!Zni1gO*%o7g9iz5*N{EWC~q$>AzeiCK4I74-S-`=ap^o8^L#WVy;<`4_8doQ zl-^QQ9(j1g;OHKY{=-Awd;frYyA2{o;{Hlw{Nqq}YAe^FxjUf$=#c)ppf#Vf-7HyO zo--aC)0(I3^>$$AL-rh>|2TJP1Xn!0Nf<6pxk&`e3qhV}3fmz%dd%^|4|(sy_xNx( z=0VdZmiyTKhkWqcC8kV}PcC?Ry(XXEFwJw4Mu$f5fUt4JW1BHt!RTkOUQ&6uokJS; zf5*X%;oYkO#w)I_{>a(IO5x-6JM`JDrSu|B7rgXEl>7$H!a_HcUDtSzvz9HbN*Df9C<62P=-Jibs>@n8k?Ui~pN1 zrV*>BE)A~)8?kNgAl6e7uaNjDP79wkp9kF zJbw2Pzk9pQqkX?7mVN-zj|h+7p{L-(?B6J>&p4gtoG->CBRF4xHi}#{2@mdZ{N6wF z&WG>v_^8iGpa~pUetmMcmMUG6nIUPKl8P7^wApF+>_m#t5s0t1HVOxV9h^p&PAE8# z3#xU(i|Z|`RFSFt z(=DI=zt^04F1`{tqR!STw;_)7=n6qrR1}LG-BYz)ECL}FUL&Nj-z8e*co&+;_h|3O zboOEb-}y51{9E@%A(Thd+rfSC7O5I@@O;ARX3Dd*%L`ZHP^(4D+#sA?8oRsf-0N`I z4+$OlJ@csqJp4wV-u-tVXi`K|g5ip6bx5YG^K#;ZK>1Ca!CMSck9Ue|f~zHqt0#PR z?QrUVBMs6P6sAF{N&;Ep{iN&-fshgrC@`9Ie#y;~AxmWuSgOiWR88FW9w$c!>>cc~ z+X`q3gMYM-wVIoYl+o~GESbuTFCodGANrBJ>R28`iF}+9Z9UXCe z*y5-c68aW;JKTH!A>w*UHJWjAbHmex%c)c6)`=>|B$lx0l81^y{*W#59qw?4JKW*# zA9W7-Hp`eSOa9l1;NN_KBS4ti($MJI8AV7%FmU;>>+{Z`$3J!@$G#v|wL6g84cMy$ z|0T&8+*Dj{Ql5O0^Qo$v&rM~hN<-;5v`>5<9eVt(@9<7+Y1%puY&U|g(44(Yx!h_t z@A^FKDg3s>JNp%SSus1`@X=5(iUfhGbJo!ol*H`{qM60l2CXey11oai{0{8#BZqBSVM$|} zqBIi4p}1r1y}P>hyXNi(x4Hq500@8px~uv7ASybdP&eUrPFdGjYl!2Jc3EMq#hR4KB4aww2z{Skb2$s-JS!N_ z=A2IFR8>W%6%z+OaTH*zVUgwJWyN@&GF#-B(8F0v==p?UND%m6O5kOT!Nm#j=2eD4 z$d&1W^7w#qHev749tS5CPisdV4+y)fbT=-t_4U`ea_b_yqnM3`kFyO5FX2`N!x)U2 zGnwvl^6-?y(}Jvuz&7c(Rv4|{;>y(*JDhRU&dH*Wn49PeoXiXt zuJjn(T%(=%96g@0IDJ;O%mO}s+@P~_l}0b2_5+xnQq1ph{A8cgX~uqSsN+7tV2#zy zOI-Nc*LdxXTU=j@xY+PmKF{04t#7cNU1nqNi1j;rOrD&wcYMNpvc;@yVb&X5-fYq9 z<=lBvaQtkDoeglwCQ+x$?v*()Q>yfY@zG=6el+IZ-IBC)KtR&lrn9xe+SVm*-MYYQ zH&)qbdnEQ{IAMDj=aCHh^tM;&-9Mt~pJ0zCjOQ6gdr#RvNja_C&~Gs8B)oBaOj1nA z#&~UGm=Q&5lbuG&1@n+HhojkN|1`#V1f3DM8;l+uv$J}iqoaE~e6-KQndPBv5_j9I z@AiosUFu+lKgn23QYKHQ%%&X*d>Wk&ovl@R_aD;^_BlA2agr7s?d`FDGGoy6NQ?R- zw%@XZ7eRxSiJG?*!g(f7IC*h==*1FHsFQ0v;JVvdB8Au$bNpx@jT=7@Bn)- z=Wx9y?bpAtWQ27d;b4nap0fJ*gp2P#V1NHH_a5KpZW(gdHR!JOSiR82YdNep!DSWc 
z>4NF;oW&w1uc5->S)ZiWquAP@{oWys;DEp-q|qvIW5{YZVZGx>K20xWEkSpkR=Yzl ziuwA{F>W^Dc<%uxdt>hBHS^j7*Thc-tai6p-MGos-3z?2(`L0D@@b}|a$$s9zl7~{ zxOzCooIJ(mk2rdI$itJ2gW6DqJzS$pb7h;&*Kc#>)+H{j#cTuzq#qM-7V|M_y3c$* zXJLjUVViyGpfo~2h7V;x>2CLVQ&o{aI#m^kq13}QONh#+#$gT0ULYnCICquzjqUy)@w zEBEhl`edJbr#Xj3B7*;?V^MvqPA4|=fr0fRX=kpjlo7uf+(b))_B>3YW#>NZ{6j+$1~=&M;%18)`m3O zEo$e8Y(emuMwFT5yqLbj6ea1&V-6l4F-;4OGJ|QY(p=qTXLE)13#(jQAJC7WSr$~Q zBd8Yi_TM2oeV->!N)9r>1^8h?bI_%`(j)LKkw3wilDsHc5J78=BnjB)?~@;lIlObo zd;4=HWlairjUi!sm90x{u5GNbzBQ!V^l4Ov*7gdvny~leEgsx^#QTey;|ywiT->EO zyvSg4lkLG8msS!+&49RWU<_QmaEhzO9Pi)b@qNC>!2pT^}2)UM#<@Cp0-54d|$u%G(aXh;&rtT&*aIKs%{K4NgNh={ymYADl+ z*(~K@4i7)#PmD42I{|CMfUk8zuI|RXx#n@RZE5>wXA0-AzF}w#-oOu^m5 zly{zF{9;`5UJ3`S&?N0&RYC1!jBsSaY7h{c%Ju(b2a>So&_xfdKYeU$utRf zj@fu~!ZfeRu;4fF;x66QP1be>Z1)3Np?zM4(0Va`Z<9{txZ#Gxo3C^0WWi}#QB)q- zkfggtZ|x%6TSGQ`5wSHmV==7}t?U1Wi_sc?e5+)0(PH(DE3B*~Y{!Pk3oy|duKy<6 zw>J2T@(tcBhM3`HRyMk9^#ZIl5Uvo9YA$^vp=w>`=Ct5AH~6D#G*@=n9W2;rG>AJl z`1hR&M~v{cF0r}QW~CY8f;O#d-{zv<=Z|lfT*{loJJ(oSYqFDAJZmt)h+yyr7hfCj z=iU*wrwI&iu(~l|cNpS14^uaA$r>Bizr~yWkk!K(-<(%02(e+C-Y{WxGp3r&czyMC zCX0xA^(x)_D-j2DgQZ{6bZw;Kc(PS`n|FiA@?p!O16qsL%P@M#Dc!X>3x7y+_X;bk0qe2D zx`?QEfx)ihroTn9eZ*_y1>>})SdQZ-Xmp7?BYG>V>~3~giw%k6H$N$4EH)hA55K|o ztyTWaJLT%MMYw&1-bRbvUPu&n@piw?x=r}KD=Eo=p?&Rj*4LYCH5^UPU~C`Td5zU8 zeg5#zUuW~cqv&3zy*}W^T0|T;OlOj%4#P{VQu+82R*LhhGio zUVMX{XoOiiV*AYr)2w1q8SomoxI=$)hqdh?7lsjuZ=p;fpOVg}94$O1!8W6tZN9NO zr}P4d6N2`TI1KRLKf=A^@x&TDJe+Za&Jj1-#2dfI1-rqYb*9|Py9Apz7+j3F*!FPG z4l-lG#n@=b;NolC{BIK4myfx8nlsG}m;gVHX!jF*AF{(!(pf>#+QE$0cyp&kI|_*G z8fNqBT>9n;|E~FU4kkGZ9NuV?{*{ZYbR#x8cm>WlPrfIpns19P1?k26_D%(3NY&AVvJ~-=v*MsmPTh|PK`X5($ zZP6iZ?a<#?V|y6kW3dDT%@szk{XW;D5$?p}THfNq?QOO;htxsLPCR04+e|h$SznJC zBp*5lIUk}`8mkr8zTF^Pd5!Dil#|NC^{~=Z;jX) zMR?Di);AWsCP90X^{e0H4R1*Q)+x77bMo2;8xXf*TD=%o7tG%{CMzR~_GNk-Yg}JT zh~07&E7pT(m0bym06xHf9hh7XO7bhx-fbN8EUxekA@JEy-7 z-RrNjzS(5A5zz3#b~ahP{(F?Ii^P|XIh>>{O2{!_B5cs0HP~dZw!!XJk6z#i`Lthf 
zmdCpF+Vr+>u~R1mJEyF@IpJhclGTRVi!ni)?&>C+mq%<20y>ey_zk??ErwSTzGIH* zU9ZV|mq{-6xE##Fw(((-czgnjTOo;Wn47aaPxjvg-6V_Ywnuey&Sxx`55#U=l7&X>- zy3)^9PkUSp&ig93LTr{A+ zafNH&Y%rST?9NN_3XE}hL5!cYh@7EOY+X1xG&eeo5=-+#-IDcVyzWIhb&G4Q5VO|d@>ZL6;N!bxyIZR}?7kk-+MaM}Q8LY! z6Q~<6B1pP4+I`m6I}8#_{UblazT;kylXF7O?nHgRs$K zxISXF8_)^PMs__b6C5;%RxdL!4Q|gm#Q6$Wx0|#=_X%Z;mnF2lutn$6O}1JiygKB9 z37{zPvy`$bD5?s)CN}QT9rW4gg|z%<^PgETK3?3RvvHX%*TB1wvN0}5YfJ3~M4c9$ zQIC2prQFygYPRT)+H{(RCfD$Tb>3*TnbKivwMoMdaP3XJ?TTxy7M-+5Z_uU}`*>%8 zp`Y|fR(9E-Ni~?VeVUPD!Se}Q%TSDYK{?LoU)*BU_UU?`?uLc29yaKc1grELj;$oe znwmvvFky?Z)u%Tcvfhp81}p!545A91Xgs@L@1Q%{K2v;s~aaJ)c9M*5(`#$X; zCkZPS-iWE!XJ<8{8Gh2pM-dS@f0oxF?Ow>0+ilEx!DbHoA6qrWEKBz#9vdT%&Aw$L zGH2H`A6}-x3molX#0AgsN3D{r-HM|eW;I|9)_1H9eKtoP7dw`ozZ?~1tVg#Sa_x%2 zYe!t1!KvrCuoJR5@K~=Pi7X~AXs$0GPMOqDTZ``*T9IL;W$=yRV7%o!HVN zA~f)OiK9L9*&2FmHY^P$LSfg-#?)WvBsLe^#hYbDhi*DBE zQq?s@RWQk?+!;UMCr7{JzaIRYf875O2k9}^Sh_)*i_Hyg^{?_rYv1Nt=MqgXA#lOD zpY*V<>kocdPHJA3C5y$Jw|? z+eA@>G3HAMz3S&bRasJ`DS4KXK4KV|?GI895@~K$TM#8R;TpG0(`WdRhP2i?BgN=qe(YQWOQVc}0l_ zVL})LG(t;2MYWicr3)4Xq_xF{(K2-O9FeUtbxu_*NVA;jydW>_Svx`+XYDXl)Kx*c zSdcB!Ww2R6g$3*IJc}RL<^GlCRJC0OSV2f5v_!tanu@wADYA?_O-b|R6T0Pxwui9} z=Z6GAKoIx@XOp2m+a6RoWuB3y8ELv4L0Hx-=Weq;o(F+fK~=KIDk^+z6yrxRv1f>E zMU|(NXWKN(a*DF1c>b9BvYeB3J}&Tyq7dKn@T^(REAk1u>a+IM#ey_Tna)en%3;F< z=Z7={OK3~#Y)+9bSY#Em!eHZ=AZZYKj?mV)dbwX@XWJt!47H1Jen=zoh<%_+Df5&x z%~>o;imIl#p^x%-Y zkH$PXDk1FP$1Mi^n0C`A3~TDy5l_GOmpu9I|H*%Tx8@%w>#Tm~5BZb-@h`Z2eUmHx zjAorNUzB9EhY2FQD52pSqGuDz*7b63wYnlp7i5bCSy7S|XY*|tyA18UWe{DJC3Ok4 z3&D$N#EvAi*ea(gQnEB-J}=0tWq)V<02d|8{ivK_ii~+)lhqCv#`tkU;8`MDW9yQ- zE-12`G|kBJlDsUbst@*0>jgN^CkO+=(8seD&s59OS#{2Qo-&=Jl0+VH=+3rbg{d;CB4d#*-#4o)wej)8fTXePA074cFCcYUP%fs+{xPeV72u-f zJXxV{2yBfh^RvD?XOR^Y)pDMs=Y6-aI4{8Sm)jx?9nRY4rJp|eta5gKs7i`-L6M%F zAAa@tFb?Mj1W`obd&?4IrlKz99v z;t8DmgeUL)i2vn(`vHIX-N!s>euw7nxA>FazrcU^qankV2W7py8GhCuMrZHuJG0#1 zs*F6(n9Vb0^PJknm>?pFe3H=Oxep4;o_BSbQ}yk?ObD&s9nefrDb+;G+}*U#pE 
ztH{!fA}=V7gJAh^O>j0c#u%uI;_N(7kk&q>@d*P*=oyxS&TGm%Tb`?mlA^3XI8Qx4 zM>u>xz&qP6%VmGIHC|UGb-6qrEpkIzT7tkQihP`1-rRn+$husfTk@i2UOFl~;>ZyO zXJbI?8be7{RTNoHmS&Vy{Xt)NHu}a`hqYkq0)wG40kszp`+<<^QdLlw%k`a?hI!$LqUHK@=B%G7isd#bDvJ7p=k%8=WvE)`_8sQm8X!bQQZ5!0ML~|CVA)@rs(SWU&*u(wp0_-2xU%oAnOQ;IUvLMe3vh3M4&9m*dER|@T zU7owj60E}p5zhArY>lI$$TG?zCo7(Prf2(^Ti!JDd_3RB_mZmDE*EnHOYvNl`A(9naL^6Tj2~;#-lRJxs?Oo;+-ykfDaSmevO zu8L21m9++E!S@V-ZwQ<@yT*NXW;nafs%pxzA}?T3)GUf+wZRYF^1%0&*M6Z}mL&Af zu7UHsKD+L&Nvmbmo-kbgZ9t7Jm;ZlWKvDC2E^lL&?eM&1=#&@B--V$e3Jk8Qv1P@i zFf2+#5Ev5Q;Hruut;lPbTZ`{m8Xkmpc`Z_uHS??{Eg-L#@A-UvID_Yw_ZH&7EYI(} zxKhA>d-9L`uZMp{ZA$*9(ZA=9JKuN#t$4X;gqO&d$Qnc7d>S-ZOGadMPFCeiiy04R zdlYrSDC%>wcZJ)%YiuU#bb=PXeKiivUk#LMYoC`9?DvOUymSTQg@k+ir0E>r*2^!o z)+~PjSJn8796QZOrx`~lbE>Llk!9rLInl{%`6V>*v2j2n4zZrMyzx@i%oZgTjz+UX zv(+Vz8#w2_ocX(!pPFZYOJu`G7?@yCJ1;1hW9XWTcI;S2c0&Eyk~4~O#{t`!otKBhnUxQ%I+ zqY1#B?f<{w_=!I0Smyb9ch7hzqr>_a|w6Mkqcj`Q&vUAzXwK<}ge z+6$3Iu2c~*ujh<$>%_34ifEGB3X1}(zIuO7nzz{j`QeAJvr%aNyH(x%xT zG3ZCXVWiq}B#-aKbmEWe=UyGg`55OD1$=V3O0yijVw@vP{7)>IVm%-0Ez5;|+~0XY zj2Fb8szm1JWvs*41iO6Jy*Y}BHIAPxxP&w7Ua@S-fZ0OJB8AEHlK zl(8PhdIbJ*@Bj5bb<4RL!!P=TyF5bdy=7aR%eE*wNr2$)?rx2{H}3B4E+IGsm&V-* z5Fl7^3l_X_cXxLWc6;u%*O~MDhI_`R{?JuVy+do%uu6U5Z`;x%ox28#B`k&!Pfe>P(dpW;d`gjYIBly%Op?RT#K^q>m&3Vp3#_yh&r zO}p_NEi&PW_UZAQvh#s2*hE)&)0owrPugx%?Ay&=>z&=ICa(98_rNr~{%HpDk96g> z^;}_#9A$jq$4)N4+>Fbr;Z6s@c?>Do8!3x3bfLaOmL1s4CXFg zr2!75hr#ctC_^Nq?ive3zG-zo%Z3rswIzW-JfH6&u%>vE7u>dYdc5{SBUurj+-Wik z){dsm@1v6T({+YbBm)>dp!QOZwmzP}zu>+cM#N`465F6Ibp}Mm#vxwq<;Tj#wE6NWh$AgNSrUnYAsWMNEEV9KbV2XCv zGP`b7UwdDe{Z#DBy*+{_k;Dlf99dVMp+FP*lcut?kGnKAG^36}XLlADmdj)kD4w=A zIuabc=T0vi4hy>ynei0&BXg3)?w6HrXVH(XJ{<&enVv|idLY&yLc64q-pDT?#Pln~ zXP$2ysQwxhTs7A^Aj-ejy&B})k8gc=3~h7Ix1FZN{|WJlZ3jpAGK2%pG131Qyfx`B zF`f|c&23;5n;Y*=erE5f6J*2F5d>rYMyNtUr#l)PppNth!S8vLH+V6v4lpj#N>K;-;UMUAk zX*G`Uvd5xsl3bE7g!ziEJ}U@R9L~c=2xQCe8&_Os9aC8bupd0Wkz+Ejn>d-A>aGrd zyD-&t7U|=&fn39&*Myrnes{Dns2Z);#7!lo3onUKExHE8VItf8Cq_v 
z@H<=4ZtdVOE8g=L;{pRE{{y@2s+-gl#(d^Tu2=v9l#Kj5H89bYf;ca4kYTG zo(4r^G)=49HZGNUnLE$SZA{8BWRC+>>~}Nssh;YOZ*wqOw)AZrlG}#kYLMS8YK11y zr#eZie%(rNF(0K`r6w}kTuw#F1<#~^K%zGFCZd>4-OZStUmZ zii8Mw+_a}Y`qMD;!R6mR%1}4CscvHa_@NMgfj=kB&+h8(6zZnRTBfe~c~P;k+A{A= zI|44rM(WD3o8KcJP~>HSN4}CL6;$b-Hr3VWJOq!O@8TkDk$y?_zZuVGbR02mJ&%kT zy*OCu?SCAjzUF%19iWGki{W8-;V93^0fn8c*qpDEt^ewogiaYBd>|_5dQl?#TO7N5A+iY89)|=cP!5Qyv_M z7dGOqf(2ki1y$3a4|K>HqI*c*n~5~SeI!6tSHZEN8v<*l@dsB|Xj9Q$U#~Tq?#2<$ zFD%pN4_6_gH=a@LvuVvMRk$V{f3ezOG#j}jOB%t<_v)ZdfQ}?5y{FF5(Vwbv3|VLs42Xp8hS}ZwoUV5c&9LoW`E_^uNx5;-(5WkQFqvkkCPQf+s(%s3 z^EgD(-vz}C#>bl)ZR+Q@%$2nAQ{kh?`WPme=Fji*$EK2xL1u1E9=t)i)?-dlt!WgO4St%)d_TFvCD!4ujqY|^$7j-PJ|#%6DBKJK5kz? z%)*7bqw|(y<&+-&6e-G4gFcmPuzS|3^#Ag%q~xw<@o#xaBRYHL&Wjmhq?VWc1sz@e zB0)h&1mfGzGJ9}2qz|>ON^K2x040UZ0JM;VdrR!a2fZ;I*$Tx~Tg=8Rs@Tv&hdefO zPR1w*GyMm^6$(M=k?krI`_~9k68ybTPxgGJrRMuLe1CZus$;=;rvb7RGh+s^fKUTy zgMcwDkW%T0$s}zw#n=?}e^o8TwSdguyJ*DJhMInD98uMUtWCqV)I|fL%Ou<$rjIY4kVOkzkhbxu)OSl4u>hSmBJzFjp&J+}GKOs8w|BwY8J<(kea=?EJZ)VFF448Q1V~Ew*Wcqzcv83mO#kyFBs>7G5Xi~|GykzzvB=#(l6)N&!Ov_ zC5P_W?5@N8f1#-zYnrL3Qm)c>irL$n?lLJ|%Wz>=cYKv4?Mt3Ypl5IGS>L zRuGR?6?@m?>ZLL0F8`B$0kVvNrLhM2%8lBvbTll`ytb_c?VerI3szPH{qv0|p!kTEDNjgXfv5Ry2Uuw-r6QU+Np(lcWT0<=u z`GCc~^aXqwl?@nn1#Gbi?9+)@8jNP7)7z#(23~plYwagoKV45V6c`vgR&E}BylP2Oce&ky?lm@W86TYxL1f*}5WL3klcMjFTY z80po{OtZKBAK?py3jnkAu7|8D4wi?kEKpN>c2;9{;^1;y!rhCxx1Q*xz`LCZ)|zO2 zN!i37RxCZ&0z_CP-#*_dXXBq!yNDWl`!8$YJx2$*V1HneU;v-w$N~nYbW*Y~7L zyLM?yo!vd(5+IE$XZy9MPIem)=_j!?+`^Af zv#-IgVn_1*x8dAPEf83zF#Zbpj`bIS2ys_0s&c@%@_=*KA&sgUgk}l{93-rHAGjQ= zTah|nV3D_R*ChWrMIvh`gqbJG${Cez=uvT%y{x~)lje8!3(T|;Sq!rNvh6AUD57DCj3hL;btYk=x0@+|oWo3}f!^+Y! 
z^@I?;H!Z#%_M_j?m`~~)*DUocB2F@9UVC7A#Kp$J5$E3bznmQ-=FjaN?g$L*1eWu^ z9Q+w`etDrKB?YuTAjXl3Y_7|W=Vlh$oo{rJ<6zIyK;zEn64&{dN;andX zP&c-2yW0eLv)nxGGFs3}+@l&*(^qh_D<33~E`j4ly4uv5!sOYpe2Xgwn>OMko~C;g zAyz$h4zlgqkC9C3U{pGsh?kdxc<=Yuj{}(nw*;T(7t#;boRY=wsy1#Z09EY2M27mN~$ zs*@!C{NxE`K4HP_tKQOGRYED;!_U5z{b_0MVqJnA?>L|0&)wgtcGgGLsBqzY1q_xP zTy6cASOce?*PJhwT8pWONjW;7QJx>pT`o8eRQI=haHBp6oK`k38N%BR=8ZGw54o-3 z9K{TUJn(s6Fq7nuw+e&+%|mLsMr=4YkM;WWSiL%(8Uq65tzV~3Z|R-8UPFB!lz9zB zu{jT&jZOBZQHqp8QYX<`ao2SI=GiBPzx75A!~}fDyQ#lwd$DqJpWTrmuiZH_Z6Z7C#_zOmTdxwG zo~Io6?P4d>q%O~~pjg+^%OXuGUfC5jWa+_C!SP=oj9ZhnfcS0|{Y?^Y52pH65M3 zTbVyr|1x0P2+Zyzs zm%v24A~tm1h}8VqhdVH76f(bl>$=>$c#nR+K39L+5E@B7#BoCG{9>Q(#Awn;8gNx0 zVW?JG$`nU(d70j|iF+YzI=>OL0BfKhZ}Vw?4yC;uYg#&m_TqA;>mBNSVxzOaNL@ZH zW-;I<+pn#y+g>#}+!M!Zjt}Sqe!NolcCn5R3~1<4?HI~-*5ePsoA6xx^&OH4dGs7O z690Z=_!dtpi;0f=-&}wkw}T%auRNRr&bZ(G+oBfiH}&DVVkR`SHJ(x&ULrdl*0+q` zj!G5Ua5uG*5sf0kt!(?;-f%$YVZH}NfcxUj^F6S zlpy(p71oV<+{1&{%O(2Y=Nj*xEwAqHKfvV^kHVT_h6;36%6$A@qrK7``z2y+dV8A| z1A^6bPkxT?RkPH4X2UO=@Po>btZ=r?hSl#g*|oy-;OneEd#f-LwR=BOo{j*6zVS=G zpGSLPs8$BOW^!DO)cxpB+DlkCqK5!Cz0yR&nU|ZcqDM+LoH#k72z6?N`r}dMp?2bc zz~Q2Yi)&Dzcee4%Hk9)Q-{w_|g@Mj7*&xV{pB58RNFR#7)9a5yDzZD&vT2JmC6u?l zk9|{EQa-%Lw|59n>l^R=0={O`?FVvj58{Ig|Cl`=zUOy5_|={MI=|0Lo)F?098b1p z@pREis(3@HRPvs$WCX5I~s$sjtG_Bqw z#n~Q)tm}}7Ql|fGpdCrXp3R9jEp%4fCbZdL&2hu=p@0mFSQ9X*Ni3lb(7-nY2NtH( zZlqy_Dq?)*+gRCpieUA2XWwTB)RdEFmyzvFxS(&dX3QFI8n=IMeol(d&%Ze}uqvri zTN{O+=?bmINbvo>mGE{X%o^|%bJh#hw)0h--hy)cO)yi_;-53eqq+!*LB8<;W! 
zgqPJ)MWBxH$(Exv-#b>L5r+eFNRlKC0GtvqsQEtN%W6#_(nxhnCR=ho9bc+XBj{8S z>_y8~krkC;Y%~%rYEv-EM2D%_sQjeUMql9+*`kR$(t;3vlaVE5CeCDe}1ctPMe#4@f+B@7t7ZoeDH=C0D_Q z2XGSebQL&tR8zC|Sg9gfimWg|R`q3{zGV?``$L*UKa4;tM!TdSZ9Qsc49nwKHvQmQ z0x!=Lq9Qlx+QIyeZv%+mk@z#b3g_(rO<1GHdc^VdqpzXbX6XIGO7dIGIV6TZGsZXZM{o)RsBh8eR59 zJ=STxIrv}dM0+PJIAE|Il>H5PE-JLLGcO*Z%PrUJ0FJ)BrWPXsyU1ogbhk%dXAIMX ztQ)w`IN0WfR%-sY4W;(xdK+V=}drEw<1%@&qA zs(P~qx(e#(=sRhQb}CUIU^vBDu&}{3U5UoaFLzu9PV78C@iw4)%BjbO$Zbkl)*W$1 z&+_JK<7x*oz}tJ0?)vwK6Fti$O>E$T>NI+J(d0Xne8P|(eydx$nQ5-)3A*QoEP7H@ zWi}Qfe`8Wmr$*b~blzPdhyD&vfh=2}0vClv46L4Ry~*%=TX+vab>j3kK>y5{PIc%m z9M17{0>dghN9WWYX0-90h;}v3oqb7^Yd9lZsb4K%Qy%`qHBq9qMJCzM8>oa$nBx@oW8TUNQA?>;+53Uh@&k=Dn<7s8m-|HOs6xGO<*6zviSIJD z$cfl!1a;8AUSPTz=fyN9v0#8n!flYTl^ErP-|YB*{VY21W8oZPuVcDtI0uisk4VF8!RqtoFPOkA39=QaKDZFpbO=yvET!B|Py3x1I0(46d41 zncGH_836+vC4a1}qabZy5QnF1c*@q__45_#dM?%rSgK)MysT3&Q&uz0qQ*>c=$9`8 z2m+heeGU>3*gEY;88QO*AXg(yAmSHb)z~qn94pSx6I6OIq$nawzyd?b45V;{ls0nM z2-=uJm7KB~=9v*nBstx$Xi&sxFbmb!eo45W;MjA8Xi~yj2Me782qq3E3>-wrM}?Wu zSjdM6qIG+i7HocgKCJvX%|OVh2%{M!C?829w0a{H-V2~r8aH4m`oc|@%E&nl(`eH7 z8wF1Q-IM{WqJn^ZtCl=(7cn-5O<)O1LfE$s$^3|KyNfw0&6^ArG8DYNIDp6zjO9iS zg^;CNqGCI9+F3agGY<8OZOuTA#538&MI3! z+hd-ey5XetEnwLp80^2g1?bTJPtpC4xwhsIZCjmgK6vjq8yQLp_P zIQ1S$g9kB*#&RqMGR%!f5`-~*}M=1NQ!h=y#Zc^y?MZTMG9bZ#-A|c%z;pb&V630}X zDtkILDS{TJI8Xfie3x^J7>p1}=b{vzmPXZsp*4cN;LsFns$}8urSVk5CU+|}m~x&U zT-k!B&<<&i_j&oXT>>xSp$j<85YH>kLOt`8gU@ShuzJEf0E5ltr(?YDSO2t|E7rK7 z0ec6*7Uk!C8fFpck*?LQ7|smz`)2&(QUfKcM-E((AQd_pAN+ca($-%C%lrHu@~Yx~ANqtTE!S!S>Jq{#xdEeKUIRgd)d9mld@1!LLaV zH$Y==k_d)o*O(Q@u4odkS~9Wk7?-PvfsB&~dZFH6Cy6~YyvJ80wsMk03(Pvd89rdp zgDP75wrW$~H1%h+8EcoZjp;gvscz^MFf)`gGJ;-|89`hEedK}RDhsPC4Up3*=HDA2 z$R@!5RkPw#e>;=N_t!L?-N=d{t{WO&No8F$z3V*D8Qct&_zEix-;G0u` z=gJ$2k@cj=6v1uwAJ_@i%ku)1B~`wIAQ6 z!$Zp3@6P4YBmOfFkhmB{&f+6SK3Q@UcJlBaN4w_pS|cr;8A__a;r0zn!Ay38$I;ge zx>wFDRajX_qZX`Tk1R%Fy)&Gua$xyj#6xuLP&*dn8J_*h3}?hQv$&t? 
z|21?pcN?d$X9ht%8?ov{kGx2b=B6QDskP{;jY#@#2q1U`& zdUx-?9{4V7*ZS4U#%5dZa=0W$$@n_^{Ta7?>BuclTc$#j#DDU;QF3kkGtz^P0%dBe z#{R&>n>ESH1MG#YX0#=t?dQ?GZ0plg;_FkJ-%E?Vab)A03}+c(s>a1l0m+|~H?|zh z`Y3_LR&+n$Bylj~l<@ptlpVU3w~T+`TQ4&V?Bv({s_pK$j9Hm+%*$0?I%3I?<}0)K zHpfl>v#Y&Q`+ySbgQzt4qhXsmULn za=#z2+n}|B(<69(ZLFFy8f1-RL>3mayy=alQOSNiSh6?F`Z+Gg}mshgIl zF5`Lb4er0CtCg2Jasc=4IgiJ@I%-Iq6~JQmkk|;3?R890DG#Fth~U^Gw%FO)5!%*d zheYULyEkxqBi`yoT7}Ci=+!XI?FiL6l)<{;4Q?txd=DfPoy@KlfCvNB?u=kG{ zDLZPgI&8$mA*f0UOq=+7#XX&@|vv`dezSJz$?5EP>$+-aVv_ zU^Tw+drw-hHphwjJwoj^C(doBvCN~}bwf!a)H$Lo$Dc({X^pvhg+B52N-6yE#4)~_ zV1$QB1~$*Wb%-!=Q)+%%l- z-c+aK4_@O;&j%jN>!YgX^M2-*Iu&`2419eO7Sg9^T3+>f_kW4aUL9xEU3!Ko6H0wWj!S~IEEwz76ky%sUS}dTiHhe<{)rWpm;aCh z!6cCo+|O0c=;?LR@!+lO>{42#!i{z7?02dL-ltlpZzC1i(SE-`G|ETfeV`O{V`p#g zMz{Wwzje=7z3>*)Od=%Cwcu46z)TXjg&&?G=<$2BL>#5|r)f^g;iYP3*?TUvJhO=Gz)CfKEv%sPGj&dlNEp4Ol_nB- zMs_wLz1(-}e&Qqpi`f8gmkG4&lbhGQht*rbE74N23v8W(83Dpg0BnZ|PAtxV?T5%p$=kMx$kpy||@oi@aOsbqQ{v zb?o#evk}@JJ`La@9j>RNuM};7$BO!+%H41 z3K?EDbn3c!SZ%Am>|TRCD-WV#o=N@ZX|Ayzt38eB36?!^t3((yYl#+%4|LC&rNlN4 zym+aOlIa`8jBph+$5YH0Ks?sNmuG&tG^Q~mc9j2X`rs3XI9!5-g{*j*@bjWIIO#L? 
z5cLR{9c)~L=l4d-hn7suGP6#7P$$8EY?|+hkA#ba1E@71V+{>Wg7?V`E}5j?=&Soo z5FVUo9XSf={>OWR-kYdQrU_3K(Ta&dL$6ChagF9VUmEDucYY#tkE+DWPs)Mdekudu zfh5^5MTUJUwM1y_4zkH;SQ!Q$MgnY!r^?sB#h_F&5K+<+49H>F)h&7#iIq$EmE2JdHGh47ePhCWNdlX*qjj^&o{4`zLjemxW{ zo8|b}0%S^NX3pU!dE0rXb)GKZ@h$oYYs^14Xd=n7VWD8?q{<>bbP%01J0qPHF=Dq| z8BjM(C5$XyBB>wRbJcQoqJ>OqhVb#HL|P2oDj0}-}G9Cd^8J7M({RxNfuOfyyb9s zLua7s-!hzdXw%{|Mqt<}sgZx}-e+9-jr*PNc+p247kv7_qh02)j5X|-5=+rv5?4Ye z(@GYPW6>Qt)ekQ%&~|!GRO9=3cxW#pj~IZ;p++8LX5zyKlIdaNq$2C67Yiurx2uv- zd@ua#4p|6;kHU4R2fr62F^>9WX+yy3;xeY_JOu;{^<^f2xEH*Ngl9sgHD;Q z1{XZDa^iIYIKMsv>B!iKJzYI!vdAD4qlID1Vc6uDEpKJ9DL$PU%$YCBJ~T>@l~b$L z1=5nG8zi?q-TcK}-zxE^pI?@O8pUwCk2Yc7CMkZWzc2GD)^;xx3>J^K0Ssg!;m2FR z&cn(9ERJa;ZR5y3swIJGG7Q)R*LF;t)6YW8@0I@Kd_M5X{4GtBu1GajvRfM38dtcN za?J0lwrfRCHDM+=*qz6G$q{*DFQ#N+k*JIo7CIdRzO6OPr!&}O7B7jAChAFFvZxH*_Dy`&I>m;2(m0x5SxOrf;1o|@U-L4DtjR@eLUu=OCcX3S8Xw2M>#41 zVojB2%@Pyg&Txp^;I3sGeJ!Pf)J+Wnc#Ib^=peWF|UpJL3kX^{(+fyf|6#QmUbn zHPN9^VShsXdgz;A&x9zJ*{CS3rsAfIGa~yAf$3`<5?|t2cnNrK&k`j6%8J?^$Z!fZ%YeLR zQ+9;WS5MM2>M(3D#(I$O_szKDB#z7ah<6{k&Nt&P0*neNGzbM|;HZ+#WWVXd>Mn0v zNfa0EfvNfmKpZ51W5a2ArXC=Cl^sXb@S?PT39kF;A{CR2&KFS{c8tKdlUxiVgR_}+ zqCD5xTu6_wOO7b2bhI+jBu!4%0a;P@;csa*7MXKugq%T1rBsO#9Qc4E!|5Aj1Cijh z59968d}qnXRtozp<30VcR2W42wb2n#x>`D|sz6mPZl=^=sNfV%w@WEn1y*I=#g)CW zZa`bD6A6sbDlqPkFk_DJPo3^zgg^xiekYld16YX(?G%u?V-GhKd1L1 zGk7z$?@?MKIT{2@<{63>?0(dV?+})18?0H3!z#3W5`3OR6#ioo5mGz$TX`skLaO_> zhTE(l`6VXN0y*C4Ospe@+P-%p8CO}mW`PCW#@UxIdC5!WJxrE`_U%K&DNIxf98`+p z81%aPo3SY1ZJ11|j8a%5q&c%sRp4|d*3z%aSS1ETJ~5x8`VirEa%61aqEnD^wa9yK zBbdem`-kqiy8>p+yX-ld)O!qoTu2(S6sVNfoHREyEg!pOoi>>{5q`&35!etESL2{&cT zwsvr7{T`2#wr0(bEuhOJ!-J_1sS*=lF>}O*0q`@x-+qZ^hp2Sg*&5Kszs3TAyoc;> zj}pOxMWjBFbktzz=rqb=fGwNV)C!ds@_%yy%`Uxn zgfGNfoDAVdKLwYfxFYd2)d0vK_z5QDHDNLHFA?Bu_KVCE8^d%nT6<@njbD@ug%WZ| zD|AU=K`O(n1$H7|OOP~wmjwg65wu2pT~#no3~fvfkRs(Lc)IQ zdv{w2L#tNjZ%(8X)?f+{@BMyDFFNA05YBbHm;AhUMY$TbaT5@JSKW_x)5H^8L>|2!jQJ>H^yDNdpCx0(ugc; zkuuER7MP;tqFFHfMEVgOdZx%jY{Zfxn7*?cIjH)!$?fS~{yDz6uh_`AW8WJ?>90p0 
z5R2Z1wU{B_atI}K+O&fCEI8d~bu4*n^?~-Qpdxt#I*25(ty^%&#+qKvYA1}l!&|`_ zDxmV4mO}X%M%5icaT4e{^~QaUM0QREfyPcuIu)DqXZuL0EfKXWa7ux*=<82gyoJQI zd`hwhGJb3RX|FUHJUuD2J}NjqBz?n2^GVdW6kz$ zf`UQu(ZnUHVLiTzTEormXVFh5b^tfg?~VGa=# zP3{KzG)uJVh-sP2Sp6+jFZ~nV-uzp0L!ltsU|e}PpajofvS?EH-ex-!6L>jR7GcNM zoyAT%Sk$OnhSLJ;J0gKsF}XYPCeN-iGq!a1Q#G?trAaj61Fn;$<>YK@gML_G*7V2?yA~4P&5I3QayGkCUy!kKPOb z+(ZuptWm~Ck)y8&n=;9S!X^QyCUxn>x-ZO(U!p-B!~ykdmJe>@s~~s|Y~2|)%S^!- za8kx-w%G%99LL9rG`7U0)%TbXlrI#7Wsm%m^1B+4oqaJjD*P z$(F|c4tA{66VBX!0{3qWx(G&))C1&vl74CmbE1jG2c@HMiFQu1=i!};E+y#T6eQZa z7NqB#KaN>GxhP{3!Z*H2uy0{*srStC90R>XmgwqqdRQQ4hT;tIP%;c7_76j>RqC%i^Y2jWe&Y_j zmgq~+Hz}%tA61QOnh4NWJM~mI%B;SkMS>|~Dg4IBSaIm}Jmk23FwiHS?C_Bo@%|=Z zsR2g8C^Do{#9wgJWg35h{tzj1 zVfAMr!oTG&(fnrANN0nQ$wBLe%hV2M4X`*pT69m2C3Fn$mWT!--!MW~3r0tVK$RUO zY6*Nl^KjxWFbB5GCq!|Y2)g61u=Ijx%w5Gg40c|hEw|ghipyY(W*DBj-|tg9uituq zIYc-8mgRw9nVj{rAR3zQE~*!8@B|<{g0JLGVd{D@#h~v=Xp9CQ@sS~czCYoG{P`BuP@+2us+(Cp%srpn^ntF2>70i=` zeAG`#S0)2gIdypbkIQ_Cb^RT4AMSH8VhzNGKLp#R4{gb#7++jpRX{!WLKkGw`M`{* z(c7~}r--E`g_0+_4_VZjOlNO0(8$_{h_?>MCW(u^8~k`We(ETfrjZmsy>*>1a%sN_ zS2oIXWtWB1z=XWY&=Ve(htd>BQc+wA2Dc7c5Nxl|?++t1K6}!qD)8$mabIxoL(0x7 zgoFdQ_B?nbf52#T1Z&Rxu)&B+Uq+&Ib^V!QW!Y+JBs5sCB}oA2x16#NR~g zhYGPx^foB&P__1`lAv#*Yhi!w&T+tF%lF6}-3ON_`9DIXYK3T4Ni{a~$oY0xapjM= z{_cyDaT8Sq$Y~x<`$SRy8JV0J&`EG ziP>RPWMfnuF}jn$c?sXkcWx7$FQf21o9b}YPQO9GQm%@hkbs_gZQRHGC^9fs0^$#=RJ#sYU;o@WpVaW5CVR%GrGKG|5p4PLALydjt9;XQ zQehlQd4;Jx*oFpl@|gLR?C{du`2=ZCUc+^J?J`p2`J-i`3?n9uH&Lu|mbhJy zVDnresm|W@gGXfF1=KHV*zADOX=1HjsZvYzxGg{9N#imfBplA>8=Y&t zS_&{KxRX4)fB5!+a6g?sID{WY+Qsf4^EcWhW+i=-BDuF1cCg!#;$H77-XNqPKxx)K zhCU(i-X;(8w_1~c#L&0<>m|N8M9Dgt;4o?^h7=|i%99t6Zu(-r81?NsgfAHsc%49n zLmqU>H9kKHNNtC}2~M+7Od^G9_SGGy*sG+fQ)~>n`@OjI6$*5tTMo#^`ZSK}H_Ok; zeS4WFq3VieP{b%HLIU#8VL@UiAEMO;YvvanG!}oz=)BUe@1b`jW5)d=N#_PjA=jI! 
zR_I`9fIDDnIjqm6d2h%mCw@bQc`qy@1xKVX*94SG0`yl;bykfT{rZKusYlpxKu9a`0A`R{Yn-K~4Hb*+E=RM3fpzE^ zw1um}&br}peM&e>t!{$K`|-N}KhB`v@c#r%31xu8ym{`Mx$KbxeSwr^NL?}c;gG~g z0!R{5KVjhc{tKB*dDO4#$mL^4l69vmuL?d;Glf%b|j?DYo9`h`!g4 zRw1yo^($BiCdbB!b;?`?pFwKAArTMS-hR!Bz-Bh;V4axx)-6=)#KjoEhdT3XycI|(!J?i%x(PUWPetMtbWtj*dCv0 z_PXAobx18J;)PCU#V18HBDV7L!ZVSEv;|Ub*Xc!EaG8PQ%E#dMS5lTfC1Jvg5m!Nq zW<>q59x%KU%m1)KVCmr2tbDPXgPTv}Y8%ng9lCfl<7uFji&Q~Y)5)+sVMCVgF6`rnuj!Wxeedm7V zp|#b>vLm68H=Wmdf>*e8^U3>nCT`+H`1c(|zv|d*4LVwSIgI2X{-`-ANXp0vOl7Kv zQ~TZ5yGGawoWrTVsvnX-GnB&oHA{rnaL!`fq!`}J?J6ZTF7VIhFJ+1;R*`0ejaLbG zzMhXm$tVx=inpuOjioWlkenN7qz@sj%NFLAVIJy5<$kqqBc75h1$d70&RzYzs3h+8 z75!uEc<1<_B4hW4{L8DTL9+z%Rs5v}<-?|{Z5-1iGN5_*ny_=;!_rFrju*aLLC|!& zysu*jtta8CU3SXu*VTLg@8ZtsoV`*9}U?;U4lp(x_Wqq4U@`BRohdv~#;yh9U`96J2`2BQ~@4D8~N2`7+PN&G@ULaEp3TilWM@{FnR-lYsUK*Il-ukA;2a z?Qj`?U{w!DQjtNV?}nLftv!J+*BR*o5;%zpC7MU8K7D7TT@z<_*$!GOtNL8J)qlJ| z7sm2HDw3xcyy~vrA(OswD(hWBuvF}vFRG63ZGyf>ayB-+w`&X!@8)j%g!4U4Owm`b z<-zRPjAzftD#i2mXG(^W;Oh$i?FHQ1oKTr+mDTo<<9OlC?y6@&J;Q?l%kFZY?!_bX zK`8ejs!76(G3N8G8=Q-F&-6H!ANx>SkHNYRp*M*WOX9%U<{v-P7t0xSHgsZ+5oD_i zu@-8haXB00?H~!=gH&6gx@dqa3I>IQGmSF~P;(vVqwp$g!8MUuL4h>>oA#ak$0Ay}D~K8$I%$Z539TKo!SuhIAA za0ur@XvyN{oO#(XcR)kxsS3U;&dAU$lpQ1p z7Xkg2Q=45E`Rz#`n9~{aK@ES-;9z`bkS;oA5-B)7t|f*N=LeQRvyS(H;rUu%&(0k4 zc@f>n(n3Sv*YeHfXl_LHuD)nY+RvQ?N){F)VI;Kt6kU#-C@uvnhw;AjirQF7O8pzH;up%1kU6nOe?TR?uG* z6zQN2DGC4f_el8!CBiTd4YpTxfFYaP=9^qBqw2qj#-gX5`bfQP8lI9``?!DJIzCPu z7ZxenMKHLuyDlrNzErboM8$N^=s!f@U-L>TuliGKG^-K8ba7z#`1Ir9{xxiUUEie0 z+JfN+Q5I;C!eQdiViUTtum{aM-P7rBcFV1vu2Cc@UuH;izQWrZtZ%6G$T5$SQPiW+a#x0C*v6vlcDdO3~;v4v4|Bq6Na#aE%Bvi zRGpvx)%M)0$6In2)5KVO>2dU=6y6$h?(dnv3?+i*3qbt_&Aw2MViSs(`1xc!F?{%b z$H##3>7YT!b4QS*YIW-y!-k6?b@3P0Xc6nPji7ZuuFdNe_VG^x;9WMU($K3+vly{O zoeGEJF|@WUf(+IK%zap4}o2?^0=XLBl< z_fvoMSk};9+q${)|F`}+TW(O+%sa)^ci=D;`18$^0gCYnA5tUg*w$TG)Z5q*IADyZ z^1fU5cg-ukL=UWzwft?t`7izzAk%b5IoCoT?CK0;KtT?iDa}cEO6i_%@y$Biy(%YF3tT&U%{DXIZW!=B`~ut+hAD=j|-b 
z*Vpus2q4vo|CsdK?I(N;WgResg>#2OEf3ma7J)tNRf&=J7`^NjKV{7$!}#UusP#fF zsxktx<6fw-N64=OK&QaRpxR}I$+ePE-GmprqltW+61gw_jxVvk|FQng^Niv(nPOEB zT12s#?c00b^S;A9BvXmUeE`YIMb6LtYg5x0^>l%5NQyCl?(c$|jknw9?`=a*Z};+R zO*NxQ@0e@Hk376^#Ly~=v}vC*~((OPMk&PGVM~=^7 zIj=5RIjd#R_*jgX1OI1l_p=q~RcX&l-oW7_GRZGV!>%7`ukQwlrXO%R|-{lEaH0!}s?=y~mKbLv9cMtj~oDH*Ntb zOuGuoN{cU0-{?CA1@(-&xkAV-yZ%{g&-%+W-&)J@6t6ixh6vQyy7)fy%Qn50);~g} zA{cGeqcAD19{*Z7`e8q&cx5$u!qn!-iK>G_ffH#{K?V^n>9W z`nCFF0#kmzxmvA!<6ObpEi#K{;9X`iONZRfWyYWOJO1bWRfpFnJG?T~!QCfbFqM}_ z@3Z|s;B>)az$>i>1^)PPs2IAa%V2!|FqUP57uBxOlb;8aTid5ZwQ0YdzVULbZX8yv||OsX%5`_RJLVL>LZ0&Q+;ZTNsHp%^_bD( za6KsYABYiwT|yA9HVfRK?r~&sUh5bJ^}><;%01WFOONr@X$9YRPp+{G$233gogZtd zQ0L)S+Cst&HL@J^^*|NQPi}Z+7`Ej5s?>fVUIUOzxqu`ys*h@ z=T>bDkJWV8lQatbU+;a!jccn=(>kfY@6HPG36LPsmmD?DSH;iU9j|EXLPoj#F@%iLoWM&-RRaoli82S6QQ{{Qm|2f5{ zWL%h`1?OTYRDBDuli&97%EqBtA+;f_7xjmW4B(7skh=&Hc~Lddd`%9^(ZxRJr-!*;{ z5_x;DTPfM$tu@~YeG6QT*~8TxrZ^j&yi%`I1uHi!QZQV5eJ&q=u70;8`odg4LUDe| zvA%I8ykNx_I|B-E;q4y-xZ~02ekd2uPtRM@w(&h@`QCPe(9x=~X=NGB_ma42lw=?0 zRLw&bEv^8h58qSR3h-o}Jav7v(2R}B;N|e|^cL|t#jZeYwm8e{&GS z-nz)Y1p#ycp@=~#X;y?#u+=i^6noUxV!aq6$?Qsh+oiU_(~#n8jIo1DMO!B_1?25Q zT8BZr`-P_)XkNC5ijQ&|!ZnST(d~10zxBVVdYwYP%Wf#1Km`}xS#_=<{V29TW>7Ku z;e8=bU2rf*8)Pk7Y0%?gzZOnhZ(BmpDw&P2P~B%|Q;y^!2jWs23<2t85toyor7Kd5 zLsX_Db@&YCi+5PQvFi|H=T5*&R{p+6xLFMPs)IgUZtkfGwB1Y#V->dWD{FdLMip2# zuh7juK{gQTS1PyQ%Zak%V;&{XGK#D(3U%}k)#DSRY$evRPRMPPZ9JX6plyc>4xKhA zg!pB-vlZEm%gPKX%A|&5vSCZrP?iIijcxUt+CJYz;T?`LYwB<5Vlz1e#p_@*pP*?u zwksN~=LwFNS{frjMCyWvxR?y8nt?_;ff=*@zvky)Z!W2)2B%m(WY4|)+#FmmJ z=3mD7wt)<|SMQH7ssY=Th;^ZYD- zcnuAf#UhB^-4fPzz;}MO)4pt0?1q14*&3>$A*wD;{QxYs#_y zu4c`U>-()%KNn9m6x980Jv>Lt{YVze_CV<^be18^;^s)Xzm>(u)8Lu38k$n!&ePsu_Q zc8S#yCx{`W>V)7}neqM#?*+V17U1xZ?P3a7k1yNvW=Wep2vME=Y}^q@w6B6UdRZAs zc!F(sb9(HG#-_gWVnpB3R1Nzo+-;iB>s_%F_3#_cp!e^Sc_d@aB1j`#fS|bi1g3}M z*gmsP$`L{Wd_9el`M>rZkp58;mOl-?a0609$n-XoYbzT|`pZB}nrBa$_)#h#RW`;x z2X_nBNA!D+meNSLh*atO@0$*7{f9{i^WR*L_j47FlC2exGc*u10?@T9|6Cm2kh3UatZV6TcYbte(I;uW 
z+@(YBNW~1|iUfrq#f0yXpmu{*;chsxQVD(r7s7p;{prqhugN>kI^ci7^G1Rc=*vj280$ zeMU2{R3?+Cmiplu$2!%T!W$`vGLdwIF;1y(ZIYcni(=vy|KTsI z^F?8xputY5hKvo*GAq@KTNjRc4KO&hoBB%cP#qJ`ehaQIl58vz$PO*gmGLKb1tKmo z(`zu8=K<*PLss&5B?Je;!T`~PVDo;nVOA1Is*>)uK+Q=Wi;#wE@He_#Q~8ySdpf+I zD0#9Q4GZ>~O*8JLUXVX+J1{aCel!${6duL?2Bv4{l~v6pV$rPs|5^a!OP#Jp>415$ zy2esI!9N)S^JaBk2Qj8sbHayTciG~2XbQIH z+3v-PYaxQ?J1aq@0t~$WGHu&CX=!#mj-8=0`xenH? zJ=z;ktvrd0D)}skiWoT&s|^aY2i(i=)iw{|ZCl5k)^}pwcuf7Ol^VAu>rhsFqLmK! zi+XnEzk%jI?^n#|lrA?hGsI9^731Ld2zbVuC&rYxRUaApNN;A~#v!?|VYbICJ8aJG zH8G3QqC_101PN0qv;?)Rpp>MfYIT>Jx-}wdHSF^;gM5D#Z$Ah-L=7L}>D|DyY^65K z%8mvwq7j84O)t>gp(zh+p)B_wWuHwG!nX5LPOtJHO8wfX!B;yVaBfmZ_+gk~} z77@By+S5F1YH=9v&K2UbG<0$d`#wJmzQblB`*y>eRe`XY#N3`=9TZi^tfTaGwbs@P zj{g%CY7ytYf;z59=3eXnCKHZ<|2*Wv+aTi$nx!hHS-9o_>3J4wOV`__4Gtz;h)s|m z!9&+Suv(GT5^4r%q!trRKmQKw#Gmu;?jD7!uKDxxR4<`|g{@`v-Fl76F#KY#YdMHH zq6kIFK9wMZnq^q~;yGP${5j$>RupBxZi6L*l7b8EE|m=ZVcIOTOkxRv*m_(HC=l3hf_zX_*HXM>|pM%)xQdEFUWSI$mPFs&16~eD8J~>G&)Y& zk=K5s;7X#x;jxZm2V^s#if7af@d%(L*{K_zO|`Ttm8%zTGWy6<$03@=_WxdM3nBpR zugNs0r9m39m>H*#E=&fyO2#iveNWfd_i{zdz>PduN%bXJ!SvZq}k>Q@DK6K0 z>yimZHPB$`ENzydW5O?{4aIt@efsx9&u77*cIjx!md}=7|GFjRpt8!v*E~4nYzoo9 zN8eOLS@HRElbeJuN>zi{w5#n>;YML%H=0kY9Q=TLY|tJwPCd01w{;8a0TlYM6_mhg zsROVm8`3k6%HZd?m^(VMcPA^0?#Aa+P?`7(3I11kb#N~{8;Z4Q1_!f~SY%>|1b;_0 zL~7;4%R(SCn?joAH2Yys7!@~l=wu3!6z+Gm1^nX5RuaWq{4A^;Y;Xy4 zpctvz6bPX_fR4GRj>iZLxk&ZX;V7opYkr5BJL*?e)T}v|3SjGNa3ztKR~d-m&wFB? 
zsVoavJeOeTXO5bO)L5LzJ0$-MwbZuZJ-YVNJalUWS;&zbzgu^+^@?x#paqAEkY#t> z9C+p3MF>jAK`UY%>+g*WQUL6mpp|sjgUQ+`sM5!D_NVy$Y#mED!&SLtr024x*eA4bQ;TpYdG$UH~=v!7|~sI zz;UKJi%QJ~6qmDXsaVMq{}5&dk^xXa7||Rif4UxBio4=`{37Cp<5A=%~PepMZgCdiYhiq-SL?PVP>1H z7R@rTtsPfLAG$S$)bsMba8BbAsKueB4L$37!uKl0+G%M2FoInaGAzVw{3^uR-3&PB zihVc$vkqe=2ipHJX`Cx*`i#f^kt487tK`p*^B?_yZvhc|xKNy_A;Bw)$m(~-NkuQq zW6Zj?CRIj-Eq=kXT1jp>uFk{K4g4jJs|Cx#=IVakak50**4b-cF}jk zsv2`&>e#EM?Pp8A96rBGk}%=yRyi~1hPBC#76v28bxfFLxiZDHP_9oeG)lK;wNFkl zFR$A^IYre&lSP!%txh@gWRWI?q7LPX^;jE7zoNPkKh3+A&XGMwMAb#8ZI5yfy1**< zbQ}!W`K0ev6^jT)*Q!a^J^*PouifaBCO>`9)GyxBXqu8eH=3kP>xPg8Q0D4klz0dz zz1D45{!UbG(=yx|I#if z0RL@9a-*!0#|7j=6s`npanL}{3U(A3QInSLcCDDAQKyY5CZMIM9=d34%L6#IMqI}ptYgr*K{ z0a4FdFZ$n2^_RIh%~ek6Vl+>UFkXXYXJCYqsPX-r&-*_F>%Nd_Y0VRcGF|@GMiYMAv3km(dX44D#Q3@ky`*6YH^YvXSlmb6gn&yhE4JUY-+7F+e&|o} zYB|1~payU+4wFXWy&835a3O9sn-g2?mQRaK;LzwD;ie@cNU0%A#tKU}U3DYCP}0kR z*)KTSQPm9>J5Wut0B7QlSYbJ4VuNltw>PWT7(a3WKAfp`Y?5ZcE&g%_Oo24EKq8$P z@}Yk+aq@sEzl&&K6}`eJX3Zn&hSQp7NM;&#f@#ES@^I(oPv+or?SnL_N;LN?5bFNo zj{!_GrNECkO1}F%o#|kt9c}SZwZPxdCsE5lBh5|-zWm=lrVG}$vNE=_~}QX zqGi2!J-&)6oW6g8OygNtwWy|ZCUaEh@15nQWC=>FhQK;s)*TZR&)2W!j9@Ae|Ci8% zKVm=iMYWbaM9&x43E+O?CLfuJKt|G^;|viDZ6E-Naq9wS3HSDFB(hpUGS|n6o%U0n z*o}1fZQsXFG)mS;X_L5;5^>{UQ>n7?IJ@w{o@to0zb<``r6P0n3J=R57>GeTRDqw3 z3MEHtOZA=7?G)m$83eAHDCe+g@EQgYGf{<5!s%U$rja5S!-gydEqFJM_rsuV!l4XY z`OWOh553(s63vZjvx@0$Je9^?GDg>nSwY}pDu!vSzy$NLr4#9=vLxHyLsqIGUV3Ai zVo7brZ`2I(-1rGWHpy|;U*pVSW#IMCo+qrvD}>RKW%_pg9VSKz2jeOCdO6&E#% zb+4{e-fs_CG)+Nv_lj_BN3|QWvfGCRGnlm;tm*43pQtmQ+#XN|I>0)5If6q-)Y!hm=1YQq1e1XkgLZpXBsLL9X$>p59w{m z?F2ruaKB|=)lV4j!V9dlb$)oS{4v^j**yYG?XH={ ze2FB3VO=34bR1b;TrAuO2dO>xiP6)!N4y<*_@fwjNQ@spS3=Gm!plDz5C3EQI(&H( z7BCffraf<-KKe=fkb`1}?kjUOS@?-eq7S5Duur@r%7KYg-=zG~uma7qXs9;8x;R2Y zCip}LP#I-eYCWw-9ECp6`|@Mycg7QYm70JIN8Ur?O8?{Q_lw z+dcAP{wO1zYY+~}#+T^LPx0AWMz#Dl){UDA?6cr%863j0`OR5|D{OybFGo%FaVp+Y zrHfx-lgU1uY=Lq9(Pd1W>T9Ws%M1L}FS*j#=qNR0rtC@@b-}S<^XbuHWYOq2=kR)kBFPC#2r5>wJ5YtsbXILbO)K6T*<-RgU>=KF^A>3!s 
zk6+SV#i6e8lF>tA5Xmta4#eVO)aV}z&FaN!4%7Ayke6HimHSEkl_i^VIYXafMh;V? z0v6Q<38d`ww;e{AVF8e{cS<|Ix|HV*lHmA6zY1b-oll~A(L0}QRPPz!uWF&h`R*DY z;79cIu*}v$9Secqb2e9a*x6h$>gjmtxDzs{*i0XzaRHa5UbJFAEvdkw!dz9TA$fz7 zx&}p;ydh#bG9xTc8;k*0!oc(hF=~5?9X{vd4Ir682t5r8cQ-G?Fl`@?AtTKv11dy~ z#eRn3##m*AhsZmQ%O8FmuRT|~#x|F{43Rt_zie*BCBy?0S(H%MUvL3rZQ)9$6{7O1! z2bC6k|4Nk0KVOdmsK>+T|Ngrb-i&;Lt&colh4v8D(o%uV<7ugo34$VtOj98;5m$Y$ zJ`PxL1@KBFi%tE9w_psvx)>PX&FOBAzBtMG|;EW-l6ou1ssA)iQ zWyCKP67bepp*OArT1c$gW?%9Yrtb=s@x)-wZ1CPshvDChFoP(L!K*E%t^=Qn6y)Uw zXEKtbt=KVj+{v)}{bSKivh3AC<(ra{BQ$v_+-}gOuUDy9e z!jzGxbO`sL$CGHr@OSUHTVOw`!=**>{hlJpq^8^5jkVS6zL*?yR#sKygHT;4*{9`^ zECIYxh5#9^J;=N-iA+j8+5QLPS8Vkk^c`H?q(ZHL_0Z3>Y!QCsT@m8oZf$lJr3{iJ zhqXV(d#n9yq^3~Wlz-cqv>z;dE~hYd(uRnvaH11XW$d9lvh@sU`yK`dgFgv5GDv}0y%FY?i{K$cFgw7^9JK*p$gbVDRZf{dzQ+s=bN!bI?IMg`CP+fv6w zu}vNP%8Kf2T$(2Pg~BM-ck z^!8Bf(9nll$G2ZGe%>i>XEC2psl_e+5-#_v;wR69iJ7p%9~$5$?I_ZWDJPc?BUhe5 z-060+^gp1YhZCEji9-w$b_E1;uwcLFG2K8O`B0}zSbE8PZ7FZ7<=Rer4gI}*Y^lCOjm z#JJZ-v+RPkXong+7t{CYH|Ajt8Mg6kigdKv`{bBd7L68xd1Ilk3{23* z+qz2Gy+NC?2c`5J?{=_A|JiU5TQF}dl_?jWZXv61Twi97liEV(%4X)4X1vf07$g+1 zx2DylYWm_x)o7_|t0N$Xpm{Cb^-9iR_@Ni3>AuYK{aAI^Za*M>Hf?WdZgn6ex02;e&IM#t(AKXjqk80hU zId|SCQgci*rBD>5+IRa@Q)aP@vEF0~%l&bWWd7`47;v`C_^{e9Y8KlK)N5(?NfpbK z)=9ZKSH-n{A3T=|6dQ1*XTVr^Ill9Gz94^l4E04X5~$IvWNT1cfOyn3gdMkbTy}dn zvAbx+H7Hv9c9S@gspS#|qX+Yumcd>lrH>MUsA;3TJqGH8qQAtgRZV$px0>~@fstRu z(^GVVBjF~P=mlxU`bk-7i{*d}4vkU9jvMM-H)P+kyBo4es-B{-e&X(4G{+V9H>~{l zRY<-(A*~7-SFAN0E^e^g1Rxh~Lwf}K-w{WEJzV-t_E5-uuBrU*M+bZw__tVH!@+@; z`haA{d2idmiM^+~Vr_-&FEv5$ep@hk`O2F6w%+;NQsh8JS^&lz<4?51eytPido8We zV$>Roj<(tOo$R-w_{nunY?@L(4C^F;!nqeV)fYUBMn>8!G8>9N>XHSF_|9^S%e+#72rWbWkGL<5#m z)eM2TMnM~S_1YWrxykn@WnI>p^J&@-e_)m2W)--R*XR8rtw7nDiOrOzajMK9%E}}7 z5bx9v&|-D@1eoeWOGvid@;+yiHd|N(JFQBY{X+W7lk;?QSlJiynw#5)Z{^U zwoG~UDPqcASx}xsg5!7fieOT`zCFI?nj@-i^Jf#zH12FgLJVmIdK{}-BIqL6HWB9_ z(_YZxc7r`oDJx;V<+_D$$vsW)ip86icDQ`wvsTH>(oDsBQGBkPV;uSjApvVGMk!gm 
z19zLbd7Yz=O$;cz$&@h+K`-9me@I_!#fTSsJ>kbIYt*iE?V2=%ZqPVhf_t9&qWr7| z{cL=SpKI5_E^LV=`h_;{DdB>q^V-&|eA`6-9++PNpBUefjFdyX+!Ev!bD~+u3vHg< z6IIuTsOc2BL8=iKJKA_(L)f_?t(0sdoJmDrkrIco4w!~KaspTXK0hiyt*mBngV`kV zrM=A)+#5%}wMmVHUrdqp2hyA#E8K6Jx|{B1$I1)`!YerhKNkLIW}csn64q7@1zW4r z*D}=(CBiO>g`g?W7?VCkvNBvbhH(8Qpq8)C)-g$o{cE_i7Reih{2lVyDAMp zf?AmOP-LJ2!BZVW6`f#^efNe<$}+|3&b0E~B1P{%3ADwvf_XhlyTU%X>7rBCY6w@3 ze_cvnfmkD#=%>pYIa#83)!Jj;P9bh!+IvpC_Soj3_xIE&*|-F zTbm&|I>rKzdPE*2I#1p!7d-@U%DdqICfF85{_Eo#9jT_LR%{5zpi&goZCLzE6!al+W*S zSM&RKO`W@#p-*@B1X&5>3PWb87x$nFkKe@-nUTy9dklx3SbNgb4xdcFFqT$x$t0M9 z#Imh>_n;~Z@8J8l3pNvnHqiqm`*k!1SsVm~LK_fOZZT$vo*X8*=$B|t-@-rL3GW#D zJ;mmSn^H0J^7HHXp7E-xLkryQ=o3Cg#B*}m0bCpKRD2PVnb#^Cz*3?(6 zpLOlc<9K72`vp-IGyQ-Mp9(hElL)7hlMlUwm$KfT#>wmA8c+|N=IyNbVf)!$P;Y~o zqOeuk7gIMtiKbfu$0}BTFE1+>_XL7S79$G@YhoN!Aymx{sS=Ecs8AUXVar}`dK>!4 zVfou0Ww~CJ0G`ARJcPe3>BX*>d_ct|K#ze``~jVuoHOdzx0u{TGrVBSl*y0dJHV2y z3oX%i_{r;e4Z1Wso=GSC)Om}kn+I6d(lB#8pTXBP^3Va;zJnyLq!RHiE%g1tF^&>3 z-fbxVL7|-o_Mx%}#yY$*#F)uvi0hlVx4Vu-BrW+S9=RPg96ppJ4>%i9Qs=?Z!;G?Iu|G3TT4rm zo}SeBj%9~;+H|ur0M|lgTv|0ZH@o?y4s5P1+B4L)cj7rAmSrP9Ss2OP1uzJBzV_ zS>p7j?~~YP-BZYsH90;^ru=ti@PCH~%_je?cThv>2E!wQX0|!9sL%gW|4c8Na!WU; zvcFP-;?J|B*e>zaF%ep-yXT1ur>*WpK8Q;lY<~&Y2xgLr0Lw6J?^_1VM{>eHv?}A0 zPf{og>~j!?gd`#lyW8*+Yczgf=hw2SsB}I!e$TJO{QOj0?BnyJpLK{TW(X40#3(Z= z|2PBg3c|U3MZbcgKLsL`v%F`)lkDr=hN>5R!q`_H$h3s#ftpApmFb(dolceO7&E)P z7NF!Sy!SZm_1GTbjAdio5;g@`U`m0_!xX=&95^2&mA5`F@l_h?Zn{WMO)w}(bE#Qa z`oK#Y5>MMg@b7W5(H z3FCe{hN5OUhliWIrFLlPC|#q3yt^8w>_|h$wFBT8KdQpP< z>Dm`+8&HWsncB(1NA5leaj7S1(&vNs#nR@x0v1|mcz-svzkpaiD=Kf4RM{k{>_yKI z&qfSL89Lz;^#{|Zj{>|sdYx-WawmTu>3!s;%gA;*f3QAY|J$=#+9zrx?N&9tys$N! 
ztXN+YqAyTrdBROg$Bly@DSSsxI(b~72Gd$hK`tyKkU~-2VT$d0Du&Px_-RzNM><`^ zeudJ1w{Nm$&m#TcKNHpdOp{5_lHZzRHE$lqMS;aHa zj`d6VR^MkeYr0{(I~gF$o*Id2DL`w4)6p(k*e=yrbpGvqZHq}u*g}j{fgqdu_cG)q zNDJQVJhX1=t>0gjxbso`>P0GcAu(hd-G0IkVPrT+* z`@lCzH7+c#Icu&A&Iq1e$t-G};u^I|wyO1}-dXoRyaZ@w6< zK9aXJ)zv?0n$8>2B^beVKK=egv&GKNKBwW0z}s<(R@OcHNNDdK*iRaVC4iBnYp$*Q zZgK;DXheRRFQINy%4*7gnqoqXWQdl>K_Xwl3RebG0%JeDmKkO-mMXL&I7b^m{TFEAe(JLH zLm*wthR7tv6>%kcjs0FeXOB&o5b{C8wmw7o z8}J>3zWvi1c&6m)&+j(};puGSu+q?uh zc%+ZurXtF@xU5yazu)WFcBb^uoy*m(=Dxvfbi4g)Xnz+}ROG_;L9Jh*I0fkG13q=% zUo5X9C6Pp;Y~9*A50id-22am(O&8G4mY19|7p)f^5NnJIEIli`GrX2CwuI>3IW1`_ ze39Ug9=s0#Svb2l_Y4eB72#fA$D4WeU*F>>kqhk*bO3hy;Bn^n0enXIN4j5&}u*a)Nv>VXGin)NRMW%XU z`q$x5DSA&gNOS0cQ8KDX!cz3tyID~|Lr|4`PmOIlw)bTG#osGQ-8zo=wqfi`+$jOe zh6U?w{+$uOCfcZ~I}&Vu8ri{-M9(;$ao5YtzJoWCB$Q_%Z7mIrdGU(RoLMOLOsRNC zs>HF#qV{vS4RYnowz~wh^U|59O3lJ8+t58GIXARMLo;_XLCe?O2!%LxC3cyS@q-*b z0aE*oc_AR)uv7Q(b!-=`cCIBN2o?*9qABMk3(YRyvUD9j*k)PJYq2>cj=GIL&c zrSXUBACzPro1w#Pd;~~GX3Qz{C=ip=xLmtI{{zb5oa%UMzkT!%i}O4?!C<8?&>5Z5PZ|cy-)nYa*7Q>%{Ud zyy9S!==a?N{03gxvLmXKN$ad<&o|#=I+8*#Y-NayvBn1!AYk3!swL#v^QNvp5(1Y= z?CZ${4?ntLbEg@`7f@jtTd?`_e7JHr`-yAVQ|1k*U@qd5WGQC6pgS1c+;Rz2j7x{68fqTcV|` zo<(n8<{yRirGe~-0Cv6z2qn{WM$FZ^6lHb6uX2H7L6ViL+l3#E`{`_`lGgE~fM||; zpvPEM@wfo;I8lMWhiwmpRdzVhlZ$~Rg~(`KDV$MVLVyOnvf0ONFtR5zE^Q+U=TI@c z(i$X`g{&^YnNK(#zLiOelpa-QXVVa4UZUf;M3To!m7(g=b^S;*PT{6<^F=6U!DjRC!h+O`#*R)AI1Gtt6T0Ya5hZ`H#c;!W++cJb!3!s7~T6IrUpr@(uD&JSZJGHl;EU!?P;Da-`W zT8b_o=30?Auwr#3>T+5W6a1^i4*FUTmYj<+H)9@F9B&r6aghny%?H4gbLd~ng%cJN zFBnZp4V>Msan+d~Gz5y98cLYo?_9^Hj6SM-kqR5qM=y@i(p6yBa3aW#@8PH72H zS7zcxGkriD$kP+Ca-20MMvxGd6FGK$c|-~g*~Zxk*q-W6d9N_$aj__$O&TpggtA*0 zsp&Yiv|?fD7F=Y$QSMr|t?Wkg+9b0oQGVmJOj38m({h^1ijpe5XVC7~4K<$Stg^TH zHT9M`bV1|yvK^K(pq&}zP)(8=PRmA@jw@4285B__gNnynvyX%y!%hTzOY8w|7-tBV zLtx3*dlO?v0= zA{EtFLd4O524Ago{H*i{qA{ifGgCV}`1&pL27Q1Z+IVK5Baa=CMkuzxFuo{~m}n}# zUTxIsH}x7eA59aVahGH@WDRjq9*qKA724SAv>0>&Klb~&&zoKjQF3h|1SyIB-)7}~ 
zX4RE7Xfak5Z%&o?SAeedDZgE`T+eiNx}*F|6t#xqLDz@u|>}iE%gpqQG=TRJ%#XFrMuUa3(x*E%KCgl`Ll6;{`d@A}NT| zPX{p?0UK@DyG!$z$)jyI9)w?)=XE6MxmxIHIYL{{i+1Sa10NCP z@p&%6v2nD)DI#ak*zARR_H$XZ(0MMMN@xrbh0xZ} z)qJauZ{OksTzNFV_Mluia|#{W|F+{k3IEZ6G^(swcocfd-6kuUd^BN1z?-FG!lu{# zs`h%SVhbN!wp;tHzWKz#b-hD!-2S(GlrFw=+_k6hm#qZY2S<&#My(tz!I7v{V9)H> z6cGcf%@=KKw9MlmTnIMneDvT#^Lcq-R z1he7sx6lv=mD#UvLQ!2EzNB=6iE0S{V3xhlcps~Ljh3)8!azP_+g^j^N2yIJn0`2I zwUUL4g-3_HVBC`@BMm*owus7RKtuo=cF_*l#Aj?;I%W`xWQn8aGJJD2K-vM=+&KuM z93A?Ou69XHBYPz|&INT4His)f(BL|zZ&M1+(#)s@?CxmFar=yJdv;l?hfbQ4$@eB+ zgl*3LoKFb*x>O()TGr9;HELZ|q{c%xeB2TA=+HqyMOS7^V@T0c#})K-L~6TM{LwmK zTtqV*f!slW4hBNL+~(sz|1c)SWcgb>S=9>3^L_BlBpXWocT;acL|I~kgdE>)B40o9 zMu6|BimD5&#q@dvUqP{|*l}W+twslokGAH^J<>z^^p!L3(HV1f5Zff%7I#gm#yH8Z zxU;z<1}x^4o5m_1Qo=`$B0`vUIdixKMW9|9PGm%hQ-xFCYn-e5AGIROno~#4&gkji zlTq2PNClprxTObvygN)c<)k7Lv33G%8$+yHL3j!R3>Z=d?XA0l$zX1hzRIcZa|^3w z(?@fh6=1LYzAmFIQQ+p5CTyoA)<9$jLUzz`OixHL|X+9zup~@H=z!}CF_Zm&9!estCivB zWhFL@S4DcJFW83Ul z9oy<89jjyen``ZJ_Bm(2-*wfG`Z1qp)-!5U)u@5{R$B})_EvCo`?)=f{YLYJ)mEk_ z9CW7Aid?~)zI7bN@EitjWTee`7bf3qZ+OctR<~j!C2qXHkfi`K@5bU9d9tRqjxyC2 zsAliX8zNlq8sm=Lp*PTiBuCrb%FT!sX?#r65~oIqP3^-0R9@Tk=ywr^!!_PH z+x!=@$piRSO{5+Ey#^8%9=|Dc^qV9}T@9ovX9+fbbN(L(N4`TeP0B7Xk+Nh+uC!aW zBv<=d2Ybe1AAiu}Kekwx<86s`@+KsI4ye}D4CyMW$}EbO*BZM!DAz7(IjL7vh#zb` z;y~ZnilBEF1z2A`l?FNfo==7t{0*RvkMnw>`~4$Vn6~Gd(My2|daj|a0>H2?It?wu z5xN)AcbaVQzMCAK2FRe|-cR=R?Z4bfPoow!r!-Z-ZfptL?93(Nf{3to8W45jii6oE$Z^l4n8hK5xKGZkqt#A%~3 z2e)I(kBg0+?w)W;`2+DWQX&^_zU`aeFeHk^E`$c1&2m)@3^+lW^)!H%nNLH9}wl!klY;h5+4ZtEGoel?F$KL{yGn?&;=j1Z(Ds zdv=(eGQcP+M2`B9|KpD;74ERc|RU3g5tAF3`qA80wc~M2BhV&~^L$+9Hw6eJ1E&I7m{le9X z-Oq^Mjud-HF^Zx&I5N(dp0m|qXOpLE*q*5&Mvw3d!H=&xjJ-S8r$$AawQ7NMc9**3 z-96Sg0|139*iB|lzqqaUCG~Yvs&ciE{UdQ`flbI zHh?}Y)V4P`Pu-*oTpW=uRV}#MspG+*Dp=}0u~)UKoxNrtfKA_W>dqqzjWpxq(9|Ws zFe;T& z_Wcfe`W@8QD4WBH8TW}G7CCLm`bl6+?)@_jLBDlABCyouXK*z^633wz+FXw}(9>(= zd&$*EJ(`&`MwqFz3@v3ppbk5FqO9NqC$#~sbMt5eo3shi=%*Yk*j*s7Zw69QQNc>s 
z{|h6(qAVwVej~rTS+NyVdRqS2E~#X<fU!Qf8O%M`>xSob|ij!JC) zYaN9NG`LyT+m zm9BolVXOwXYOgh)0T47O>5Ouf5*w`LnJh9;LrE?uLLUI&M^+IG0Nv5eb8K~%vXJ8o zt&EKG7#rm{M!(k>f6|^qM^&Tw#>#$j^`=R|yec^R=x#A)bXAp#TCsbB$;0}`XFd+D z7Ern7F&-qn&fjRL{~fdG{#W$4Iyrg2z*Y0=lm8K|NS`Lrns+vlMRvzI(+%6UnP4OG?M>1d0lcT*KEJuUyG_+*jvUo@on7xdua!`(?N= zoRVK%t7VOa?_(N%$;I{4DSO==UJ#L0on1I9zJY zfa2Kwu$}IzRE^F1nXmI@SJn|LxllIF0zPS#r_r~6T_U3Xf=`YbZ1<6i9681>;0we@1VNrHJumO8N z#MrL$=~uT$@3~A$6m;S5Gx3{`1RAP-tzlbp+Y6l6$#@Y#Kk!{SiPnjOlADh_b4AXL zg#)!v)1ny%Gdr90R1pr2<{Dy`<~Lj7LZ!g#=F1;}x~Gnx2gjA{D2idau(XX7q;g-B zoK4|xbU)QfRJV!v`DJeSp#Jb^ojR+Dto*23brGREY6kq;h+m=4c&+x8xp;yf#ua4> zgGgfeBIoQ3Erq>NW2C3%C}pBDc`T0D=z)UkA;Wb<9P zbSy;JFtg<5UY!_^46;y6$sSOWx+yU-)JhaLOL#nRwi4UB)GeD9+7;xJ7loTQX@O3i zPTk{yIWBG-`7tgBlyK^XcnD#tKsOsT8+pM;E4k*Kt3P`|1C{8SjmcIV6jLyM%@m3V z2!zQpu^QEqHYT~v!BE7%&gLr9x3+|&0#CG7HgKi}DO<3O6EO-4XsEg!=4{hf;-V@l zEo*{T?G4q#)5q|TX;|vyqZQ`!=Q---V4h%WD>(FG?wJ*mn~KZpfB*Bfz2CTu}|Wz0fe@rs9Q>#zR1|vl^wwy z-O}utJMju|;a{f0$>ytygvIM^epI^=RPy~1>KyF)MC7h%Yb#$cN^HI>xZXkgaW$-( z(?xu}XDWiG05=sUMa`*-jI=SRTUoyr5n03ZT~9Y)pjCGQ!TY=7z=n|>^LFjKs?6aa zdVJh84?zghA(=E9CIwn_5PzUbwGp@5>oXX-m!!_kCp_E-nYSj840wV}aN7FgW0tU5 z8|dd6;={IQoy8W7+3-|gYoMP&(-OL+UA&}ocmk)_Sk!ID8)GE{+b_9v*(ysZsq8c4ag`C-i5{7mmSM8Uy`H z-gL~6!wDux{Yh8YFj#L~pN{Cygx_E_ZwHbi(=mrwmxI%t)DzYmx}tI65C_g?x*o`i zx6zk#3IzoAkDPxFI!Yw_%`;`%r^wi(Y;NkgUtBnTxCh6!L^53mP(%E0%Ma(jEI-p( z4nbxYFwJyII~^8X#&BxaSjW4aU`3CtiYY&*z9Bu%Jj1KXB^!2jiN49S{$YMVW>1-r z7&d|zh-C^gu0T~VTX6rdFupAdb!3dB;C(t#yd()VOx9)N_i(#JV~Npgrx-1UzJAz5 zFpsh_8#YFWG_4(PvmGxVcxc-Y7u}76)prE+H9>=!W*+dgtfvCjpM;DHkDU(x92O13 zB(F{*hz^oWlAC^UR1Bu*SFU+D;s+9JfSLfWeA#7@|C}NNT*GAa&}XbE$RPtRzQb zaRX85Pc-RP2IBdsKukQ2A!n9~N-R(kPCqh_mO12HZzZ{$@@e(*^hm1CkWe2*6MjsU zd=w?c!aS}rs$k;taryIv&8CQ_3!@8-Sz?m4a}mB3Ht)J&?mxT z3h)o>O(&0x7p(u_F{yVHsjAP6#oU_Zdh$}H(D62dh|2ifF@b$Kzqu=R#o9 z>jL6arJH*uC%|V`cCW)g3s|q2Ad#)}fSiFHtuP>x@U0l$D3N^{L6}?}b3>=kV8i4v z*u(4M?d6w2WuqpSlbaH>(s2Q=`uzsyq<6|)Sf9{)L!KOFLlvpCY?KOVQQUU(mCvU? 
z-7fJSTLHaq)tr3D;Ic&AF6a+Ec>U|~;Kh`6UXA83<29`U;Yo}FtEQvX?}!NHUyxB7 zyMd=mpMHUvdko$OIejVt8SP@YkRhMUr9G-8Ikop~Irg9SV=Qx$lJjUF2h?U6NCtIE z*HkS()auq)%6(sX#w_5?*r+nZLRd}V(5Rx63lALTe);yEy}bBdZWDjXMAi_%&b$D} z*WlEKf1YqUO`WVPQHH@u&nROdZZ%qR$P?-SXR*|#`)s-D>2S58g2G6#c~h-st=F0| zsY=R8H}iOsIrt}*Q0|Vivd?*f=Vco(sHB1VM{rj8a75V`2259Hq&PrAZRy5&vkSu% zAj|eLwW_(Ygtl-#oU2|R3q=%GL@Tch#1TzKQG+*PEvWcbP{N!iF7c*JDt-FPkke4- z0?tm>Vy{luz|h9SLGQjtI_oZ?EIoE#1j#)CP9uv!Y&EsvD&rc z_xDtclqO7I!gp!qUyrGdLta`eBYtC1q7I6M=a#PC#B0+oBXOh5qBR4_EFp4br1Gw% zGSw}j77t_>T>3QgApT|aut;-2 zx&1Gr(=L*m_W`^29Sv|r#ZOPn<_8 zFGAbf19pPm9@*6`vg*v6q>XZ4cY-*qYA*^bFx3Q1@L)n_0i%C{j!k?2to7Zmb8vAU zRk>Q}7ndzVC9_e=ne=n-G?1{`+nBDd+b2@u2Ey5gZ+4+N@)3cnHPd*!zhNkTi6W<- z%uiVBk6T!>JoD>43-@>$4PXDYSvEg|9mDnt@n`6Ng3Ce{{~!o%1ql}*!04UOh~z(} z>g#M_!$eZ4xt1GAV;ZPRM>S_&2g27_qaUzhFPcrS?gjQtw>Jlo0jNN|ZfrZNe?~qnrp5-UCgZ_29^6i=xN?SY#ijB;Q0_l;~Q^Z0UR-N!+hEMGo3Rk6Tx}JNjBG zRa8NQL+?~kFcR_B%=;TXO?4f9BDNvRdrQE0MuXD)Ksuan3x>Nbt)P#q0CLc&Y9w~a zJ>Ak0?B`5QB1Pb$UQugs<2(E2%P5|<3&w2Ut>!A9R`DsmmJ0A2Le#MdVCnczpFqW9 zrRJY_6N&8te#JQG{6LQKrdG=;+q(L;evS2FBOAOikI9BasllJe=W{Qk0fQZUsxjY1 z);9asdGn4zRd>*dOxNraFj~_wWq^u+5{1Koa#W?cdYUd} zk0MeWSJ#0z`N?*(_S)mv?m1AJDYNR_!DZ&EoE}n?fz1QU%IF89b(dd@4EC$ek4nEV zWr+k4eY&r4+Z>LWHdS`jjcu!j&BhENA$mn`w6St=KRjn<7{3RilT1O63WSt&~wO$$+ur!w3*yo$X|qO2tZkEIRf!pn~{57~k>_0FybP!fvJT5P=po%kD3 zj7Da7CW^W&=rs-vt}94IAg{~jI;p2!+>MViO}@%H+@nR?6$x9iqO2XPk;Y~LQ&9Dh zbLZ?)Yy}u>Rzc_63)W4VnLo6p=j_ooy9c;6zw4;;f*I0md0iQx!26<&9>HhS6-;EA zfcVWb+5sABIR})HqNo{Nq+0Q=R%wsMVTI*FqssJ**EoFm=w@{!RG%|WdZ8%K0D)ct ztORz)!4k5r&I6PBlu|$wouaLO^^7!DR8#kJS==x%g84n z#cnIF#5w7zki00k$2&TfSJO(o9j=8z<;(rND-ts7~_GF8EU*!eNk7xXgaT1W_ewS#GD_v zDRRYjzZuwru7gAlEYY+)JFnkr0=2cJi5y84hR%=ANE($zYD0Afmt*-1CR!a=eJ95D6^F4T}%>LnB)InXH zQuFubD#5hT;(I8wy=X{WwyIi{YpJ!`%w~&{#o*xRiQU>L>{>JF(aAR|Psb>tK^GDC zKh_c4$?-ytwW%pNj&)!ac5ym!<2|(_EG9j3OSijVVQOaf8@C=WwkbcV2s&6$;$6RG z3-&OLJ(0)_zRHr$`sR|9&*-LB7^I_<-_kJHXP493t$J>=7w)$uLkHU|vlAoa>`R&}<@+YSqgtmYqb$R|bn->n{;+BTt?4B4_ 
zjSEL=&5>8wvkn{E1@*OqW#7QZenjgdd#KvhX$kx5FLt5l*{lOzz-0R5e&NOnHQq`! zYjrwID6@vidxmW~t~{5|9<5_%C6N}#$=P=I0oTix^sOs0q}OX|7xoS9#}QgN z%J0xh!HM1E{G6%Rl8FKk9aN(rS4H>!-00ui=K~wsHQkl>I@f9ou;UFAPK&mHFfw`+ z;6)YOsaX`qKfFyrI9Ve)=|Ir|q_pOYwEYibx?&W~mOJhmrR=*f(>%n-{L@GObZPGO z7T`Vgqy3#dL;FloFfQpR=;QA7V<&*`!-pw}%!xH%cP=n&0mRPt%gN(0&pZA@>Emm~ zIl3v?@D)t1qQNB_e@Sl4rFwYBJ0tP)r>0wioP$eV-b1ioiF%j%b^b^-u7G_ABz!N4 z*2$5-Q^6_FIQdccymiSnNuoU4+3KHNtC~t_Myxs|`Exs@@if9I;aeJAl=(K`R4v2J zR=!Wez5%~mfi;|gG#yq~Paa?1m`yFjT*a#O%33h4#F&<{Vj?ihlf&5CZPXQqeH5nr zB=3j3?sVZ5{3e{)Y#GoKJIP+K(R8+SBFe0`xO$5-jkM(1=w>iROy3lctJm_a zZ8+otbL1kzHKBpMq)hOvw3EE4s`@Z+ggVqPLc;}W^ zt%JV}F&l|4hJr-QP7sj1*OGLSZn6ScSzv;XP{2SMO_PM4beud6oB;1up^+6j92zHT z)+niTvtu#*If-E=5=jZ$+*G47YUy{fks!| zV&M|>Ctn#fFiOPsq>EBQ06H2Ig%q+lJ1ZgX`qdQ?^45|P_LI-jHrH|yh2lX`lHhFJ zG>Z)Wge*c=Sd;aFZ@7Z`0H=dkZ{}S(jA~6d>+)`hNP1bjd@-n4`@s?sv!t^UPLC=nBYmy~4Oc$@C<$YfEgGSXjY~R##|(r3$)$>-Sw4H#NVnSm<|8OzKu=aOH$Bd>*uTu=3RwUPY4K7HPcC$<}x4CmV;WxbLTr%gkvt z2rsekaquPVOf{k!ZiD#sEH zcSx(8xb;Y1kH)SJHRx?tL>(=TJhqAZih4Czs1p1&@bx~o*HpX1uX*3zny19 zS})vL_sogRrz;Dp4Tn` zR9@dpBL;K6mU)TJ@hoTam?h!dG-peg)%wGT+LlF|~c0Ub4S#u?<>m#ZNMVnut< zX6s^t>jr~(g3Ao!V6;W|c(ip~#F)Z~P#@%W0OWs(f zCn2Vfz>l|`j|2X0#qy+)Wlzek z!>!1A{%wu?N5_}szdR9Qd9v85tTR~14P0rR2na#^6cZ-bY}AY@a#k}7HryrhqP4TB zlY<{RI9O)V>5^H~FZ?L_*T=WCxe7TLI03r#h{6bwOzI5clHy+DldJ!lgOA}&KTc`q zB6$Cg4xfmag^GrzBm2MD2ePp}B-sg`2CX;pMfO>;E($bT!L#~4AOEPGk+xopq{$<5k(eN zke}#h3qQeaL+bzg<^SqT#b^HOKj7%tini6uM}B9U@`4!F$*C53@)6)d;>G6fGYz0S z*^=0&(>fzB9&K-ewit!J%+d~`VqL0;sC**>U~eOCnR)X3$D4(`nSSqcZ}lccm1vrP z)6%kkeF=SxXWjwFE`nR|cU`9ty+Wx4$ZJI^*y-p@zt?)zckG~6mg=6{gK#o?{0g@M zsRej}NpE$E>f%zkm(GO4PXD|4@9FlAJ9P(A^REGug6kB=9Y~cgZXXop<~A9^kOEjoMEh?Kj*U zRM8i1qtLQun`1EPM>Xiatb~T7q{v6Igmv>a`W)$q|NqYZzcdUDE+he(Ef2TJOIH90 z*TM>ms{yBmuax-{~zUyZCJ%-E(X5KW8D!Vc$B1XUc#S9eU1$`RMY8{}}1f zw(N%=*QpPBc|zgX*PAgi#-2D|IIsWIkE!_Vf4%}ugfyfVSMV)wT(TK4t^h(^9rzw& zPZTIjX65NNRUye=lRa}wVDP60jp=CrKP2(`i5Be`TNiO6~s41{tD|3oflxw@&8&~f6unTWc4;!NktH(2Wru2atG?B^;gWk 
z*MhJ=v*~nR9K216;LqHi{DIL!q4z-wV#-I@^l1FwTKISWu=aqhlkiXn4q8SEa@&dU z>_)NL;0Ab>)n3tkQSvrPk!U}&L!wP`g<2iIg8KX@LKZ-^YV)5XVGbU@bk1@h3lYvw zP)UVgPHLoe|3&dI5w-YYzqutwe)MeTzy27}RtUkbSrFQVo|L;3)=6C;I zLYf#Nzv1H%W)OL^G~-u~h&+yyC-!V_<9^{>bbQ~cAcxA(&n~p=Ej)pzo`qoSQ7D5< zR=Vi$`VYCp-DLkWAW0P|UVlswpSaE>g7H z5VXy@GCB)Ft+3Eth%5J5eAs9*V1dGGs^1G0gA2TXPkteP@dqJuO6rWnhWAk`e)#*vJ`=oNUP>T>nMmisXkdZV$~DiO{~CGgKe9zl6q$q1a1Ejg z&XXOlRaHe_C*S78Hwh3ML>9>Bj=6u#)#=&iilyeS$eDr9xaa`&IKs*Xa;9=*`QKrq`NfyijZcnURAM)i9bmhu1;5{Zy=^icX!)8Ak!S+$djc~oQ;P zsF$zD-$&Dh@SZw%#UGQZLBsO$h||3|Cr)YrPbj1R%sm`*&SjYodT`yi)Be{jPQl-W zYcM{*G^HmYOP_a)>~oqOn2x)QQQJvlqQ*&txdH}~$0+ikYrm8qkaf5H^ql*bY)~GLXW~{k*uyB~Slx)Komf^X^Bhq1CJ|V;U z07NLS9ueOH>IckSQpW)f_NtuUB@Zp*&+}xzT*B!YJPXh_!Zz+_on2-uGMVj>6xa~O z@kQ0-795IlHsU|rrbE9FdeCKgzyNS>owGcqw1XE}Id$r^tJ;W5Y@Kt@ufA>?Wzyst zwaQBDCXdcxv}pgyCQ(84BZuhDyu5~@fzspM_%=0cOYITk8k_GiqTewEDxJ9guXF63 z4QyCsdE}9c-VYGxBV861kVE|e*RLDL04ys5W@4BlNfU_eYDafZXg_9NBT%gm|6Xzx z5yH?yS~}gaqbApj>HBX)wanq2_*3b&E=%Plb&y5z6&#nRcF*CGS^U@BqAJK(fwgt7 zv5{OYy+?rcL84kAzbQ+4^d6MsC**`g{l^+fF=UNW(?-_lv|Ja=aDE$!TNLtV}A!_t1U$0X6-UkQY`y~d!ZU6Pf(Y!|62xb?nYfX38`E*RJmn2(W>*r zxOS5y{ns#g^h`cZk6KSJy={YVfgE2?;ln%yr7M5lz77P*#sAI)VES#x zi7Mg3CHI)Nb)m%xS)|ObA*h$HoEdXiYpl=2INDZ#4&&wG%c$ei=Qj`LxWZFz^8YJPavA3QbZ`};z^_=)}AMSQ%HOL^gG{5rJ9I=UD4)TsEQ!H3^w zLE1F5D@^jxk8AcHGwzVO_wOB<=(6}>kc13chQ_(F+#7w~b&B7heu>XrT+SX5L8o*6 zGR=47sAA-6RK?7t&Fw4Ba0&#;BFPol7>T2UY6l`xZhp*kHp3=Sr<+bXI}g7tem<8_ ztr@bR(vdHKDPyydfQw$_i>fx_fsO8?cv&&d4OWPCS69C3VYs~77-j~f$1rTxWy#^y zQe4wWn5F*s?E|N(T?H_`3fpk)cgAI<5^qAbGOE)Mt^cyln67y6YeTxAk?19qtaVG$ z2TXX`2^9?3e`e)>CDPIV6g?Uf;&UdG};8b!4zPI zWWX_D82YP47leXjrOlGF-}yc0oZuQV(HX>icsQgj*ZpNnZj}N0AqmziJ!#J@_3VKw z?H44BM(NK}K+<-4_DMbp;u-;hO+D`b6=LFI@~9XKI^{98bw?3;1*Hf-CrpKlIva4`y zfsbrfbBmErtvYa#@Ca*KGq*s(7n=3R%4I2ZX*kQ+@SK2*N5|i2(!H4ZmnHt>{?AsQ z0KO*pLn%sG)9nmA^R$jeaCPX^?D*8|l8qY=v_#N)dTeR)Z_i#<+y2zt;hrcs}&%49PMMl~RgK z?998URpACc?1i^qsDvM-YGrI3 
zN2JjcHP#~Z#pp3Y#u^&Je&IRWT?Vww5<#d0h3X6KEtB-uoP2qS0>W4$mEXomVG&fr46QWm>_7NFrFG>DDrt#UAg^idb)3MP!2#2(HJ#pzMER~()@|lc zDDq$71(^LCph=aLgHNt_-pu=VWpMRSkC!QON_ce_NPd;r!)gkr5RYGpHT>bF|A>L;^kX^Ocw~xzd~#Ikk~)B1U(( z`zY1TmbU}EhWm22tsx0g&?-_C7?h^prNjC$qcU)3A9M&cV6&Fv9`oF%lGav5zl)Pd zKV59`>7<_2QGMB%(Av~z>+C*!vKvd}41Oo}87|e$;^02lDD#G4+p5dnWbFtZcO7}X z&jFQa%>mWP?>iuxrIhk}!L?A&N*Pk*Anv{cmcs+(<)uL6g@9>02uj1m6_~oe z2Lc)~7$|_@fcME#;xBxlyZe(MX~(9ulM17Q80bf$ozb47)5DddrU!2P_!ZGTF(}R2 z^a|^FB~ODmCTS6Z-^p#HzN$%058wDfDq_$CCSRX@yv<|&)sjbp&>)vo0;>*x6_ls@ zLr1G<6lhV@W}cSLM$dwD;pgBVhW4x15FB}|X87w3^-5OD@bs1WIUKrVy)soF5IqL*_0-wB_ItwyZlG1EL zYE+XULwj9ODCHDG+df$gGa4knq7pnsAT=DtW01o|xb1g|RNPdC)}l5_K+hWUW8NSC?An@k7u&b4YJ{oC!_a1Z)_+>_=Thb%je9 zVGK5zWm4Cxpc{Tk-Z^&1irdI0rUf`d4X(e)!6j?zyW}%R@B=TEpzaitGOC&l*}%JH zq=`mm@+9oFt1N*7sJgXkNpP`O^bx}dv1)10OW~Uhg5B8e&3%?E>i)~V+Xa1cqJiyA zyrR$dD-+j11(U?d49`}xG;A;lcb#^@O`7NmC=c$Q@>I1)U^g!Y#f> zX^_5(=-&G`80vk9h$r%>%E}9cUxfpUdM)L@?VEz)I8%SEFyF)ED~%1`U;}?|mxo)N zbLn-X`Y};aPa-*K3yv4l)N0%H%7orR`ECXLAmMl#xJNV@3%I%0qPr9SAcQ5(!GI%P z(viggBYR0Ik@jha01TjM&O{APy#)P_3X6VX?nJj}*|3U7O(8ajB;mr|h~L*ilU)V> zweRzo)1L-;cL^#4i03Gq!nASj9ncSEgo@DAGT`DvKg#5V2zv|qcS%#wHw7V3h4h-I zn-1HjHORTNuu*N*tk^@Q>co!(&!o{1Kd-%Rd)-P7O9v&FgA*Tz*R~-G4a|OIlgWvkDbyO9pJX=%!{sj(d z&$?Fk54+^}WCTT!2nd$-MmNB#J4A$rWcs1(5n-DCg4PIW2@5Yf=@7;%pZ0eBA>|ql zae4p|5^{P#GsqeKOE43{b(8OKeZen>LgXQ>1i^m#mtYz!I6b93)aAK>Cz=blKotyH zm?5|-^v6I&%4=m6(rU(aO)N0PRz}*{%e7Zg!rTDrc=xm_24#<5wX6+-?YGnmYM7!> zVPoawhh5wY`DDLL1Yh6c%M@WjUyaistrtuFeqnT21X`H>wT2$f3=VoJRn>cVY!b6g z&R4z127?(ZnQvxTaL)O?$;-v!#>L@>K4RFYkxx6iMWH1$@3&fXoxQ`Tko=$=8`_@( zFNs4ZRs7*?RLc%8HP_i1aFGoJk%Qh4&=7$b&~tf>SsbIRB4qt&#r%-)ISjP{l>lDm z2eL1DXnnXBcsbBjPYoaAfbX1G243dL7rVqjrTg`hZ7(w0z~=VOmd3138li`M+Jt?9 zvlr;VpPHmmAq<-s!>p%8hN3bp8+GfpTK3rEXmmN^DuSX>x*wx_-C{JM*S?Tj0}t3T zV-DTR;EOrfr{f>?k8=@sUs#IN?-}@slA4Ur#$cFPy;%lh`o3(-VBCT!1+Q5t zraLdy67BcV?ne=_~-#c}pwdivZSlt{EEWN6Nzy;DWHA^)Bb14tk 
zxDAd61ec`SYu)d!=vTIbH_LofH!<3iXdz*DOeSeqd!d6n+si&`Ym$E+PIUnRWVrfF>{-bAI&1U~!z2RdN;Eo(GLN#B2fSOI zEFw;;MM#k`<~46V-*;n0Pbm!^mN)e8X6iCUQ^F=V(`(53=t zvQ&2?CY@o4L3O+)fmB4TSfZ;(^5OSau*IC@F4fDGEXR(&C+mKL?@*K4*}XR7e;slD z1ixV|C2JMr8n&wI!l!$4bqY=7b}Z0tShsb^g25pc7J0EUaj3g#9fm0V@ndS#EB5$} zs4&f=s2K$>w?gK4D`042|8v+ptp|Oy_cQJI8IGW5e}U#U3xLK>xKybM6t92v{J^Da zv|^9Q##qNjYU^%Al2yO#;Ie?QTIplJr(_>1KFl*nwY~cDdh49RXk(Xz1Sac2Aful= zDd@~^VuNG%LAGzWekWJ3;u7_cY(rRy9j9?hE7n-w0lG&PD!Jj+kC7TH?8AYnZB1kg z2YQHY8itRIN8tK#QoPqV-T8rm$`0BPnsM5hW7Wxn=;n#y@CN$`SI6E&{=RHXTl4WH zbx+z5&1uT|+m&_J&9M}_3?n_1woJd$tlbko4?NGYC!r?vTOQKf+dfmjuzsEPo^HG| z(=d+%XR4F2G{hI^xiqnFJFY@|;HIqVtc$+T$)=TBoUzZb@11dDtm4?r#sK(JMGA*K z**?4q92x9AEXrSb$Tvn90nCG@EVyXXTqe(^8Yaf=V6o#B=nZG?pC*N`P{WB4qB+7$ zlN!@4I&=1XIp<<9$7MuoppAb2JuqGr?(^p?iJ)%ggn|iMkR02&Jw^^Apq4XL90i_~ zA4qwP=jxk72(5eThaw786-Ga-O2cQC;K_|gI5$b(PnCm*)47TqDZS8XD#Sdik z1`8QjOj*nEe$mr+^6?q4U_R{pLBC?lj==ePdeLgTi#Zk7L3k#v7KQ!fKK1C{@7ZA3 zE~7<7U`MI(7!pf-XP?`bH$Lj0qyZp(TmyU>R|ykuG57PCJi6u?SYG}$$d}mhncr^I z-R5@JMWF)blNHa5GbHj;|&{UnN3t_f|YdO+BYKCexpoEb8e;s{Y#E)+A8VhI;nVn~Ga^B|yi%!(CEr zf^EPqdO&jE?(ml6Ws2jl>#0Hr5ql;Ja z?5pqrT%#Fp=i&XLz^PtPwYTX~185LSf~)|Rt%|%AUO!{@={IY35|sBOogT?1(+21# z`}Hbi>wH#PCb9U;LGTULa4=myhCz>eA#Jc}$6(fzv-?d@xj%vz*^uUrdl`adcCd*YGz%A+z&7R0p~QO z1rY?wQBKw;CQ4=o0#wkcidEA}rB$3YA5LX@$7tLg6EzoPKP=ECcT+z;dVuOpLn;q? 
z%=*?|p&^5%GnXZ!CcRp93Ey%%7^&Rd+Z81q@0O3S*;hMXd@bD*VDWg8GrJMfkC|RE zY>th2!6Eh%a`xr^;pFv1igbr(d|M!CG-%zdpRHL7|2K@_q=Yc#tu#h112Q-Fxy$>u zV0DG0CBU4ET0bT|j`~;3lH4+FWSEHONFK0w)6CJgcQa46hj<2Q3?3mMa?)e(CXE5t zMSluQ6Hz)exkN4k@H4dc_m|fQ>r%;^-(R~lV&5_fw5z13nL&)pzTHRl=z2_38Ck?j zk&vPBfyT(ISud58X%6n`Wqq5UN?om$0xa-5Q6XkM2cIV?Z?+Ta;o5F0>Q>2jlXb<8 zWYRYMTlAww@ei`}7gy+Qa4zuZ<5Z$vR{6;}6|er{MK z0^?zyZYN%`8EFc~#T#6pX^@|+Mxn)?5s}?1t~k&t6ogE^rEBJHVZ69?Hd1qV6Dg1b zss9QTx8QSctp#6DT`1$eLrZ3%ROliFUHb$nNNc#HdbGw+%i3G;;W!0K7^;6YxKO>g z<>ozU6tbL!!*YWWxnNn%81x)EyAw!y3VW&{oi+ScmM617ICFBx!)wW6`BL@-I6Y1K z_3PK=vFotB|FriTUrhtx{XBV5If>dQW26dSe}g@T0rV4QeIn0pl`HIy_-=|=GMoqn z7C9s3tCIhu&)0u><)@SXj;}rZHd3JLRsvfg@;yUVu3uX+vK3ab`0Nw;^e@iS4_r9K zCwRA5rqdX%0W!8LSZ4LqfqD7XO>eOBwF=ahbJw3aItha7hOJzCNsneOPt{B#u*_i7 zy+=--m0>!oAd(Dkurv3AD!CAcZqZ2eXZZMb!6#_^bBeP@wePyKETy%Q!@Z?AOqZj~ z{buKJVb+IH();hF>5B3(!)vgf{OCN0=;dTFkES*02b3|3ID z-=eYjBJ_vfa0JJsWtkaoYvabcd*50;VVaA=Ng_*v!YVo^ZGI>fYR*fJpNNeWQ)3+Y z<|;!EmX^4=@5?hTaZiJwvOwM{g7ViU9x9!rnsfXvF#1P;XWt~7NyB2%km#eEwGVYu z90$okqIri50AXwbGdsW4#C>H?Y*%zV_d6+tCPcM_@K4bp znQsNHm`jD4y8icB zzQD}u!|-wBT1j)yh$-LE(;d7cZy$%(;ziv1Dfxbe&Ue^2hGBb`IqoD!z!o_qkUjq( zm+l$1Ri7^3#!So6f&Xo`qfo(O%y0G|p8Ip=LquAhC(-i+AFpl0EG6H)di!tv#^0yB z6p?1}A3+v_D%nfAIqR0AGxq&=^ueK`W7hq|I zA}aUSO4FV@YaeNn3O(COq+vDRr;4)B19^th@a05Wp+&7+JVw}MblRL<|3v8F5JG)b zUibXqIH_G2+pmcPwg|gn+s^%aOzN*kt>OEoLw%7WYHRkK`+|rsdc}jh{3Gc0K7%<4 zEZ9)qR63XUGCy93S>{un`ia`QDQtQ}vSIjpO;^4;CWY~A~(k5Ltllhe~{hdO(c zilhJ4V9Z~=j{3;8x9@*W$>?{t?k;_RHeBWH`6^2lw|jEkwQusCtk^0$(<*?QG; z7w9+U_>(Lcu{S5vin%*HLNw_jVTJTB0hdzNZ9{x~ymxlf7hd6@%hs&RR-FU1{Gb?Z zmuZ26QcJ^5nm71($)gA#q54(}kER_j^j*TM1%cB!6rI)B^sUaFSHf&(_UJaPBQivx zb7|+CBct(xk)Tq&BISx5sXk8qe)ZH}lN~k`oT#5Se{481=&P(_n6X!!MdrBj-_8Nf zOSu#*UVSlROR(AgUSp586PGj=I+8ae<(s7KBOt^^i4L72_2&dl*>)4}kD#|y##dbp zPkLOW5z!6u^yLF}We+(~yWU+=kvU7!{D^YMyvB%@A2qM|4HYGP{*E*EyHQ%YNH&pB z86Hji)BdDf=*aoXMW?XV**{bO*Fd`Pnzqk(>>Rk%PSBNPRUy(3{Vs(n_rW!jXxnFK z8i520rZM2lXV~jyB4^c)4ar$$4YXVGm3?YPOz4Vln{S453N*^*AR_6Lwm?Epf#!BY 
ztM5>B{PxX1`>{>+hH-R{LsqjJu46XD1R=qm)BQ-ivK6`!4c*T-74Zr5W<4Vd_@vz^ z(YMkkg7RhYY4wovG}fAF$A)N3H7nnu4vw+J?|<~$3Ud#k0bnV23*q#cly7JyS=n#Y zc!hb)dhlRLMPDDm)33+8b9zAnJipPow%(d>mp9^Ux!SeF(?tp1VTX zHU6y-Bk9Rh8-Df6#bM?xi$j#qY$04}_ z8q=1LICv*S)&i`&>tMK0in`i7l+bFLDX-DX9rn!c9ujy#cM=MBT3|GuHlZ>v%!}HO z5xq$5Sgan;;R(zYAHKg1EzDRo(u%QI-x20HdUHj@X7PB9lV$c(1PS7{Wt7L1z0=V5 z*X0P{-g5i}}cC7}UIIpSX-(FRfimCsOoVzgjiyIzZ)vd!&AULR2< zIS8Jf9Y)#O#>gm-3YKiz56#buW`W$CMBbJ&CNmqgnSby0o(UTw{(QKb$AeOrc(iFC?BczKNL;=f!z?|1(-{*w0)G6*=IIF}wSKXd*UaAE zbulF5Mx2-I{Nn$z3>U!9$8h!iKa*4cQ0OwUE~gLe?5cG%A_(l<{Io4CRi}Mv;9cfq zY$J^IXp2Z6!ok1<;)swfrw^@9eKFP6UIaIMz zhZecHY|#h)AH~RT-2ZL#tZK-(L65UIZVdo(>M#sm5X~UK6Dn&No+M?(D^Z(y%q!dn z#e|JnwlKO%%Oc8SB03{L<%;(tB-ry0xDUY`@HOIMpux=Vgy+C8q26UbTF`wkpeU#AyfH_sWwSni$AX-u3$oMt9k*PhP z2;YZ^eS$t_Kkz|f(VX%UF0kY#h*bKU+Dau%=))`A@+*0I-l36dwCEdoUy3-*&3k!g z`nyn1kC+oWo9eLQ-Zd^lxuWpCdG z!kGLZ#i1miH^{3L#}cWAtzV)AwqWOI{7Rg3*s2RTO+0;!*nP|lwKIBdzaWH?=Z|KB z7B4BRpBzjJ2~y(ieP)w0o?ip(MsxoCJCz2a5UUxR%Nr>}(#$f66~uH_uhDm)Rkohv z_GTAiq>`!m7=!+=Cg_1SBtN%UgRBETNlly*=0pPA4<$+V*w(_C85{+c-IylY zpsi(Y)0bOe>B=tvFr%I^!1u^h+lg&F`I~qcknrLM@6OuZs4VO<`b9#A7DACnaKI-kBVQa@FNM) znG6p{xZU`-MBjfByn^arukN6;(p3{E`PV8)Z+gmi`dT>tDxpw~8BPM5vD&HHEvQv% zH<#SwAoURswrY$Ctsw@N++#_((VFQd_tnCr7G;<7#v0P4!-nCh#S;0>T!+#Y-@(ao zFewQmm}k^<`eDk(yhX&m7@XBhH2jwU-?L(~;7QP5%ZI11p{m1%S&Q#&SznM>x%4;f zSFxgy=;HYtH*a2s^!hor(c;kgWrMrpxLD7$6n&mGSQ|Z)n7om&anEq|TERBcgFMWg zheY;~>1*cU68f`%Y)AO$C<=eF%tS*z-B2mJCkUlR{9>3h=7^iqE8vV)fRFYWTt(3| zNY6)sFF{v9XSKFpYO_T9qxkjqFa$zYD8XKegO#H!L_4|^-Dy!-^ip8zgJzQ`4-2tP zTi=mix~+v@)xGyMP*^Is7F=BB?vg%(xYIJ6Ox#H~b+M`s4cim1AsQSPTdAhy^nwtD zCfI_VXJT|7%Z)TFRZiBnG?U||f}4c^b>_;`*LCL5ZuompJJ)6bIeB*>9c#MPYFwHw zOebBzohjl%t>`KK`77Z36j;$3wQX~I!X>B`%mm`2riucm`^NeSh*}GM%#zkZBw-?c zROOXIL<|!wc?}z;JJ(OPrP41i`69%{dpIc{7pf`Fp>j+huPD^C;a_Tw5?J@lq}M3I z*k+r+o!Cwu;Z%2J<~sG~R_D{tlW#VNvZ%CC@zXQ{~B!HJ1H}v z;-8IpFBbNq;@Hqh)ekXjF=4BFB8SX!!Udr>%mF4!Os-H79dAN{MaIe+5tB6C$l8pP zw^lFep^IkX=j@Z%8KdGwMfsP8>S%$)}8FbwaIsGqTPA5P4e 
zJ=@Jvsexn3n^dM=x+QBcNPhC{>8zdYM@LKaE3{+HUXu5+^C~b#>>LxbezF2S16fp- z9+YVH(i+%})71589=HwTB+Wvx)*ZuIGo+UIigp^S_swXWq~;6IH6z?63wgfv3KK~l z^T21HI&v+#)oQ2Zbdu?b#Bd(1#P1=M-+CJ_{#`E^t^G88z4~h<;LId@82H!=a0TPt zC%}C%pY#{WtC{Cy4S#cc?*a3gdc!8zOBK(PVGchvw=#y3=?Mv?#n9Q5O_-Rp47^~v zQ;<@2qB(cnETS4*cR5{yHX8T$3`sLNWzKhpi7rksQ7LkJ|wq=;@To0vxrT=sq5%5Redl- z(*P6(>inBbvAc}8RZ^$g@kOr$bub&Zn0@2|xz{^L>346C&RIormE$>@!j5|qd2|>-As3V47I%8W22qUixU2{V{Ovbr?jGYhkYfkDCV0sM zAvJH#H0)6l05ce^g0E9_Z>%R#dm54NruWzXUcQF;p!C1*Fh&2 zml8U1K}7@+I`X5TU^m}%j5KP~LiB~yZ!sGj$)Zpt54GdiG^;k7mw3Ny#=o5K9Vr>0 zq*EWB0U3K*kAHuxl+dA$O}ujJqo82SIa?S_;Z(oF(b%UBt0f4NCEx0c=Pk1{j4Hgdw@~^?mK)zgT#A>1pn4G&}B3E zbeN>HcDmLL8mt75ALsVdNYuKd^b$7PuWJX4%agDzg!L+rh-|?t{a8bKXy+>w}1RMB`~~qLvj~ zixxiLw!I1qRtE6aUF4JpNwn*4+vy85kL_F>TLF=U+L5dVd3Y5_8n?XC^gGz9Sze@| zoe9biw7%1G#mpD7`jNUO`JGxsPV6t>j5Bh`ePCH>v%hY6~uBmStr?kTbi0iEV z%ZOir(jS<=5u^!o(Tv)kyTbBQqDHKulg$eRMmj)J)E} zj9uh1O355fSp2>pc(m=Fo0VdFgzeu+_1cq->sB|j>kUl^hn`aiNJhWkh^e`<{u~e+ zew>_#2Z#Ki*kksxfT%0*4vMuoN8g3#NGh|PD#puT8n|?o& z5w&f{Mj5$xEuL7dVRFoCUoG=_>NzD=#qEQ3+d}z+&~-;u$3Dm2!%yob=NPWsT8|KwOERcq^yR-5I zkrCec((%P_a=-hC(@v;^M(eM+kFUn9LMk@qbC@3))-aBdbmwc_G3?0iDzxk02(LN6 z;keEv+ZWN25|0gRJ=)7&-kZ&jtOcaVEc);6iuV>$*)fFz_#$7XX7>q4qSW=2t64q| z&4&EF)ErWIvvVLGAdr61e`AO`0i%YJvr2e`mq;DxEDn?Q3a12FH;De;{Uh-v^SOi7 z*PgGBD*Ajq$3Y@)W7H+bCIVi6iOphD723Sq`l2l~CK+G_*zqyU1Ub zTth%W%p*vvthIfd8X^An_b2`Fb=LhDmHEY$QmCQW<|HqenUlP$!}QyJ#67U6-GBJ* zANY*@ijVJYBCmq;mubLt;OEx7eMae8hfS0QrYweCbr<5xQWAmxmr{837V#Z-k_M;G z?H9iVca^8ByEwM1rPa_JB8>hYgZ#lIT~e?-Jou4zxdE5qh&}Z&V1$tG=vG3)#Z(ATquKoq+BmLhU6c2G zvW-g=IR1FxMfUr7C;=K+Tu>sezl{di35avISG-C4k;TfIeA)>hL*(39ru*tU~`cCbm@7VOp$32-YeE=7+n7r`TN{Pat0 zaFoTeG)-Wh`qP?om4?P!e2;%`AKgyh+af4bHL$pT4wc7;Vxj8>= zyVmCxTm#dlJDYM=;IQjifr}vY;p1mZ=ljj znWyKIo^l|0Uvn12=4^_51=Ia~cY(V>s&C3Pq(y*C3O~Ww5Ple^xoM>SB!K|$rjL>o zK^r(~r>UoR``F{tcX=1pu_1vr2LP($4gUsK+&>nuH!Y59*G?*TJ|T}Nh`*n)4Kh}b znsa`CF0R1Gzr+pO!7AT)#A)yM&6;nU47{=Xz|;_b&zsBe(H657Uec3EzhP`k$I;#^ 
zt-!}cnA$Via7iW}!ixhIiW=+189HVvt$_D!=7wF^nG4lE5+ONvK)WC20Qg8ZRESiq*a9?4Xi#I}Y^Jhc$&Dlf=Uk*cUTYkUtwL4jo z0=2n;v$yMn#Z2>G4eBmR!kd%Q8ApkEWZb)r64~lMFUDSD6K{A5QuF#TgdF<^kl;0( zayGSQVO4bbk7+x>J^M;Jj@g<)?_rw{b~f=gQ{P zu|yvyY4^|ZSZ?c${5f&j^Htx}`lIZc?3y;+&@{p_ROiB(HzPY+aOa!9_{m+wh>^bkA}$FTO@FG1(ui4z0AJqJPm{{#&eF8xoZ ztQ5{UWn4qN1h*|Y58Sp|oz{K-p2BMUo`Wh*6UR&ixA?y+7*1}*+|+bKE^+2Dr(eMP zXZe@#g=A9ic+c;G+hx?-UPz~ZZ_W1g-2oGkS*X_L?Zv%}erKyGzum2|Qve)FSf0ec zl}pUm|366ij!$ZagLfW#fB6fAAW?lgqWH=pBR_Sv=M6B99b6-6n^n!kj(+;!Ck_i_ zNwVvMeg_B=sYbx8iwa5V3`{Q4HIr{tQ)1k}CVw|Rv>}D&15cTpkyrGkJ(~hlr9s9K=eCe>0R=JMjB%bgyO^!8CKEqrVKoeHAX#nln z7N}BvOuj%;ynxRtz`K9Jbfh$q-<&9CIcCmVyp*}=G=1nRzUP@4D!{R`flQ?QlXyMe zlAK__*~COzeIiD0r6dYYOurtaf!0P21>WX{wvl*AQh>!vQ)AjOzE(+Fc?aEL*S|;# zU_YB`B7+KrA<4H#T*yL}Ag(h&b$M$XF@eAz6I_m{G9(3;mqZfq`K?Dnv2l!+3c2qPTNR>}2*$2na)N}tL*A+eroJwX<}kx#uY zuncT>(zTTaf>wuWFFVNnWktM7)Ek(bD73PFtJF)WH#CCW=o5UcmDs{J z8;ax+Sh)GfWG2U$_|Sr|bc+@5BhMS8vaL92%|MB+0XtElJa_fRCxGyB8F;ip-6%b_ zSdmx%o=Q7lr|-}OdDpQ)3 z9|KnB#C1H%I$>?6sU;C78XDy%;Y=pfLgz}qmIB0%5+Z`qUzz(?T}*e-ZJtCfr3`Ty za3#G%-y!N4KUX_ZYLc)Nbm%s2TYqG4Uphiv6-`i!_T>?LY?kQT}V`w87IavD*{ zo9pk_x!!bR9d!Cd#5f7bpN|K zJ!V1d9*wV58sUyRZad`$c}`NOO3ZUQ4?~Zi3P_9TRXw3AN+zfh%>5Sh(9j~HgM&pD zrHDQV$CJt3@RF`~G3-|tn^7cBcrV{$ufUECUpp{y?Yvxs-0z1ZEgJI1HT_wYFH(Q zJMf8$7Zeo;#jqq!m=}ShR?CJ9uHp|P z;hEW@U`1x%@`TP-QZ{ZI1wiO8Rg1wJhhC|AA$$4@w@$#bZ3Fv8r(pv6!#Vfd8EDLB zWJw!2)*DWO5-%s#gAFF)9IJ%|fP1Ac+qz*{yZ~Rmw5tg!#z3!5M-&RWz?ARb29PM!T!8xXlx|d$!_DaA}opVA*0B{+nD?{^%k9VUw7qHhTB&-ZGDTqyZgH z9#Nc~XS~{t>z8aF0we}ZrU|kfw!-386~(yoHkkD)jogN;1rE%8r8VsU8Qak;f0v;S_(g(P%v$QA4J27s>YUMI|Z95B>RK_Sr(w#zwN7<5H#q zONmlJRC4BTh@#ybBD*_H|23S|qgRYKs1Kp|FwVHkNC&H4kT@v0btV6J#DhNG%fvCo zcvCJRkU}~46Qun2_x~+6Aim??6M67|Fa4QuG8B`X#>S(jrs0RR;ZP(Zjy+=_=%?j%T(k+gog^_y-Z6}J7U1D@v{aUWola>t_$!Gx zjl$&&AA`qSWu9t>l}Eh5tju~$#U)hV2J zW)3_TUQwg4oNN>zyiuDK;+9-~2}>o(j5{UGum#pU-H@8+L0n{70MBU^(drGPNYQHA zYse?;$CTS1f~kR8Uo=yE2VMAcSCOMcSDxx4#JlHc`PxXwj;AUl36Z8<0s@aKEk@&d0$3sx1B)gbs 
zsnt{PG(IC1WFLq*Z-wZ2T|1@3NC8cx%Iw8e*f1(N04>k9-&vZ0qO>hC`wdB(gF%ls zmbjw3|C_%Y7OSz38P*}qR0mq+e5Y~XvS`!M!N@C$fUB6lEk~pKHN=&Wc^y6fCsgoX z6xDKO3%|gXmAtZ)46m@&*C2;wjLonn5O(0B%ScVqme!OD=JhS`XaHDv9$do~tkgR; zeRrURpffeE&q^RrK4&OK)64~_^@F+srywZAizZyvR9PJBm@C|7C5Lv$PkdLa^m z4O~hS+H&1AT}Gr)a}ddsR2cC!JfDCq`{O{N>H~uC7A%@MxrZ{$aG9AUd}`aF7{e+# zh;l@rhkFi>GO_t54P3{E;+0;xaKLi9AE(lr}Z3qcA0-afmbvrlw)w<+UvxV`pzV{<+@p zM}FjFT$}<{CIQO{g^Mtv0j;jh;4Wkj3HG0pDCVN=-O>e;NFKc31oyvRdEf2hr@8X+ zg~@1a)s&VMbpreI>lT-lmJR7U)ssT!MXSI(d2(5=01zO-AhN+{lJ!UlO67=g#Y>v< zalv6hG%gMq^u1k2G%exjvd z-Au{L_4F{!YcU>v1NF+b1Ej+r$nglgnNP;X27BK0iWUzL*w>E?hFQ){t4{tMCP|B; z)nzWUO%khi3j)7$d2Qz+e`)K-F>~Lu96McUMkSxz06zCgAvQzQcXrzt))(;z5UKCc zQIDC^VaPB_sg`xzR%khBrSS2QV&%E9-MCI68h=G9MB(5qRL~$)XE*54Grxj5T%}s< zxDXx?(V2aM0R;Vjb}3CDvIrFZfrN-|n*<{<^p6AFyeE_#h8FELZDAT2Pu2WG8E=(| z2N?8yq&qlG7voMT1-j{}&B@O*$TNxx?j%IJoa4gqr#pV&_moARJ|ce8O7=I7@y>Nj zoA$hvP&{da9z$#!OiCa}pO__r-4+n@gd+hS4x%+QSbA!7%e2jBlo_9tmPd`u~AaQS@DAip@AeBJ_Kd>!SJk}nJk4n+6G2a|D4lLV({q$)eY~nvXR54 z@Zu@1$H)a#dH)4-U2WNQk!BLFynzWnL-8o{uS97!Eh0A+mUBnr#*L)5X-(veX@UwjSX(P^LtPq%cOzjSb$x3i|Cz9||uwsPearNp=A z)LC3WKcpb9K%f$dMPLhrWH3;oqo5NR(<4)=55{zjgoxVG%ug0oQaa>bbCU?C8vnN5Eh^cZO`g5XKk~3@RRvY(wkVxIN>^q=Vfg(U(jpERVs-? 
zVl&MM2SqBl=4X(6M&TyOGOCk_!?Gf%ICq`8++h$k9H_N!3PI+V~O26alS9BU|1Q?oJ2 za4Lr?h|SrogL%>d=vvK`KoNx|La4l3S|M&)+ua|}rgs@pWu~Ud;1(x#3aX91E_$=Q z5s|aN+rvKL7NQ0#b3scl8in7OtgpgL3BX|*HmWSDkJB^Kq=T!-8VDiCiW^aqa6*wW z1|R31kBiT-3?j(H1kM7%T;;U1t7QUL)bQZhui^Q{1qyKl^R@IOsz#$LWjF;%^}Rk$ouzB&%8J+{2m3t(PCN zk@V`SYf);{Ymv!G_Tj<790T&Al|s=_l>GDZn2xi1=FAfOI!e_j_4Q;8fKR^Ye~6@L zr5N28b2cTJ52QSS!v(@P=|`{sR|_!AT)jXc1r)UoY+`Q;9Pjqtf~Ho=4!>iMLQ$I@ z0}{79BX^^X>Sx%m=Q;Bb-JnTxK4J|i*elk|K_MfDF?ZRII{*^Fb~5N$wn@gxH}Z|u zAsPn&`ZJU;;A!PjMdob6kcId;ruxR5xGYU1v~LIU#z;_yc>@SSe{`M6(UWfBKn=aqW_IOi;Go?|{X zV=j_RCkq6Rr({uVik7FrfCmM%OCure@Z=Y=K%zUm@rv6-lrr}97Ah6x74jtplcc3@ zbJx1Bw#<2H5Bo5Sl8fiRVG1whWhUm8v0tCwzJ`{tW1j|bgkapvC(iVF+M?_C$A`^Oj=AgQb;WQ%hC>^qHA`Esc zp;J5$W|~tZ&$u-D2^N#?3QbIeeZS#;O16HRynN!S#vvmK)Va=F8T{QM{yi@9om5-B zVLn==bP27Fp7vA^V)S{Ln|0FpMmbIdE3uzS+%PJ28TM;cBG&lCNgPtSl4_x>iagPZ z#4OgxU{TGw4b&~NE$I9S%({$Y?m%5PsKg(^>XTHDkxio76028p<*;5=HeR0yltT*U zXYW$Fv8>oyKvq56Wo_aG3?*9PMFg!>)&Iov^FwOUQA^QTwv~=_fGGo*QB*f`^`bzV z@yC!;GLi?ub%p|YVedsFB+KMPN%DmJ;&K1l6x=@A8>MW)bb2>V6L#2EIreU>9r$*# z%aC2kU8~q+6!IYjUUY%~mwO=&8f<92{?A!Nh+XHeqyj@NxQ=6FZM}eH|5rh4J8Kgb zDl98dn9ZuUQ!%f|5-za5EiBQ8(pvVvL|>=MoWrTVg`0PhUeI0p7;}X|Q^`))_0(j> zNlAKkT2=g}l;nITNMZuSQ+xlS!5QchC=sgaCb`$&8}D6f4FF2^ zN(rp2y-+8wV1JjBTdlTP}PQ{w(n-obI()OilMsNX*R zz4NhJ!=if1hm)dLBFV+#G(x|Td z^79mga?GT9(gUs^KcPVvREXs#yKy}He9VPNX7Gn@0( zrid;{SK7`_j3O671$O#hHjS1iu)J2Q#R`=mUVqUsI{2)i)uiY!cr>H)HzBu_N_X-K3TKUurwxRmHj+( zKJK}53E@wV88#7JqrrDb(!0>%E@~#W)nLu#Wj2uA;^YGJsoThRILN&kpjFh6(9m_m zs(8tGEASr**}B0kriaC*sW#c$;kuB-j%cVO&{g_5SAE3$@`FPEjt9}TZSBgO!>hS-m3=J?(4q2X1Bc_;*HI(#t%qq~S_mo2Hf-#&k;zl=U_zI30{6Wv#iIqJ>)%STL zOW;-$joFPnU59LSnAVn(n{1w3HG(I!6psH^IX4TFJXt;Kw}AiQuSte^L>P%%X*f_g z=Cvz3(0SYXc1lW;X|si+J|gr$)YVv-g%05akHIY!_g)pFO~f4MFj4~*P6q|L9VF-Y zOawYioSTf*i|Op-I!fyxDlNen1BIzWA8-czSPN}3X_=dg8&L`=7Nb^^PFhAfk)SS# z`3bA68!bsX!PuLa(agl^vJ-0rEHbulzwAl6<-3k}77by>ZHG3ad5xeI3}X_?og@CMY9YN2Q{FvQUj4FsveG zr2ig7WYuGy!T37U5fS|x;_YKTfR@VLrdXrp@1?b^)lGC_ViZlwVpFdaVV+NN@^&Sp 
zaKH`HIHhtF5O`78`<~d4y|MD6k-e#GbUbRxy8qwAm$dH}+=8+X&V~0lP;q*#QoXZv zTr-70%1je8&gdAhoMhmPH}g8W!8DEt9bL2nFos*%k>oe3ICBY zg9Af2F9~7nG(0BqRkFs4-ub(M>29!9=YxK$hJ`8)E!8pMz-P>LwjlF!y?gNMI`{AW z{}J%~kBX-U0x@Xif0mdcwF*Od?8;DuWsb$nQ6S%sDnheAK2!vt<2^pl` z-Yn@(YLwpFIc(%A?g{@U*3NcvFLLbc-Fn)!<>1dV7z=oD$ruum#c^c|JP=qWGRX5_w6g9Qt&K1@tLJax7uVWE|2 zD$I;vu=q?+{Oy6JxppRts}g{3%Aj;7vcQ)Q$R(=C@xp1s-cIunozZ?b-XOB&2xkXm zSvwxEeTSVA=Xh8e^QKfawSBx z!wDL3<1n3qNTEL!y2u@_S0Jb}EJq|cl{9tEnINQTBSVOkr+RvP1yQ7TLUCeA2a!}` z-F=lb-6}w4`nQMhKA%M|K*giK`ec=T(d_&4%p5;)|GoT*N30YDhJ78Ijrq5gy;z&I zO&SIZLV$vU1GfDDcRjf$|Du9bom}uxzzjb!@2vVPp27V!H7U?-k;(9LZkY}^kC7Pw zgN_aI@0ziLgo7L;Ji~^C@k)LAqF+zWB$mr)+ajJYa#D6O);48+)9at8P;-l{$^8^h%*jKmY% z=T!Z63rkhsBN>W;**J?6-TOw@r`IHFp*gzh*%;L3% zqmQvijhB`-p{fdZ{8=dcioDU(pf^1c?Y@%cg4Q%UU(nOW{+$mJpAKW=ToyV_3K^2qr483PyjF+rj-jfB}gFYl^ zS+gTwNnY*gjK%efZ9}RkdA`ZloIlxXz%y3f8bl!p^e^)GAe*hhw2wKPrU7z`N!ET_ zaaZWdc?%44HvlpZAyf0!${FJ%)cU@jZlji%(MM)SmO^w07hgY9e@-sArqF)L7!?m~ z|Irb4%sjDn00z0&q=rlokiWeCUEGZO#I3|oM>3m!g9$87@MWfQ*x+CAW9x+JN9KYBEp{0 zgF6qp%Usvy^%56Vo3!h{qCwm~Se!W;Mw+|F7#1CtwuH%Sj-N*x9i7e?D!4bOiSs)t z5GrdqEmzP#w1c-Rt3WMcQjvnhq1Kt4F$3xlI?|OdVvy<0Aa+dwfh zY7W@(%|xk>%pur7KT?S6jd|@D=GFDYWcJPae3#fAGwIl**jn;Xp9&0{1`6p@WNJAX z8FlH}x1#t1xM}sp=HgPCZ{%O}QcxsPXR`5c$fxHkkSuD`?VM67u&xp&*ISS$iaU~_ zbfU)7(Q&FFgL^@)uNg&^rq70OmY46izyMYNMx_z0(ckg{DpZ<>?D*OlX)yVSmc)B3 zdV^adn!cjH&RKv5I}su7b!Ua}J|PZyopK)G3p2dye;OVCvpxSpe1GoyU-<&!`ymp_ z=}4)IAW~}MLrD3$!|xeMFY?Xi{EBV66xV6v3HYUdz$v$(&JY?n%a)&B^f`hu7aMN8 zTk^aQmzaFFT|KXTxb`H%i3~#SqIe9EQ#c(N4lOYW6ETnLq_8IIRcVgDiSCwiPK7?I zCpaOzZkq+4<1+m3$LYzaHXB(ntY|ZtRiBzDAI=eZm>@KOtLuhVF(%Tj!*8JfPM=Tk zWpo!yTE@86u2#)>??_BEw9A*!qjlmjF9T2}kOJXIOjk*kkRc)I+p_?^`IR58^wU_G zuB|U%Jp9>sBj2Jigqiy;Y!T1V+}PB($j6YuN~R3Z8l6SgoUKc^8w$Y9xNu`G<=NJa zdqQf60XA((Ky0=j(fAvNwtj`V5h-dC;9pha=g;(^8{E&CbN^GSqC^ri%uu^wtN+7S zE(;2V9@-1!w#{;aqpcFYo?K}<6v`@_CEkqgsFCn5@r74-FypnCG4g8Ne?Z{FatFG9 zkW`NEU*y4lHQ8K^7-P7zVb#DQ0=o4&<&SuX#D)`mE7!j^VuQ7sseHcrHyyYGG6hTp 
zX$EhxL|89Did^~agBLV|$__f0@T~~RNH68c?y`qeG7@$@%+RnaSViUI%-fpFywPkB zaA-ft5O#AL6*ZaUG!g!5ZdUC8bv9N8q&H1Eg)0X!%{3~=v#I<1O)1QN5%+V9JyFv@ zz^39U5%_+Lr2h)R4`~zi2O&Mkx{j!zJ3e+763NB+cHWxZv(au$Zua(GTN zFpk|t(diVZyX$uGfuSPhP(19vScrcE?nst7=R&9ag6@!5dheCB!%5fo3F_2E_nL?< zLH)ARhEUA)3fG2{R-DA>VYXk_#Vo2d3-gyb^8ac6at$rBq4}jQ{jJ?eCQ?k)4k(4= z;9ULJp+tpIT9!r588;v4OD_}YjBulP3pfzG86}((Sgjv};m{Ilo}eDbNt$W%eoBlh z<(8Fbv`g}PI3_`oB>zr;ts#s1m)TQX7jM+TCiU@X;a$yZ(Q6fw-k)ZX{;Nke+9F^S zmJCVFEfCl|NmNp-FXNOgp|x!q-aM$34LKtx+s7~}y&a5y{MgD9kD#bW*nsL@HqKg= z$P8pfZ(di_AfM1pN&y16^V6JMokuKIfK4Z?UQUym-X7V*&*VlsR6rSuV#A3~?4FXd zs6QX4;l*!^>G6Ay@7~8PQJgCuw>H-WAN-*(zNTq=CB&P4{kfRP;s59zle%`y9P}~g z2w1VmW3(Y^{)tmz%!zv~cg%jkW}SbqXLZp)cdaEWACe)+1Ln z4M+37IOh1R?v8=C2%!NWFy(pn>I_T-(kT*BUj2mVhuOZocWtuKZxY# zjN;{IcU$-8WFA`nqG!5dEr@!CmBdRSsKng`w5nIa_NsEJ7-#x65Oms09D`ofLFU(A zPy6k@2idnoks96@9$qQTX%vgc7rmD>q^!Js?RQ@!Qn#? z;vFtEwkJX%SsKbw)3xeLK%Z%$Mn6LKU$`~=u04C)H_Xw-l}Te2o;^mnk_WYka6(;l zLc9^zoOxRZ$ScUqlFT>Wriyddb(woC@Ba>RNf0#OcgGrT>{nH{tT4JeNGeF2Y(;H( z4NypFQDRGJD=oSga&mUMZk%W%U%su1e~J?Z;q$s(tk#)~#QA4mfYp4j-cJEAM&PBn z@%!`+5GqjEelh`8iF+!`!aT}u{ z(x1Giv3lb4xGVoCeL~TGCU0TDp8r%kT2yPBs!wCj2;#8ZWSWMGIbwls&awU4N>&_Q6 zlnS(<4$@pe@X5Z=dEY=C8r^6HLwg2KueX&<1i=KtzzY>(^ISV?EGw%0or3kdr8^|V za6s4AxNXaY23YW*8~qv zLvV*caCdiicemh9qkn&U?|1*>+@0$)PLH~&QLAg!s+zNA)vTwUiO*B2bGiJNcrZ|N z%j!qGZZ*@mO@wp0(~X<7o`zZhP4Xgckq2&)24+a0I%2_+CTv2Dv6?s}T2)>{ow_y( zlDAbq)rNu01eXshZZ?Y~jCODLc1G3T@os+zxxXo?F;1}4FJr7h^p(-51hlTk? z$Og&Zgy;|POr6=6aj$2K@#IF_0Kwr$#vjQ(;GiFU6WumADl#`$V%JygMomC`=0iBc zHYLdXNk(mJnsO!SjF^iN6BDTQ()wo06lKW@I@s9u2ek>5#MH7ej#)5byNvG zg9`Tpnk?0u+6Y>)_1`p3HU^X*h+&>@6qr29UzIW|hWNM&lMgdQU%`=gsE3ubX3c~2 zQ;Ad;d?W;$layQqT73x%15oC6(;G8b8Rm3=4QV>7wbjs&YU#! 
zL!xxJziA~}13D%cYOha7Xs^yaA`?r7hHb>>2}rCFd8}B5=lpc*6qR0WSra`rnga6} zmvK<7X)ty2>w`IqD&T(2_n$v_vWKF(N&a$>UF15zcSy;|zIw8sIAk+$vWJzCp-}G} zzdPCpv8GrG&*)<|M{{+Zed`5Tg&Ht1^a#8xfILGF1B95PV_LJW-s0 zv~1JeKX+)aKf8m|*tT^mv^y&=d0~}}9`F`!JV@0*oy8hwGQ2mg+mSNKZ-|K1=dn!i z{%MIdJPz5MA<7}Y{U3YVzTrVCk*A#As(quIaf{TJq2k#@7{%kdDiTFD?F8k9lb1W9 z01yOIcUw(Iwz$d+u##9{4n?#v1hD9q=CY!cP!^Th$T2fgqY}~JHY_ZugzpB(+PV$} zIPJJBain;#Rz61yd8MiS`W#4rE!CGb@It_+IBS;)k&@QO#qmAaZ-b6_Mjj&&AGJ6R zQQo*2`Img~S_1~2Vmq#Qnn%p_8JB^6VWLL$`r{S!$n^C*+yMybU@4X@P$yui4nk|V zNqM^r4!9dSyqf9|6>dCq)t|?)7SF;NstSpqJQXC4C$e9p$r~xYiOQ?!XeQ%cwKY-0 z#nF&bUD$irgrvvwtY%xJ2QAKRX2qrz7XK*$Q?fDiw{Owl%A(MqHdHqXe9wNd{PZC0 zJEIb{=73^AqXCSk`KF1*hlXep@(aX=ZzPe?PPQxTDfW{dJHoYl=LsBns4URgvHiDZ zRDY%L0_iFLNl@pX0G}8+#%&{S^rKrGQ%3bnl|mFSB=}^JO(4LfAF>|YXpT=s663?` zl;dHt81vuL@a{YQ|4hQe7_w8WGxoc16hTR}qg;iJ*!FhGwt}=^vY6jhbzdW%lV@vk z#-Q$Ex-}?dOdqFABYaoJRU*9sMf0^of3Wi~B}9yf{Hy_yBytHy9TIqaMmk(Z!SJ!= z3$-gqr=sU6UTgk&yfVBnmth1H-&9eYcJS8!-bhN1Sd2L!Nr*2)qwOZqu*{wcLWBvr zkTp}A#z7jk8UO1(?yRr!7sFg|sYLu2Qu=a+v)X5Gs*KOH?zq*g#ad}Xf3%?-Vn7FN zD3eefr4vuj9eYLBOz6N;lW}^^Y>_F01ZoZIpX;b`--#Ah*(lh>oN|yz|1XdpQ8RA^RtmXey6^7T zvPD#)6hPnFX8QAc_&nurnx5GvcPZH)!*UgeJn|F_lHvq%J+ixyG_eC%DRI?>jM?u@ zmmdLi>P`2TG&Z>0-gcu&p;VAnWy4*mx_ud0vcnT6av0%GO4dS`WLG0PAskllrHJE@yxA&GF157ZP{ZEPpBJYc<4$ z0a5#ZzM3J5*Pt$q0IG5^HI%mt5nU^G+v!}otKOV86wbqt0-`YM?iw7u8Y`N0G)}_; z^#cFc&J^q&upaI-Z2hM3JhMgnMSV}>&r%gl?T8Z;(;t+yVqvk~*sN2|G{K~ehboOi zGM^I>A;i)oPtC>`(H@hBZ>ysj#DFW`N}&fxDn-7I0BY1I*e;m&d#m*1Nk76D%qTST4Spp{c(tR>iXADn2G7jS*z)I13W!vCzQA_=k# zV%FL#4fNWnqBUAV4;LhB+2|TD*PNU#l|Mfqgg8spC=5kJfG=W+vADnS%=I@I>G2Rn zi9eGiWg3<6xV0En%cWH4*Og9}gmUJ}CNXO<)M`kr&@*p#JG0c;_`had>&J*uh-o99 zVLjoU$vVXslohG*j^a8qOy`E+f9b2ftLNHzEiK2K>EVs&3KbA2-A}tS=^gMGwrfaN zyha_i>p4A7D)ov5{n;A7NzyTVhjP;sb-Gt%joYp>&$F^L=7*5#|n`QMP-oshRn z*?i+2&<WHIwZAdDz~we2T$>u`mL?`1y+>LR z_Lhw80u!jH;3Ij}Sh%B!Q9qe&O%`L4CkpRKwO%#s?Gfx5{usT1K4l^VKo3)tL$_-!ia=jmgjW(Y7UEm!W z_^Jb{AhH=nRQ z-p_xr0LIgv%pmkZ^rLzf-&Cp@0hd%2eYpcgxl{}0Btn3uJzrv5`a($|D`QDs0lj<# 
zGfk=@@HY?FtW48C#&*;0w(4YY5Ey|@GZzELY9(N$^8!Fz1(a=j(z4WG*p^IGpR{^& ztUKBSU7`P%jkr3TP`X@!F4(itD8!o)OBL77uOE#g|5EPX?=UPz5@sLD*e!fS^Crve zGAbp~FgGg|T$r_;*l63{zaduQqcE>io+Iaqv4d9%O?ay|n1Q9ox2D|bGRBao(Q$s& ztQs>xp8tJze+BX(wt6>3a56-<8i{aD06TEjjM1nBwg@F@8p>c8N5c1gDQV5d7mL4G!}oFQTfB z9&rDZ1YIov38jsn$zm3ZM(Bd=xEfe1TEe>yohG}{V87_bjnnD1ec7KcWlHK6fpiS2 zDDR}5O?u>uEya>k;+r~GLtX4d)c*s$LLqWCgutC^mrm_r}k7m4#)LETiw;Gkwt z@@S%*k1KQwC$3SGW!~02_9&XIrI!TW_&NBI211?adgsy9c0NwyhAPF<5QD+cux$EW zgD>uvU6Md$1ftALjl8#179bg^Xy{T$6=?kBff~s zhQf@(HSIvQ2V5N_C75p-!`@j}2b71E2AUa#fg7g$B-Ug}0zpze$$Xcc`qm=eTX8ns zz1lO$Fd*2D_Sfn(@vv=jCSfuJ~d6sDsYXWBIlE<8SLgHCHxCaFQROYQs`1`lYyJ=Mjj zh@qWK+(&F{$@4+L<3eoVugAvJOMYb{I9I2tIqP!g81=eJu@2uT zgGjAHTX~|{`zoWhd8=Hg=lB+aA4_@9^2ge0B+Am+r)bUQ%${O})ysIv8HfSP;7GRp9$Uc8dZrU9GcW12zGvP}Z5 zTS}`6FBG|;Sxv(6r996_I1J^6KdW;*#%|Lwbx;DXJ+*aw`ZmyWt{rX>t#F=_rGNQSs%DQI zX7X;@BOa#wh3DSK;b7;jxqHw<|9&;XH`CGEt+yVj8I^~0u)922eUw29U(KM?fW6t} z!iBccUYPC{qxVDSSiDEFUGRakS-5XB;-_3Wh6Bs7$_dEze#8yKQW*NNaLtkyk@7IP z@3j42aA6abVY1#PoUc>$WUscd7pjfl=*tdI!Z4|2s^T$1ly8`d{1zKaNwh?g%Sg0w z^IykS8`DS_-7?@hlwH)R|7<=~(W2_Hh+pY^-Iu^_k%mm>u^(Qz*$fImBeGqo9;g2X z%b5?<=j`m*>Q9F=Y}eZs@l|-BO0K>>9`I`u2)<*94B2{Vi6m{As^-v?cJk;x-J#K{ zWxxlDYl@>ZC@EPR*Y)t>dU%ca))*a(14Af?OC_N03?-%Jrpzn5Y4-Uk?uWy=h2V}! 
z16@^CZ2QTHCUKYDb11A+qO}KyDd}*ssUzj`R?aySA&NZuv1xZofpyrxzh4T^(FQB< z4RO>}CBBar-rTN^m!N!#zTh{e9K~12osWcmlKM%o5CFi{{Ux)S-)~?ZG=k!!fE`P( z+>(+jnbVjWphH}zx*8=TPf;g4JwWQ8l?;h?ObU0EO4gS|XMEXv$gk%8C zJ4X6DuzsIgcNlXDrV%uo>##Kt^!^ zbG>9|vxHFup}fL$lJC>rO-ONQGZ9uHtk%>`K-w}ku7jR`8YQt^FG<6OE_Zc8e}ayh z7hskX=T3N*P5Pbpt6b$>Uj?6B3PN04mfj+uvD1%(BO!^$qw?@dlB390bAdMgy?1ws z_zrw)7*tRCf$r&D+$p}pf1go$61<*F^c0p{8(0ywO11GxeH_y6*ZmyrM`e|H@IJLOCrSWX{kyqLtt};OHws{lJf74rW z7+y-e23T#t41^2(I>#;ALvtgT4@(ej$q?E?Tfx*J2Ukq>D9>^f60w9&U~XkgpyH&` z1x1(`YtR!T8VT_G8FR;;QHPCN&?xh%K-**>AYhTPFoeVI65 z3ky_XZ&mVoj}j7=`SVY$Ye-hM;t4E>#y=GOeT|KXY1oF?o5L8Z%uI ziyPoFpC4|E3YB|i*&pU2)0Edx>se$N@Vwk{4a=bfE;LNBoWhp|N_?M!F_qV&aJta_ zGU4E2kX6`*N;IMqkHqhoJI&)>4>Dx@)mH=ig9a(O%eScZJpB$&mGYOw&rc;ZQVmAU z2DM|7iMYA1tSp9@C&8o3iFe1S@wV0Y$jBB)dl984f*Vgot8_>ANO}lbNVh(v zR=V?&fT@x^2CnOQ@SXGc%7biwgZUzPOlaZQ@F8E>#HM3|&XpG{yJqhn2Z})WqCX6b zfDmb$5D>H7pwY%VjX%kVLN>Tk%zH%lsp`8Osj3D?L5hmMz(7Xolhjx*;>NyY7}h)LNsfA|kCQYvZ9SdhPImZqd(G#?BYZkd>CG8vyRfH( zr4Qm3=^$r6Us=P2tGuvJ-R2L6C8QVN?t^o6eAIsdj$O zI72CRqf8Se7d%BUNkdYPWC?fPpSTQ5%g3UmE23F0?(oN+EhVFKYrmGE^Tk9_OU5h= zJ$!{M=ua%UdEG4>K*zo1hGV4HSoU@)`RRuYP)mgQO&nf0IBKMMan0is4h4#jutdHXt!?lyy^~!on0u*1c?uP2fvapaSz*)crDj-rf?4F)KGLmIUns08 zp!(r5kL;Na-MTI)ODkitWA9YUA*F*`d+%ET^-09<2aHN#i^rV7prCMmJI7k^ZoHiX;Gdvfv>Eib`k}t>0PoX;=(9p&k5$aS+9qH2lo@xkL zb-(&TW&=-@-Ehbm&g?6*9+uY`d$uMJqs-@?vZ3VVb(aB5+AfF%y6K~pz}&=2;=Zwm zmmgs)fnT*9=dfOEN*L<%OaTKGpBr#nijSMXyRw&>vJ)Y+jyR?=Q;l6-_?cI&=&-?k z^*9LCd(Jw}Zt2%$c2#PzbybhWAcA`#kzr>|w+Sx;M~H9tyA$oBpnT3>D1jQ#A~5gp z@-zF3o!KLhoU8>`KDfspevUd>kbPp+oz4$!*WkQKqbA5?tKq8?TJk_;>kRb8siE?V`4Y>rsIR{pwZq3 z!o*-4tJOg?K~DpzNI~9&2sVFUyu-bxBhDf`0lNqk?#y#mfKUxg?%?o*IZ<41q&TScHQE0uT2jOCJc1me*_<# zy}6lt1@C?oVzek?n2l_tcA$OfEInM(462YHQS7nUR%s?hyZ5<$FnOwu-hODBM1-5^ zc3%AyOFpHJI}YkSJ|Mg~bb$2Nj~}*3FA^>@npejZ@3NxD4n*Crelj7BgoP;O)Zyk; z0M0j@Up-KgO8U5IAhgD{Nc6h;WVa85XLm<3yjd(NnN&S;E;Stmnl3dETh_O?(A09s z%-kHXM)+JI`#uP5z5QcM%Rl*51IaSqY+~@S(?$u0R}HU@Pf;g7E!C_YWdBA*0u3m~ 
z@I!B?p6EU@4ZTmB+3_hhIms^FIaMAEolFX-*}gS=F>^C=nK{EoBNBO{r${T*t2C_j z^zPrxCeG49Rjer-<4jUSc>Y}I;X5=vWlW;E)EP~zEStF4(JQ@c_rLacj z%uG4{&;q3}LZm=d^7yJ}7Ihaq=)gO+Sjqo>8@gIVq?Hs-X<2(o--F%qY`SBdZQ59< z#(1!wu^UNvcc%9C_TI7EobRL&#L6{NXHkzdMVSkN_0m*jRxNQK_Pb~hWebLDi8j|D zA+t=R(ZiM3OwPWtcnUmDCR*PM_wD!A=W@A6(xs9P0}W86S7cMoT<9h~Tb4ITJ`c;B zE;ll>I@6{nw?V>)?G2W+6T^KadF5s8A`9zso(72jw4rIfWA={i?!JVME=QccaCNl! zbf=bAnKy&Xlhw!8DXsuvy+Iu4rs+Rk{UqKS55<2ti!*AktG2(zwh7_uorKaFCeZN| zpH2sGn|W4E`i~Z!d?zLsV-+CEQpI74II(P#t7xvF{!xKBA38%EAI4z6ur<87TJY3L z1v~Py$|v@SoZmM^`O+Tix|GdR#Ap0hewDe@FBaK^#E`!;Pg@iSgJDAUt)D)kG!piF z5P1Vgc-9(&p_;qTlid+7#Kl9Iolwplscsx+#Skaa^P1314&_;mTGlH|8K2P3TE8;i zaXxq8a!4sl=6R3meoav1o9zLFa4~xl*v7lmBE29m>=e>VTf526&FkYZ=p9&m@lHIJe6PkyLdq8dsj| zt}-ZYVLwp4Q1&WhIlVG&#Sa-VsgnE z`-wFfAK&+FCnNc=F@se;$r>>KK{Yo65!>}BD(qC)zb`+CO!=P5yCXUuw&l?j-IKV= zzES9fyQe)hFAa4744eNU0As6#=tRaZNdhtG%m+5}!}|eVyJ!FYc*N>&lLo6<2w*_$ z9Zf(%BFNQIaw$N4`zh@yGI>nYXJVC)#=}}_nLhFAa@FT+5Q5Os=3xSzFq*eW^*^Ak z%W07YefH;l+w+aJYf1(n&NowxnXL!qqjS9r|+7lBiQ)cGa*n_;dXGjt5X({n>(o!%Wakes&#RTJK!Y>n#wwbAJlDJ=>cDRE&o*8| znD9w569s1oqP|ah_0*0A0vVclh%Tlamwx#&!y4;fEmk_et>2(l^0AjvNaZjCia5in#(V6YN> z1txIE5k_0LMiuOI0_588;R5a$cQ0sp8hOfoe@0}M7EbRJs)9hM!m{spe6qUgiDF*h zj|=qtTDBb~k>6fhwp+f-0Ro2{>kh9xPcO|T4+4%*eC^64ZYZ!?%OL4m4DaVsJtuCx zb%E;0iSc!-ys=RI^WD0ft=>Ka0pxYU#V)OCw>#TM9v>_P-mj#_HPDJb)O9(dK1=o* z3ROdr(e=qZXiSR=>m8=SG{dure3MumDSR&%0%oX)G}eQ7`9DUL5zqoSW?sydr=B`Y zioVg)b;WqYZh-NMm}%8Ipm82q#o$GZ1H|5^Kjw+41wbNR<{2tC?H*ko%6 z5{m9^cXN;jOqo`m%yO=}hi)=?GGtHP5O{!CHt367q zBGZo_cI7mW)ei6{`TY{EPHVws{(R&d?roFZS1H+KcLE!+$IpKse62J;lSD0AIQo9Y zumKzT4nG|T;Nuf2FVX>;v5RzXf1aXiH*>SPsEex16`%Y9p zBrPVOi1=x!@WTB5ltjE!2M;?#r_Qy4W#I+j82uAA<=MF&|CkLs1H;tCkA^VFP$7q@ z46XW}rV_faXv?+3F}hqa)$vj07Z5raHV(_r4T&}3@Y@XQ97U?F%4c23kCoj=nb~95 zA4SRPP)scdl!j=>UEy2{y~YUu3zb;2k4D<=b_yW7ulSGw4yqoq(ADgmc0p1rLS8x~7`bD1;f zKbpHchv@gAzW2&eT%dLPct2Rn5ZBH|5!76V4u2aflh}=A$uX!VDqZ(VMFBg5R9%Ms z^jliZM8m`P>2Jm&pDh?ULR!TZ0o(nUjD-DhiwZ3k3W8a#Bl%fD8a3tnl7RExZyRCQ 
zz7K(90CcKtaO_K?&sP2@m>~U(*X_3PInZfRNuS;oUT8bmxz9M>2eTPSlnANwQ(uyx z>GfJ|eWRD6d6d44cwUMrbW&nSnDo5*8(5TG*`%|N1H))NWglDBIvBJ|rIaPKC%&0) z`^knq&lT>cZ~xgI1KZ@q+5$ae}#pE|A8Pt50=pXcjNUTQNqZyEiQ+vVATrE7J=0Ds6u`L zSkK~&9O4QSMU4Fbe>(^o0L3nGEJ@qDd?|n{4B8Auh^6X@?iXjH1RHg_v>`^%u!=kM zJ@zUi=5w56O-?ml+L0695r_(P%fXru%wAK+8?P?x=^xeeTCH|3i6o66l4kp#P9bsM z$D-1AdN2X_dX7nFlvzY<4qUI}IFDve;Ob1!H$6!*`)X=BR&B=p>KLq?N?W?ruv9_e`W*{gD!_LW+`*|28s! z=Kb|GA}EY#+dcI+O(~2Gw`|bjMiCD|)h4=iE4-G2zWb7hy{)XE1HT+|nbok6gErJ% zFyrZ&Nm2gxJ;-0oj){cDzYEtxv{WL0jn&{i_%%#ewKLcHg3E6CHuHHW6xXCa=7>bV zSG^|(S%9pbpb6O!B{*&h>LB?U=0oD2bL1)fbus@!&W?uNAkxuw?9Vq}oWnXf{EHmL zBK)a(gs-?HvdP)a95TBYn}445t5@3))Wh-7`MqbRDoFE;6(;eTM5+I-2Ph75xek!U z1|JxDaH9-fvbCh;Vm_Lzmav{LlL(=Yh7=>fZ4}gujr*C=PiRlS=JzFf?hO^GPOe2Bf|r~bCy}*f*s8W8fi?GcUE9< zfl<;t6yRvMvFxr*7+5@o(6;?{tuOJrTNwN zkRz77cSDydj46ZFr&ojC!9mm>zE8ZUjr|RZ=_j`L@Vp4Zy+PVo);}zKUaus3t&T2@ zsemcqUd7(FKn3JP&9CtXo|6aNVlG?qFf8m6fgr0!kW!3IlKs#1(&q6-QU+p{YLfmO z)0=6lR(}?Glf(~`Q|J6bE6L0ppOR3My=vX2WdS6F-k@|Ds@@-+q$dycI=B*n_bUF` zp4v{=xdYjJ%xU5G;<5gFyEZ6`w#Ms*YK;54uGlToX(6aynnZDgtr zdxEDdH!VbS(?;()=+&Vm{cospyc;P-I2;kExgxL*y+5OlfAZ!xM7v}U(glAw&O1db z7w^xH)hnrz5v{WlM0H=!J)a{Hwa28zdk#uT0y!#t|LN^I$bioIqjl)#A{+#YW;(+V zBw;5&C-4wjsd##`h3c@7W2mb%nzD!-M>$#063ye6x04P~$($@*-_Pe;bgY>XN^Nsj za73NWIb0*2CbwDBre)>+j5AJ5Nhjp}_ePA$gEbI$&^=a>0bm1XA4U8?c<#1rD^Rz7 z_6k4}TP9$lK8+zTqaj`%u%ThTiTLz`U1S8rAw4#C`pDdSk9$G9Nw1IvZ9 z==Lz1#-)=M&xUA@e_FT-b!Exa|1q!}XD$aYBF#woV3tp(yfuxpyI)54$wrs?<(-EI z4f5pP=AO(t!)|!g-QysB!|UNEFx}M-?P|y+$7~QLIeFHL=yYeN!ay|7&BUnWF>|}#x1WDBLu5u0@XdHqtw#f}{5_7y^m)nnhVf45 z!v~xlz*li~d}SV3ui(T84Sq3K{grbvltVvY zRYZ#qtzj`Yz)`^%M2{dD%+^RLc*(k6CjD&zZ^jxk8f{tapIg!Vik$J0%>G)L91CJ684Hu#i`h=s$eW+94myg`r|Y7<5-iY+-S=Nw0!bA1$t@; z!l<)pn??Z=zPi~64fy(%h?e#3@K^D=S3CLKf)8hT>nRIIaj&YXjl8gtfpwbdE3`ze zR%dS3J;5dQJr4DR(yZ0LtE7^vW-qr?(&_7$PWppYchXY6teCnI#HYeLEER#cD;_(} z;gHRJnu0#Ir0y(j^x8LnlDw;*RYknI3uRt)z`naDFt1v0X|tEm5ucVNIx~8~yE2MB zHyDFTMcr5V`d<~yz%x1qGE(aj3*Rsi+jm-RTN5DkT{}ZkqdA!%aARAZk;MqJ&*c<_i_0u>Ec$HD_AVH 
zjC@^$B-68~bjNX=$j6U(Y2+;fXa9xb{ttlLiy}>Q@!&%6ZP{6W?Ml?}jqPRpn*1@l znz}}@!4b0jPI+ZK90JST)br&F9RSgAsN+teeIp;A33UDA=Mzg$=-U@ zigNb9B4*7KxocGb=fazeGWEwZ3^wE{IIHQ;P>+3}S-pv{?&W*EhovA`55Xk#9oy$C zYyF+-`nY`rRUog_GCTKW)b@{0=%1cPl(%GVsU8PHKYb)amxkx2G>`7j9JjA&)Zyf! zA`G{txx!?jsCS$Tt!G>8@9mJ zz~Yd1H(V%k8m)t*%+D%A`q{{DYTuodd>;^AE{g(s(mj+tOCy@%18NB`i=50(c|VPZ zy6L-g{IwcLK*d%e`0?Ywr2pU)%sX;43I_l> z$H7Ig_bg3&u%zFG&f7@NDr-enBZQlP>pl-Njxd_4n44*BhZbA@C2narxUR9#cUVT7 zJ+GK9wZ`5pqh?|(VYD>(y=UFlS~cZB*hsumc;LJ~#Vx#g_Zqgib9{sg{_4?|;Y%RYf9VX8jvqbSLXlPb%#`3s(r} zJKc95`WVTL5r#DIvye=?5+REZ8-I2Eg80WVqkRX-i7{@qER0m0?xI~QslaMf+tQ~p zL8}V?7!28&aLUM(WK2HGR%_1p32Vb5%+RE2hh;mPiAg?gQrCh#jea`;Q;t#Dfh4C@ zRR(%;6aJ7t7${x9&chD7Uz1+mTkB(1x)}I1LTMlc833v3z z=xf8xwY#TWHVRyEn3Dt|y*kosY>1F{lX4w^1nJ=DAzSc9F_DDiQK0P5qi!NK%!Gnxi{I@;Ql!in8yrc9$AO5M%_-5 zU(E-+Px@X@0Uu87VR2Wg4ChW=5knhLJAJEwWvx zy9>RukZ&hF3jO)d=N6q=8%-5;8uKib-_xv~?HLB1wS~)bWocP0#1(}b>(E%&0BOx6 z^&3KstZ#iks@AL8`SYz2G>5?)rTp}!7RyaOouGvb2OZW0VLx@OYk^VV#I_JkB>a-S zRa)98G#;@7fP30o5Z|8d*Gm!R&Q0UQv$B^~+ z?Wm_6hJc$J^v9FVC)CN3R=a=~wYQ^a;+LD*C_cPlajJIs4 zdbBQjz(^#5&2X)=xU%up%_Z=>d#ct!i;+BTeBV5F?OU-Bog@-1sQ;O9l1H4PaT&5C z(O{QH_+OYX(Cdw3tHTqk(mQxF7W6fJj0Ojha8I5zH8cS-BwX?8@c2k7~f4 z>u@O|jz~JNl^MyG2Djb=JE`@%IWfMNDMQ5sB9o)V;2T4i`&&%Wcf0O)Z~t`i*91Pu zO~5Oj2bxQX^=DTWzuMdiV%C!w_f{XUg-6r5L3&%+&?p!fe;(LBe>f{zH+fq+SeO0E zN9KPOm+jZIl{{&GeJ=WC+wkoQwTB&38gYA{LqkOaS0hWc;K%J>ZL;SW_xk6yn&tbs z^*;8qjo1WAx>DyQG=Tw1%{5{}jV%w*}{g;{|QWFcP=$iw+*yn(n^xVp9Wk&)b zSO0inB?M~;Q#23{R;|~xx8(pGtac(4-MfratC*=~_;>bwgcWXALvxFl=ZH1kWL$8q z-482M9Pb`1_M^PWxY#Q0C#_A0z7*yzlhT!@wVZimD--_Ak2yPtn)H7Yi6s{~+}ZHy z6Ma(KXbMsExN&FO-8;@kGMZLX%E@(M4gCSD|k8sdM_88jwmfy!R4ZWimikK?k1T(l= z$a$@|liP03QrR)<64RW|-MFDC%>F_kKT!V0LwGz3kw=+W)Y8Zac}{ zF4XxDb;iYuuMFAje2jYtfY6Lz5hn&*Tef*UhkM{-Hlj5C^-uk-uU;`Z;dRxIy+Bc< zpB2Y~modSWL>O$HV<>8Rw`_RTPnUg(Z%A_0y)V4sb0HG&X6fMg{JCgrH5F~YvHN73 z>g_~E#@F!;UKFy6@X{J^gGcji2ghY|X{p<@`!B>)XdfK65=lIMc)&I8WWw&(8 zEhA(-!2(PT*c6Aa>>MAh1>eePdGyZCf^^fyX-)=}J>QOOvodA4Yt>?s=|~Qc5ID>X 
z&=aVw(WBU-qx5AK?W`uro!(A4lqVL1NbwSn|M9ROY>Wu+`CAocuu(Nv-R?QDKiIi5 z3YyT08WZGcXAvv&Z?0$sFc$_md4|pwaKrx zK(=$4NF7Xl+;X;1Nc0psu7&RPbUf&Lbt_<~Z!eIpl(O00KYWZiDnx;<#MGD#gMpnF z&)4MUI~03P?(|-lH->5eralW>ckP#K z+`gVJ5_;;Ngvb~8J!jDUU4y{1opQv`8K^GHLeNS{BOeV z^0`DFJqyS_2dD6t(?WN#dvwR(7-lz+5mbBW-7T#<*i#b;!T22hCwqrjk|SIyrs zew_aCnB7vB)MYn%bxo8(paN?K@d74^+HVv_MN@U7V-oe?<+H%P7001#Di++N+M*5n-2mttPqj_{)5Fvx?00;2dDd@boG@qic50dfnn3w=h_K5-D*5y23 zd;TL1!xDFXn}M!T$;;8Z>zLCtesJ^2HI!oFC%#9@I|dK^mDNhgmRtpMaD`4gU~> zTH9w@z*Ewh61NkF<<&x~nTJk{L_cpPN2Th_R)XxN-UoO5LG(@Eu$RlkU5dg?tQ{Na z=07R>9M*f#)6;X_Zz9+L%2U3wE)05(hR=#glwz>1v5B`e^h%Tt+ws_N){^4D_$Q{ zsi8XXU+F%r`#*#J-^phjI`sdOD`>a4{NI8Z11`^h3#hFtp&I;eu5J<9*Z<~LZ2wES z{=4jq2l_GpKKB2==Kp1<)$g?+d~TP+!kVEshG}uiAxFr=zzlI*mrVGdzC0#h<5rA{ z`ZPPlAJa!cNr|!R=EClI%!7kTpBDvG=j5m%v`s56_ymVx_;C4e|34w{aZE=Gr&n7D zy6sU=p@;{R5|uY}h0i|i(^bMNM!;{(D~2Ia_*iEaJA z-~DWCW+Lcb7KUo*F$t-HWSlK=D9hPf(&50fb#47#{6=JA|9h}bO=eDHr>t|!I$!|d zF3fkuAAn~bjG-5*{69i^o5`ny{mMNRqib+`Xjc^D#kp(Xo$$O2I&kt@m-J;JXuDkj z$TnqW#>o&Wuq%92e&!MZkyjHEuQ-hkd9L>+{@WZ~e8d8Z-{G7|S5f(9!|kWE0qSdq z_U;KITX4`ih$8Y+%(>|zRe4P%xp;18yY{O#gc}6x326UUDUMs&D8GG$vzDdL&zQXp zZe3AzdH6GuuDkRixP@4>IE&sO>t!y6v z;lmOGLW}~k2PD)kNmqf4?ynWe{|}v1> z=iBE!_q*rp=l*}^dDg66y{f8L{kpn(bycq)DoWB=uZUkEARu7L%1EjqAfUD(ARsrs zM18JliROKSfPhJ8BO#$8D|L)Xr8Lo4l?zHd0Ylo#_q4N_31vXgSgz z^@OHg`!&06kNVSc9uFf>Iu7Bzc0b_`d-mWev zrhXW@x-U_XL_dV{eTER5D#g_nexUvMo1_*8!Te)5jRK;mQbu-aztUIW#)}x{TT*h0 zGo=BC0}XsM7n}{*)KuM=&61eN`1Yarv!)E$bw1tl1m~ov&!*-ShEbG3+{b@1qW;*Y z66MthyP4z`y+VH9V-|9*m6-MJgXK>$n=QKF5HYPlpV83xP{W7s9J7$dK--OaTP-c<0*PN^t$&_=lQ^gh_RJ%K}NOauV!tT z>FXqX`0>N%7~e3piM_c*xIb!@z7b094@VDXLTjd`6XLM0KMZ=%I3BAZ%%vAE*QGdH zFwZsm#)YarGE785VA*9Ud#4tB5Svc?j?zB7*V%O~u%FB6mvFlp7Gda6H2HK$*YxH) z_W4GcnAgI@(e~SxH_|fOL;?x%1bj1%j07xfyMo3zlRE0%g?BZcfr}^fB|M~PU7aL1 z2(-9eaW85`iB}qMxG+17CWu8kk;G$=es%gxan8KgQtJ+d%EaJ2ed|HtTR&|3(C{S& z?a`yVaLdAXjPP@)O ztKyEzB;I+?+fI42^?UqB!%2VN2{XkidT#tlj8o8<#4jwTNUJ!IRLU0@ zopyft`SO8$_x+|$5sy0QN#!TV7WBAy7rR%7LaFC9*~zEvGD%Q 
z%8^VoV5P5Ms;95Vhd}w%E-ymiNEmm_^ZVDsNU|?6Lb2(N!tIvXE~3)|I>LqLvX6Jw z>XjZw=MU^gnH0Lq)ipFkl>os-B3n_69YnKFv;bqAF9B#LDC%N_-;qv@@iQ(pV$e0A zh!~w}DlgmqVO{DMffg8B;sCPm?eh}tE<7D4$EJ(tQ& z5^G!v6PL4x*jaKB*XRS`57D()$qy*2)bGs^YcUzbCf{m|aR;j&&G##| z2||1?VG6co6rU+vhcGFC#!TGqwN^x;Numz(%!@z!0395j0NG+*%T8R{URCu&S79fHKDdA6a$a-2w0vPDPDvFNBc38b|32aM6<*Zq(Qfdhw3qY; zJR(WzJmkgn(;luhpS6!`Jf>u)Ts84G-#SL< z0gkzDEzexqfwU6k`5?*$$C=-mtP8;{15igwtWUI0msTN!np4>_a+o}oHnohym!tdb z=djnkiFlu7^O*|mil)>?)sIx+0}#j_gnv*eesho|c8LijbCqXTbs$}eH=VGjm?`#7 zRa^a9RaxCpeR<59OCk-8`D>EAzG7KXW}#4yZ%qD>?f?qJc>{k)b6|SNeGr_G9LpPX zosdnF&1yvJFZopZ5D*vbxHBySQc9{k%m*ddteb4y;LX^Lsl+Q;6JwCkW5dZ;h;^fp{iD+`9n3e zOfA<%yfE}DJ0{WB2+Tg>K9fGhzGC#WH-dCdbmbW;0=aVwj#0jUzi40QIOVi)yjWQm zNP@asr+YI5D+C&$5s4Ie7HJ!CL^K((6j>L^7+IB$t4OLiq1c$}mXcTGQB+d&u1I@C zg%iY?HOEsoD*H`VDgGw@Bb|G?`iRiT`7mG@#7v0ag^z-dh0jX2udI~P=HRlwQI+J8 zM3e+#I@V**Rc!cJZCmSYZ*M!&SXtffAW?T(r)xK9eGZu!P6D(pT_uq%vGh1J8-yz7@V5LG2QG-CCgz(iZ(r{hCn(nJ<+LmBKUAGKKjU1h)ApJf4Pn7ipF(I)>^P zq||?@Unu}LO7|fyOPllSeQxq@3%fJxmFuG}Rc`(}x!YZ*v-_u0lRcax=Kg2~Fp&!*@I=JJ+ve6l2%_3(83EER>KU>H9#`AWP2yat}*r(Z#&3sMe%|L6k z{mc=qcva-ka7P77nw}2_iiBxL0>0}P?L;1&VU~*-He9Yp!}4`gRu;qj?D~E@fU8(Z zaZ|gsrygiIn-&d#o`KB9nR?cV&OGq^M)wg5ASlTc~f)4H{}iI_Q03k=7(ncbA|1SjBBoC)<4O& zH$GPnNhtgnNh~`CJIzk8_vWe-c}czA;TQzh%%?_b4GFbezPraLNhpOVj>)|&fR9s5 zU({}P=>BBehHZp>`@A${kaVo`wQ8lp+Q_EPWe8H$o3S=TlSE@dldWB&ZSXX=w>=^f zU9M!f)ZXVea@Ek&@X){uYpyE$sL3?K>RT^t?)!+k6|#oq$U6%6Dj9CH>!*d#ju&N( zbfuFt?KtYTZn^?(ZzEq-g|$==>uWpsc0|@y09yO6=E>3oMW=Rnp)YJF}pD9y_9)uyXy*>3d!ob=^I^F zk8a{><|4ZvS!?ng;U9TNr=e8NC(Ebn<+7piRFXRxIEiefHY#YFy5w?gw>Y=IU6ctB zp>TUzL+QY<(ihTKHgE&a`V8JqoDM}LlT^+cl{nS`1g=__j{D~i7xt_8zt|r*?OE?- zOaN<#!2848*#Lq{G{X)T>Iw#kECm4}tZoG6A4x$T3T^^e=56qgB5- zchGMAsHk%Z-@9I6_{@w+=VHr)Uj0 zniOOrjecQ|7j9moRukJJMhfj*zF2qOjvrjjB|GtIKS(2D;&bs41;FkRa(cV#klXrI zHwvyt7n%a>Ik&+wn5o2^DXZ>qpw&O zmF>Q*^`h=PFBQ<8WOQ8-5b$aKIuK>mKAa#RAZFQU=(y=9D)5^*+OwFLJDOUsc-cEW zQzIY#yg8#YvS2gQ<%72i!0fgS`D5_9OIJ#I+a4sIr1%nq*9{}l57$&s{hHFL3XaL>4-#?$z!pr7A 
zTXJyy54D~ZWc}+7D?1At>%U|_vkLxI%dcYNWnrfyX=88U;QHK#Fgq^?m*77b{(rar zv&p})>i#DyA2-|IIsbO&KR5+h|I*-Z8vPTlf7CwXCHzW|^3{2>q-0YMZ&R#HsE3-MqH%jfMhnD-=G{3WGiOh64H zOCY2SPqI2vN1;&g<-2SZ#1aPku!6I^fplC+#Lt0ew8Yg|?-TpV0urUc^aM5Ejob2G z(pRa)AT?9I2&8(EbaM7!t#SW58hYGuBuuZ*ZkIJD|D6*5QVrAVl=#0<^j|b@zgr`sw!Lq;)A9PB zfcXbKYMU3<{{Y@UWlGi~ARE3m@MRMGcVIu$wW0o-4naV9uNQz=^}SLUKkMHi9e~Jc zi2QFsdClAzpe?CQfua2GI=C}{pz7bzh<^Q!Z`{e4R~8*y{GXWqpFub742S(YnjBHd zXR~@YKHdLY{l7QUe@b~aKfu4EQTc!7_upE?|E>9rB7Fu*tLk7*wX$Hl=VwlSs$%Xd zp{8>pSGD$%puQa)@Henx9r>-f`5*5EgTCyxgtD$AIsATVF_Q`4$!*bGl%w z@h9h#icut@mt_XHU?D}G92jMFj}*RY3z3n8uqzz}e;KT-zZU;4hbuOh(RNXrL8VapD*j?nd9*8_(Z}J%=VF>QZelJ?dc2ma_DF0T^P9m^>Lh>g+swe$ z6K3FB-*o2XOuoAoVQMRgXR)K`NUcP}eU2cS+~vb}-0yA5PUo!sDZz)?KfsemJR+ga zHrxBir=15&N7Ox$y<7g+QkvivakZK7;?v$1tTAyPmMQ7$g39j<>bU$7iFkqK5~lA0 zUDz=G?h`enpFL%%S;bk#dnM!JW!kmWOOzuQ-re9P!yWxA*BI0(H=7L+O1>WXPJl-V zlA*SDzQNpFbMbo>dNEl4qL_9LHf;w7+TlL1k%0Vn(WQWs)Xee(SYR#k5^_YP&(sgb zt>utZkg}?A8P;KfCL{4G^sF^ASiJJOOz3->`ZzyZbt!p&)|!tu7T%pX@umXPIg(>) zOZPyhiM&57fVPSMA>}K~nn+=&Jsfba=0?cYuZ&7|n)RY>K5_N8ac5aM6h}l;wO6b4 zsxm=zSEEkDexl}aDRdEBIox2PJAb_;bK&h5Byha2`D^Z->tbz&f(*nKou$uoH#vKS zK(k3*UpM8#SKL%{E8kW6zaZoI7c!oYX9w@aSLkaevU>T60>>W1y1q4Y#;VM$xlmNt8{1gSceOL(sfjB*G_EKDM=gsNy=Te zdt!A9oI{ZWv;K7Hd(HysO*$@sTG<1SG8O|6sW}q4FufQE3%@YkpXVo_l@@FObuwI7 zE&bMoA9i5EJtsXkuNcB<*wns$ZLH1sLJ#rwF+jzW9;nWrFN?ge1~u(nS-TR{+WAYE zXf{&2vi=to{QpA1pX?m7?3d_i(-4~%+*_Vu;hHRTaFCgKKk!K<3RMHOEl|tyH^-~D zLt{okLK6No(MhYv4jAhvo}0f2AYR_D+w|QXt5L6>4sCX2LE?KqU(*h;r5`eyrYOQZ zcAZGl!^l|K~&H>9^ zUHrc69|>hsSG7rn1d9aFshrn-g2{Mh=!&ocCeZT|uMxnAP5Ox5kXPBu9chZY+){`+ z*+8})!Zfwbd?rw^T8KnCTT9meDy&euuff{MfK;zOEQmPbr8haAHeysJlX%5^S|@IrpY zDwQi+-r#_6t~b87ot7eHI@cm8KjdzLPAgMw^794BS+r??$JKfShIb~f!!r4)%Tj68 z`&0V2I0L@ZA7*a7PqMNLY*y$ZX`pTS^izV4)&Z$+$&c)Y!vNZr z_?BcQ0?4ELd|QW|{#W_6yLI&DkbQo_7InboZP3&<5uaa$H@me@{8Z{;1|lHnQf+tj zou(HO$n#LFeR#ic2^5*m595xwAGI_g1XeLNC;QLRZN`{RhgC9eXRP>)LS(lfoGjY@ 
zeS_R%KT`$Q({hidT_U$+$p$%#;Fm5VknI3ghUJnxQ#)NQWZl+9==>DjIb;5+cSxdj<$;fgd%~5>+SLmfnsfQ0C+w_8 zHcLKjlH0wYUGKh`Ct*EgO;@Y>P+}P5Nn%PoBmEWAevb1H$GY~?J|Z>#MH`NY)@K0_&PF)P!M!*26X+1trv-@Ic;LaS1Gwjk0t6LhzBw77K+U&815^=an_8s^-q6h5|jg|m9*LVbON{k^ueuR^U2;P{L3VkkkLVLA#6pd`O0=ES?;lMXR1~h+j03D zYvW<)kUKfd<0$f8pigh~=mxHM#qlx0L2vmfpH+a#iZt6YtCvjMDEa;&Pr&;32?S$|wv^?FOKN*vFg7(V^N z+UGs^&2AE1LJJn!4Q%*$a`pa$ejpaGdLBjfO1HJ*ph=bh^CAFlY z%SVby-^ar~=f@A5cVW|&Rh2Hjx>XX6RLy^1iG8)DefEajWhz-p-_Yqho*XI9?G@V@ z(AJY2V9M{^=fyAVj<$Wy8cpH#Om=Z0*862dQqf91X~3as37$_VZA!kEEE5Eky$t(N z99;_UX+;QwcQKa0BY{}YrjHIt38){nif!p_v3tD#^^-gj-_b+F zTdo@37cEz2w-G2hKovkvBREgb^kPGGXMU0R=SfnbAz+tJBsQuk|Tl|ei-WeBgv~}_YvE_COg@STm2e(8t;M`54@}gu>dbPe{Bw-cu`63 z+nD>(2z`2G9 z)9P8tc}e`P*kLW{G#&-EO*Oz}ekQ&GFG*T4ohaA97t&N!bIFXU_yQX^o&} zYpc`MEOnPZQvchg|K6U#>kIaHj%r^qM?m*OuV8t~C){`~hOaD+qkZCz41kcr@;>}; z3@eF(pHObxQhhw^aAf-f9A+rhtC#hQc{-vAo?3{}4(1@}p9h7~PwhUgCGt3( z&*28ucNx^!?&>XDmHI@R3#NzQU$%XO2T%AGCqCY;3gvVJBfz`{m*WlC?vCnGuG_oo zm~vUO$fg+wnjgOCJd{)}b16*S)yxbR%xxUz5ECbsK2Xt2BZA?V|gBsR68H|Nx*`o7A>NA zpuR05gmHNJTTg3Vf$8eGfPn8TzNp;0b;RPiwiH+DQqt<Y%(hY~9y~-cI2C`-3dkebr zRX0tjlpJY5HVe9-RLp}r6~!BKDqBHmjNo98!LdwMz0e~r94d)n@Oz12*?hqGmR;&P zWB=k?O*E`L))kFhU~i9nfNdkMs0Hn%u38|v@PZv9v72mHS@#lb1+fI`rtHg)-RkAU zjj9MVf>QmgyMt3Ueguhw&#b#5NG}>>9|Q ze3Co*V+y`6D|Cty^98(G=}cFq_N{OXP*pA_PB_h~r%?V={*cmT(|X-<=4pQ;cwBE> zV*hdsS7eAOa#|@^3pPh=Z2(%93Pt0Xdh3@=;M0aBw7!%s?>gLE#=H8oB_-J(jML}Y zo1*E5lW*tsa^IsUzob^m2vfat$dB-TJ4+%@hpqrZYWds>?~NgV>EYdc zd7oufaT>vvjg_=iKRP{Ar4~xQS@=ik+`l9F;-NZe^r|G{xHA2{vxd0CQ;c%L_btcF!CLBi^&8(O=Na2(VqtNNBtHYYlgD8?bQY% z8Se*sPBe6n&4I-)6N58n9|r~{x25r$VZD((&x>Stft$s$q$MBW`uH3e=vh6(ewt-T zY9q74?pD33c0}h;vII?6k)-W-Jm;si!morGo4B#+SEnrVS;bvP0?xNmn7b?JpYRX+ zQv~8pzw)Y!U?_c+5|M3A`}>HA{rEh$7R-Fwfrv&(He*It z*?lQtb%1|Im#$;f&qM8_LsX(=#_vZRK7eo%W5>rP)V8bj;Hf#b-v-PD#eLtXgg&I? 
zV3{NbX&32()DxIjG|?ES`}38ph{)OfacZ;Eqq#Y_l1{^vcY^6C4#R0(hRr4>?ZTRh zb*TjzUH29bYzl6D?&!^r&bev;9bYK21M%niTB1ysj10w-#*yEDM69wb1cG3*B+V`G zCG^W4?!9?JNmgo#Erv5NF`S70Y4Mq z&hd0u()T2JPPQ;xh2+&)7oklUJnpWDiknYt^q9$m$tYl#;?UDk6B(^>^%Q^T#7m0$ zprk6vcvjp~x$4k=HiC%_$aXdjLDhHTWaPx7~mR{gF{q26570-x4}cUr@F7pk%j+zpm3Xq6)28U^6GLcP2mA z#pR5rG1YKnk!$&%x2q2s53$O5XDbnYt(>pU<0#lG>cwD#?tGmqKuc;LgH8ezd{O0~ ziCaIsuF#wRaN`^jT~8n}EnA@;mP0>*fBOA7(BXPL>B{wnJ&UFfHkjvvm3WzT0TQ*B zXnznjiMU651qTd*z(XGQ4OG|5-%d+ptdBhI9$4#QZWHNrU~L3RO8fiHuO91$Cv@yH z5-x~!?_3-YKxUXly54HNrd?LqImN}v$>ZH+2HClUr^YyLEi~lp?f@GzWz&)0ZV|xA z1HPGE26TME0}-8Pd;N6hK~PjwWEQT94LtsH7Me0nZD_PU|A&hIf!1K|BkZjHGa)>m z`Bl47_{CI--}U9u#^iVMG`_Zrhr-aKZ|F8MW0jsTbf5MM8gp-uJ5RSA(61L%DRlni zoIHFDl0W*ve&&b*^|XPCb+cDXqxpJC-e1oI&}~yUS3Iv6e0YAH+p~hd^kW_!k>k0+ z^oD&;4UTa{rXE#_j=2^dS*~@ep*W7dpO_LZv+C0zuKp+VALtsUjx&+>lU7m-jezZS z>4P0Y6W~8rPV|9Rnl{LW_AzHV6kNj16yvfI>>t?4uiz1fcg57}IG)s7rYu?tBw@i> z#%Ug+uJj@cF(Fcjj{YxQ(PTxp6E5)LRz;>^LX%pis{Yrm3lsMWc$XBs1QOl@;N%XH zxw<||eZC^%UmtH=Uh@~1f^$vTowZPpno({{fwV;o*PM6e#>eGw@7GLFTxHz;%T2XG zK$NZ|u&DfA2WgCZShFvVc*iE0R;-LpP>eAfG!5>5jJC-6b9h;Eo~bab;XRF5qjPsl zcN6?1v?uP-QJJz8Db&!_liUw!?#VY^Mm3KZP|11+rZG-g#U|t@ugm{55K}qcoO1oE zg<@>CB=PKvYb&)QGM+!*gn&=J*1X4K|5|M8y0gQvo`UzL#CpQgv2CY()O`SEL(8Ph zI4zMY9i|lBt!G~c4^y{FLwjOY)fN9_b|{$t{hc`Z%tBVzUNm+@_etj8{pPgnb7o{u zBV90g052wF%wz)BTTZUx2{Jpe4=8xZ>!_Ox8l1qT>B_prJFqe31K7BwvEdI1Mb6op zov`mJ@<1lj%f)_+w(*y?S}4!@$r>aDBr7Xp(KsP2R6!ep(y_+mB%0@2CH#O-rNb68 zoqB3MPis2n-XTlsAfca;Uu1)_#PpOst9x2o^~i0^jhG@2W)K32-yTqk4Xk2!&jDR0 z2^t}!cC0(D)v{~@yD9hZ-kjYdqb=T;EcU9W8svKKiz{P>e!h?%eJrIx$C44M-No$^ z!)WFLE6tEXD^0{&iuCWA!ErNqF{;EO%=?9p_xihC+|Z?k{a?`6r4(TsX zm6KpV+#xL=P*O(sX>C*;-JulreF8KJWRWN#X_l&O07x3{agKuf{uph$cuyp+XI=P9 z&gD&k&2$qF^(bLYr(n^*&NoY9#?uU#P!4&s20t~=b#N8uxRSrjmWfFRh*{$Ml87tK z*3~7~31sn8f4FUK4Nq0<@U~_Qt1HG}j_LS#LxI^4#|LZFbl`J8gP7!hHt`P^4tsX|(j5;n*B1WG*^M$JKE z2Ai#>_1pJ@Z7h#Ip#$yB=`hp&@%NI`_K^hzGq#PQSr0p=ZTHdyd)JM(b}{~DpM18< zz&6v5kc-SB;j#j^LZF|Hved0c6llWKcGve{)W3f@tMB4ck5bpylA-@f7gKm;`WIBA 
zJc~i7ekx$}^RBOJ+1!0m{Dr7q<%Q@(NVWcKRXWh0;Vb67@7&F5v(EJt_)aMAojS%7 zt?bG5FICH3qC{$;{Cc>0zlCA@J3gQb1!QUV!LZ=sWU)$besbb|U#adoc}`!(TRAEQ z9uJ2b*jMkYTm0%L1I$F5V!_=LD96==WXu`LGP+x8Tom<@Ch=76^pNCc{JnO8d#D|F zLHS3~-9`)F$A4TdQTQBb8n5Y3w3g&swHC~u-N|6Gg}h9FCUq{CHl6N9Z@)ft$<@V4 zl^?rtPb{cI4dXYxg~^Z0GG;^BK||+k-RFV{ho2YNE~2__H1Hd=*PmQtIS&ne=bjE) ztmZk;#an_N^a9uMR1`Fj!}iW)Lu?;cB5|A*CTz~@9t>CNf6X6cu zUO)J}Qkrw@xibytsYO^$P$b z^->RU(cdReQ?^G?KW}19$C76*gpGUxRFM?2!-O%@lA`(hzrVJw$yhhGd8<&1Qc+J9 zU7lp9dA#2-rWF)ysar0jQg2Y7oYv%6-J98Gl-~|2d#swUW(8kt$+HRp5GFF)cLr2< z*mL*SRJ8S6rd`~a&-p_77VvheFDb|6s(8})Q6kz5ZOz)|Z)|iON1)fl>OmjoeGDFB z?nFJgY^<@`W&vHtR-1Q0W&!F8=l@JnRD@@7)F%_~)MYf`#mIWGk-fR&ItO_jqZX20 zf#%bub_-=|)h+5h4`MnQkoAlH)}deJ+=E93a4qbe?1lc@{R zm1SILYf3K9{Opekud{U#>WVXnDA}OW#Bzmb#%VLiV^8zf?Jr`r_W{#Z8A7R#b*_9% zB@kET`DnA-^1sHr{5(r)>ZVpkLavC!In()}tm&=^)#j%*+mCU5RTo)V7B{Tuj)z0H zxEPjJYe7q+=WmDvc>F%S;Zt6!@UQ2J86xIZzGwSkiF{?VTQjb>TSFxPS(M^DD#3vP zuT}G6LxE6Mjv$1lVl&W{1f23~7Nl5AfD5q-Unqon-Q?CJ=#fSY(lz0#0h&tK0u5zF zLai)r)Gkv?;qrTrdrL;QO(A?-fVuWbre$isEX}7Z3i#P9{<8*pM?||qaKrGNkF4^# z2O>;r6RF`{sBH zuA_IBCwwJd$T)>bqEx8?tHf45AGJ-8Ms@j%nxTyDCaXsqJ5rF*+!nn2zzS<|aEk}b zZsx_|;JARVxV}fNht*k{>YDmrCU2GJ$=g<^M6gf>SH=5>3{J#gUEh8;ku9@HgDt`8 z5;~DiVMCbBf}ksjO?Fs2E2u(4Zg@$V@4a>z+0vL-4GJ$M<#6v@5o3NTG zE;zL*DSKGaJUU+{U#G^ilDx=+%bdJcdm1>9pmFuNj+9EcpTnyR zD=|t;Rz8|36(CEzJCzJuoERv1A;7dR|H@0Yb>-u3>P3`QisZVeD!YvJGU5_mYmWN% z%w5cvzM_ER(olOigS&`hG-^Xrrb}drI{?)tuvu78S``xe0dbV8n6lC=W~?J$)vp*N zv+SV~D6YOJMnzrWG16o`O}x}Xn8i3O+g!nS`&FP+EV4(Tg8FbR-BXSK3G69e6+1|^{W z+N62|4)>2ux`u~8bG?#oQQL+Q;_rteOfZ(Rr&bbPAQwo>u$@2d+|w%dUZD&|VQu6W z!up()tFSdpr+vT|s)jDo#2_%$b*<)eL!_R_98;P!OzC2b z^0Q3D)mw}9CY!9)_tOX+HQ853ZB?iY<%satv%h%-JpS^N1^AkWB0{#0`_5JzF zg0}bVuYgFe8FDUE=t~}Km027eiJ5V?PRcBVSo@`zi16h@vh;Q786GYg$F7)tsbeMx z_ou2ibnw9mcNFJdmO)=sw&}qxNitO#+<0#_NMgZQp-L99)M>zUi<1rw>A*cS)w%6_3O8!sGOm72rC1Zq*Cq)pPHl?on>(j=+W5q=j3LN^}XAhMZU?4Z^@7np?^ ze1ddn(L@SZFDf%1uO2g{MYK1$Xl?MZt6RoLbZbKnw!OhoC1Ib=x2!Lp>S~oS+-%09 zlMgg%6_&eHcxP?p7s64%R~ 
z$+|4$OS;rMF+`?%>6HpZ`New%GaXCuymS#Ts1v5!(>2*T1Xu`@Ad$kLLc7Oacix3Z zWJ7ur9)mq4iInUGA zAVSt(JYl`>D@$F$j+y+J@+R5wcMSH?mJX=B@3a6&!B`*$j4KwKOLy!i8I`*K^7vp@X4qI zbNCgPEK^@uwX0gz3C?VX)+{|a7_Jv~(8+T!+v^^{GJwqUN9j38YFC`@cZ*WqHq4Wu zGj)s9Vw;Yr_OBYj%wh#ANcQl6Wacr>O|YVjY6ybc$BLR!H7&Ek>m{BKIi6sU$xvAk zd*`HU$!YgQ*+EIAWmpbmV*vp18npwehY0xi1*K*$(@c&E7v$dAad;&w_$R zSQkPTHJbi6hcEhA)K7S$7-D+G$I!4F6<HCne2}ql*~aFm3tt+eVsdiCq4xr5 zK;%J*zSS%()iVrWKG)SXJ(mqRoi7~{IAJwgPZq)4#R(C&j#Mo=SMZ-cTnX#Ul8ld6 zjX(V!Co3T#yVT&XwE7@`7xv?9&8$i!8BIj6__0#~HMZ%Pcx!ArxW_VRLkcz03V-^?w@-#MyH zmt)`$ExNlG!eD3YW=rS1a}dh|OGEiBA#*!Uc=&nrT4&VZ?Aal<9p2EF%`}RWg5l|D ztyakNIvwOx-DgYYT-y^4pnQSIdP!(G7r}YjRu*|C$Ev0OF*|y6(~XXkOx|Dlj#S1V zcM3fDsJTXJ6ql^tJaQJYr)vrLmAu|aLMP$%I7s??NZ34Se5d1lq|0js2*uUEYrgKA zh^AX5IL*3s$A2N{Qahy555_qqGa5(lCqmA` zqNJhZWcETN2j_7NkxTH*Abpjp1@jJx4YJ>DQl{Bmrje1K37K<^?$LIpKQWj0O&Ku~fHjN{NEBMwY<2<+ss^#;3->2|Xhe zKD-FOo|jS}@6gZNkg;!Iu*4f%&|-E{lp&dZ-jjQcwTr_W@4-v!wzlgmW?VZQAClcn zIJ1SjT+YQ&y@POMFNeaeg7O!^%?4#Q8r5AaOAN1)^V`&09gx$s-S$Z_xp*;&nX~=6PR0U1K~OPa7uzP0u|N?_dT`LC9p2J})ohy}F{iA*$RPF$ zErK$`(};-QqXLuc{uiHHIS9*Eia%|5qrMk*LEiY%Bzq0mU{-i*@vfyM`uNorsrF`4LMuXFz8LL1cD_R)u zpr&I=^ovuZsGM23x>=@KP#RF9ZhX)#hLfaugOuhI+Z@%WoKf`|ac?b)hnk=)n;(&1 zAZ@(1P`*0JLIX8ov~h(%wq0zol)6tR)8rYkNNuByK&>Bp*twiDcOMpy@ zyT&OZYd+@Pec>A&UQp8F8@(}nLN;&gI7i0Zer(v%#HHivq?~1LjaqKb8eyIo$q??J~mW$Vhiw5MHiJOUDO{V{S)u zp1HF|-^ErJsw|*?Yx_)wmwkeb4DRq@?Vz(B{>H0kv%Ca*-&ndGTCPw#Totd~s z`4FW|ts#;$t5nBMj7F1QVZ!CtW?HFM)-T$aBHL@{;Q6a3dgxL@PmRs_6Ec8hZF`O9 zA#*1hqC%Q~5s-3|bb-0Qb3|lGK2WFOMqJs#5gG|a1>j21g1aVIE#Q>O8KZQ-fOmVY zYGg>$wc#AYKi5eO{f?Y3LCfsMmBUy?evauD934i?o6c=lkgLohzvvr~R_dnn6H85k zqVP|cmrMs;Lz_HdDay`K9qsjkAD`wPy$0feCN8T;YCO#IHN*qk28a00Yn_;t(2Zr=5CX0!sU9ZONeex#kR?h`#M@)GUE z;J_T;H0iu*+>wugpE28lCpN1PBlSuZ%55a^7ZDkf;j21fIS;|A6E9=SR zXOh!^*55UK#t6FxgRQ4R>O_CS`*e!?`1?`0IeE`X6H)mrjz09e_Mq=xp4GR)!p#%; z?Ww%Iy<1S2!aZ~f1@*?>ON5jFoEvn>?RU?Em8n76uT`3mi@-5qv@;{Z+1N}_(K~59 zTuF?uf`JU1A4y!czWvvP!E-oGNiKHMclj93qRP7}(H26|n-5bhystKPDlQJt+oD~m 
z3+tNyto)ewT9Eb@FL2We)N=8Kg0}2^>ASDMQj`r=ocTL}}UD&!)>#JYsaIU>(^|=@*iy1sYdrI*Se!p}*3RS@E{XJOuv!+@hla1mIhRD_{GgN|;xLdCo3rgp2c#`o_ zn+#RY@$5_vk;s`@#;-x^B1e72$L*1_x)i^grGtc2DDP<^dqDBz;_A2KKjewS^7Hqp zi`u<&f6|$37+(VLtE_$Z@C)*}_ZAm2d)-rT1+(w6*2fMw4!i0T_$?%2m@lOVYjbp?uZ*#G;HVqGQE|5VplBN0jyPoMO+^#!GPnIDH_F0;zYVE0*3Mt)--9G zZGf*n6#Tl6{is-45u?&J;7oEcTw1*ap}o0;9)TBK*FTKx1(-njyC zhOkr(z;(n)|LzfQA>Se4BJTc5{lR_JmUQg20v6s5*^OQ^v2)0tB`6vEVf|gu&c_u! z$xdi>P6O@D)~J}c1a;xI0dtjAVh4F#qtHk+_U6t!H43udG_h*0^vUi7jZQOkXLC*g zsb&{0YIjsRM|eHz=V^F%$&c&V*oNQ#bjepsP@#3?IW2jbEVMv(C9xOeN%b_J_UWoy zTPLvr6A;x*@pj9%@?gDcbxE1J=9)NW-|LJKPiBH-c=#cslJW3c#x=5w_DHfTd|lY; zb|FU-$!}5^zu&{|cbe}JxnG}@KagZ$`q~VDZ#t#FaH)x<=iZ+p_T$%8%O@_Ioufm& z8IYP#$nhDKd7*`I=BNSMV34}AHY1Tzc?o86Dat}3pnJLb#=NRV%?mpZpSmyWxp!F!)QRm( zGw^`D2J2?W1i*ryc)OS$k?F~5thK)ZIoyBKUS3p&OlmByBhIFEK>%sluI`}N{>pHl z1>MDsBZzdq(L)w&VJ&#r@@MRipPsnBBZK|BHU71yQT7|-M7%HY(}(2xVb>wDx*HY@ zWu~id5aI%|i3e0c%-nvT)VGdX^r6uEeySA>Z-~?}q*eBRi;?7BWiImIZ zW0o*j)hICL%C%;A-Rk*WSaWc2`BofB!R$k)Ir_zrUuGOX_)-4a}_F|8@OwMIzi>bzmQT|7LpdF(b#g^DJ~Ao0K0d#k9rwq;#7A&>+Q z?h@SHo!}5$ChqPM+#$HTyF+kycX#&y!6!O#?#w!8-#@Fhz4m_p+ih(=@Gux-RQ0N_ zs=nTqHjML^KCtJ?{DasP+K9lMm{-S}SXmD2a)Y1?Ch zAWeyGHgPD+AJjR{{>#4%ZKAn-6Hqb-E)Be3N-M2Hh8+fT+L5zAs-oMd%m{tG)W%}4 zp`6q-YXV%}o}E?6U7xmMGTD^(qB|tC)78B(9Ga~Dy0QOBrNfA(S#N52ZEUvjVs^!U zkXA_yg4E$!U|9Q*5folDm|kL{W$wLci#1W}?^rNP-k&EJC(7YC_R;#DOa_|;!Hk!; z)2q=^WkN*5#^Of<$qlG`!RLfNrGd}&rvv7c+Tn8nf-knIm{gTSSz~Oa1k1T^|LlXS zn;X@^VbS(_n@)MR>^ZA0CAjvVbu7pTz4AJXTl>s=Ug++&u`Jta0iqkydQEu2y-8~H zBzUfcM0hVzHGdfzdp}cCcRy1HSU!`G@GyyAb~V@F!DPD?zfXjBvCW@z#4S=W~BgrdI&H(F=XOJg07#)TKS% z6XZL$hzqkOc7(hRZV)gQbtaf+A7L}l?7Tx#t^F7meaxL0hBkGs&(}co&1z;-^!&W!HdHc6W^f>Ipi!meW&odT zPIBYg2)RFhEE>su9qGHVp~}ZyO0Bdz+giYWDv(8(ma3^?olf?)pFG@0pK(DthF}|O z5`7eu<0*cA(tLZ-!>p?Z`^-Nhj3x~x$+ImUV3!0ghphOE>2+DZFxmJ5Yv5vt1p@5 z(#n$=)?x9i`kH4pbhbl^KA(35L%L#tFQwAQh%s(>M$PhJ6=F=|{E%~Z9i7y5GispR__&($P5$Ki%XHgqTNE+m5y1 zyr%KSuHI`!L|k@&#*2)^1)xdP0*6_ygs-$!xymcA7nyWY=VPH>ySQxfs_F&VoOhWF 
z=`^C=1?=;!C@A$Nieu%gNn%9mY9|>E@i%AIW1ZI3kZ0U|n87$ueb=EL6~fQ&n`s_B zoCvCBy(%z?ZXJ||ZAN4cf1FC3I2R+`V%5EN_*KQN;pb+INao33-nfcFJXN32%bV2s zVVN02KcNVaXqSGgm4Bep2$3&URll^2PqmWde?l|ATx+5P=e7a*q!5b|? z$pM;20SFCpubvcrSM7P5$jv7lcP4WHAmn3`kjR?O{HZ2k$DdTjCu4II#(gYm;0`zia3&O+McaiISD08blyr#Ye`Oja4rmN} zjaY>Fu+K7DWvo=;hJ{qp_DA=*x6z&&%}m3SRPh@UHDd8!N%A)h4~XbUxtbZaaD+G!bCC zsR52#b7lE$|(9KPuHhYXo#mefg|DSyLtq*EkBg z`qH^MS@0-B{dm=@LKPJC7j;|DH|lra7|FUdZ@tFU1}b86Q$wtbwQy-OX_{Q!a~nqR zXg|?tL^Ti{yMR}hKr9V07$ho?6&t;2un}pw{_-TppXh{WNVfOcvrEoH7n)&E|9Uix zp)KeO&#OCgY;Wgb6V*QazX>qfA|FKQhso8gw=&*(_bdHgbpKjg*$A}KSWZ?pCs+UBMsNPXFo`@c3AD;Ey9*YK=rn9@k zps%=nD;QG`nQo+dT+Wic#MO>TC_&scPwc6k6V2@39mw|1y}{}IVc!d+ap{#SP_c8dQ;@%Db{i>tn7a%% z(-FJSJ9mTR*aig*dbGS>`EUj*+%N$}VREv8gk=_14ixSeb#2~MUH1PHF~O*_Pl&Ud=}(hPfWVQ^rux+w) zo=zD9+iSZNpEga)Jub-$y=bu6;pCR0@z!KTA3Vty?cnddV$N5h*PN4nYV(!sQEoC~ z_`6%*QECCpMlUnaJ6Gs?CY{A=yx$YSl@>IUO zh+lt4r|xc_Dpa8ER0GvUCYfy+jP4QZdJFUO$xhInYuLk`bp71T?V^M5K(Uq&`vW6? zRspWj)7H!pS4(0%sEsE{Qcs6uFW!55l%HzvTK^rNJFUp`>g|159eMqe$@xygwh;3 zTSS}XsqTB3aj*_loaB7pHgY(&{^!I8TO(+OEk~ny_R0TImfxqPYEzoR1JmA(W*yp= zqR+cg&;x@r?cnC&r98EVH9cOj31KRCmxL{6h_XWduoF)=cKU-ld0NK%06WvoDOcvj zL~Y&8H2>-2d#epREa0Vr?Y1Nu5ZYxu0_xl-`(V5PGx1Ea9`>@W9{(Hnm63Q)m_S*p zJGgP3SqnlN@f-Fpc%Dcv3*ta$Lp(@{F~dJ8^`;%LB3pJ!)5*X7rAkt0+|@R8ZW&F1 z>5D5&>Z30_MI))b8YYI6V2 zIRBq<{yW$GU*GtD#tCPqP2Ay$kS609|Ya{9+2ESz;MxBx*GqA zC+Y#heW&%g6o(dAzqL>_UB6xad8FE78JF*sm<-Pat99axUPp8-?LS~@57ktIjMSLv z1xM;a3*V1Hz5Tu45cd@Lc`|3z|su)|IZ zh+JU^%*wcH(R8&nG4a)5Yss4lVxJ3S^td1L{sQaN?li0Pz9qdZ)Z(8pO{v8P{m$*D zj^a)B;x7ABWG{}2TLdH>_yNA2R36ms&H+(vRM(eGZqDN!cDOBr-w-y3nN0ozlv6Zz zJ@}1df_g1)FR2<0@0h{i>}jBYtEc=JpF-JimwI_Oi8%oF&iN{7c=92-yn z_SP?!qbL82(%;nuj`!wITloo(N>hiLPtS8EGVcR^AEf-GA4o(E2Kk&{{81waE{@A= z;`D0(yv<7;aqFDF0r%hT?n_17<+M(vi1rq?#|I#g8M-;-18^C~0>V_%JN)xJC%JWS zb`%y0BFYoR0+#CE^no8j{rznv@PWzspS3vu zdrNnySRTyfs`4^!p>4qEby2r-`~psQ{(m3} z8XC|X6V2bke4)Q!G|Bcwfl&qxW2KKwfNJNRh2#ZOM``Bg6X%SLsJ1Kei^*M`oifE$BhJpylJ6B9U?on(a{jZE zHX;9I3hKtcO+m2pW(YD!u{E8%P%M 
zNB_4(`8Szb_RjB|&gH5GH|A|+O>ux-mMvskMOX_u)~V?30X&NBjWQrX_2C>AQFrbZ z4wuipuCFd85o{~sWsHeQ|Ez^QaAu|4{L+n|@wQU8fQmw^_?SM`%;O)Yfr%h9n8U;Udm|KE@% z$M`!l3wLmh0y~u}lu(;D_(`Y7o8Mngc%GI3+vcr z@iU^cH%kLVfm*06*9OU#E~_E3i09j@4EFK^!fxIFglq*)U);k^734L;?0? zb%2PGod@ID zh~*xSOi{IXf*w0@0}U$T-hx8%Bf#P)SXKY{58~*6Z$47#uPX@_hd~l8nG7rrHekN% zl6!wA(T(<+Ir*k?vHE(-{q3ajw=(`M+409iv&%5pDVy2dA?_>!+br=FFeH=AqmN4d zW163R`P*ThxIPv_!nr`@Dfyax!2-5Oj__XW=lr(pUnaKM6c|DcGPkP+Wg_b?G&0GN zWYdd>hx`v0%wLz|LTWLz&?r7_2q_zPQ6K-}R=wWkXJ9#N6mNa$24+(Hhd?0U8~uB; z0C}Lw1Z?U{AyUpha_`&fh%MS%Uuv&tgmtu}=!af^+VtjUT-mNISd^({aC~h#MEVwW znp@QBa-TbPe*@RSSFiP|sr90y(iPTSabyp5!SlBu^@rTP)c+F`?EZ=g0bEx+VT&}_&n~AqYi-GN9GT<2~UfMD-BKjXZU@K9on1P9`_L1k05SUd)tER zGcG{?_Ypw)$VV5(CFKsT-HrdSii*EiaSrii39Mcl<?E`cWnL!2o$<#x?Q73 z^2+>-;5(X=X@c*_vYTsgv5}EdtdS?vRjcj2L!Ez~9yW^+3Q-u=7Xc0~`c>4(l?aJ{ zYtZN$=p$bIEz15FzUsX>cM-SkUe>IITNIE4pC{4=D{ItFI86} zXC0re@S^raXntU=r1ain`tZ-Gf!-%@K!YpR8;X5f$&j5$K3e(-I5X0DQ?!oRxGVDX zi%ULpw#5E1kbBFK6spyJ60bMaAWP^Nsp)<$(th6Y;Qi@zyFA9Vr9H-u1v@1`d+nbk z|6j_)`0i4_i`t{XXr#FmvaPPCGXvb){M-GCEGDvrkKhh&S z#T{dNO$2aQEz~4-ianj|awYe48>kb+05JY%vLoB@w_?~zVJN{62UDLNpF;%(m>o|Y zmp3TR6a5>^uPTx_u32Ney-W&pQmEfaHgAUD+?`l+TQC2%zr5cD~NN+@u3GRe3cQFle|$$ zwLN}cAY{?8=j3)*zH^_oU^&9<-v^r!B3lX37s^DZwqL16%W02t+3LU71Zy6Q>%Y~8 z3!Evg%U%>4WZODjL%i9Wxm?F~fQ{8}%>@Tlu*V`)#KKMJwD_zCJQ_t7!{1ft+L+bS zD7(FHgK*f(m>e|NeWE>m6E?)K)A6Ppl;+1SW<_Vk{Lt&n-l+E>oY=G^8N_w zlnahBJpVbts2YQ=)8avw(WgP5(TCU%&V1G*7%8d+4{s}n=8|N#%K=wFf2om`W~BFy zofGcYiku%9oO_`NOWz0b+Y#?n+*-p-e(|aKe&F+Y=*E<8zrzmwfTT?4xdO{;gN6$- zMJ0m$3Ut5th}y)M1G)mC>2Xw=qD^cZ@wo!L@FTNs z7hrM?cBh-HhaVZCpopZp0(vi>E;+eb zN=e#O=^*40jSl#tMrBv4zK|uQrZ`@ciU8O?=Cq2IqaWLYlS^bEXdYY6TCE|tdD*cJ z8*;0|dYPkK6E~tWPjNErD*I}Q)&Z`)&}8uMd}5G+OcO+)wWzzv-ZEe_oHwYm<^;94 z6i`b?w!wqyxf`1=mb6er4U~e-jaa|P{aNsgPgqy^J66Xk7iBMIHAA4X>gIu12!9UqS%&MhU?SUYV7S2SF8qu7I1{y z8|GaHb_6zHu`Blbp{Y7~g_02fB`dmZLX)SgGBhkwq`WNedhd5)bbtN|W7Yc5!dA+s zeZ+v|%*#-Bl;tD{@Jh>GWYNZaqB$x9PZ-bwWvp3qhGG-O!;Qf7P 
zH~hvqiyRwmw_m&|lTyb(+!g@1DprRkX8e^v&3pNBUKqoHQOG&_1>oT8P)d8%@YGq6 z5}b70Fr$qih-9O-l5cR;iuie;wP(^#2>)bzZ?KdI@IjFtO)V2S3Ot%UMEI*5Ex6_` zMV}vv`=bW^7$o4+6>+r<0o6vgHJ1Zn`fh&S8q@#%P5LVIBpz>MxyA%h=N;8t$E|rE zYpH(&o*^V9rIcJRHF{>x!gv73tVCB?)tlB1+AiNVDFw;#60((sNI}gt)*rK+W(u%Y>g@Jy2$Bdc%|F$-@^qwp zG>ONtmnK#N@>XT_7js}a$a!HbMp-{C*6^(~&{T_zC(63;*&|c}n*zB2V8V-5ed4K9 zvP+5^-s-H}u3X`!xiOcAw{FD2jTzJt$d2g!i6IP${RJVa0kgPh+ReE@;?3EN(Ww+6 zOQkIs{g6e~r!1Z#)l@w#4NPsRIg`MdkDkioMS@D`E@z6khha?C?%-duY)pP+jSn2@ zLQr?-GhUYYfT(l3n?qBQG|S;!5-j^r=LNSyU0#%eLU%a-NH>()d=#rlbSWVq+hC%$ zUaE_LF(+A5&7V7DYNI|X2S=gf4m@va-|~KGH^t3V!f+tpGpMk1%fQQY(=YA{XEX@R2ZeJmt3h-0n{ z_fjX-&M z$8WQbTA+jy=$!JK$_u1&2CoMxR<1{>0(dycQ7q%07)``{3Ym`;^ZAnAndGX}5tmULm;sb3bt_G%Nn$ z$YB*RdxQF{LfvAo`#fs=|1~50dy-L3}?&<|_z$>G6`b zX&M<3=CVnqp<*|gkqvQ@WAI>1pQR+C?ZEo`wCt#N*Qj=4>Yeq(xT(fu#(YWobFgQx z(LS{lG!yrP%IHO9&j1$t_CcgC?`VbIBbYB(G8IZ?e5d_+kH*_rnRd%4UB0&%GoS4| zNbsUP2c7_s2moh5cSPP6U0UvJ&9DwD{3S)pj&A&gXTyCHvyTvGim4IcN^E8X1zNC@ z-#X>{c#%_lVC6{B=~_1L%YvC`f6~=88ISFO+{raQCpw6ve`nUQwcX+qJ2K}hZs@>1 z_GZCf_I@$W2~7PxDEufv_2PAz8-*j|=sC)}5A&FXDgS*)yqvg_b*y-z6riy>9<`#` zsKkQ0F8h0p)^aE{De0QMB8lB}E)j-N7>(Y`FmePV57~qFq-(>bTBb>6imNfF3Ex4- z8PCDfgw6fS#^zP}*g5m&xlMQ49ngCO-!Q>^dm`{=`Y5xZsmO?TH9ounb|UQ{$LYk( zQoA0`roCGD;o81fj`D3nrA;CA^aqsc;Ra;I^b0`J>;8}o1ktyaR4 zjCL)_y{^M;oBT>wHcwWYaQ8FIFoe_8T>BD%n=OgHG2T1#3m%UQh`tByxJ;d>Jefsb zQ|0x$jqCKapad!Oo$@FZy{ihsE0~_A1$5f!79&`e)&j2ZazC;Fepp(U0{pemg7eO> zekpd{K_sg+NTuS0VE30Nh6kP{2s9>~DdNe{yPMuUsq2GB)vjnr7Sjz>(Aw;mOf>4j zIjbJ8dA{vj_$}YTI3I*zExF#vMiqn2gXC&mYJK`E?4K8Xxk8m^#qw+n*&yM}_3_J- ziR3G?1uERnlj`qW7w{#|B5!Yyr6X!=T|M3zw>7+-Isp#xcc9?p$toBecKPF#B^taS zZ&%Iwk6aA)M|EXLAm*7=MmCj~F3Y7c)(+3C#C>q@RExNO^}@)aS%sM=UjhBhbk_)# zwHAx8-_A&DvGH$vm#BMqw0CIxx*{XB8_+G)o-Hcj5iYA9Mc7cwrWB z2RTzK(sTQKRo>?Pg$IzbG66#(0P+9*w$LF$RcH1WTs)R`L%$j)215BG zZ0E3)t0N&1n?v<|q$Sn6+$FzgS!0A%5lcK1-7~^~@Z_hD5r4-`_N!+u#;P+LM1#Zy z;aq+yTmFCq9p+y;@FRZ|C@^~&>o(>8ob~FL?@aP#c=6@_Lq!+R*vm}haL%4;XFc1$ 
zqj@NqJ#k=;ZjY2THy=KJ&dUOwpnqNG(JHcgd$B^0(3o~ivzFQ=<$y!en^~nN88P}{-l3IG1-Nb-q#gzZrw>^-!4&+P+uqH%la0`!&ii(? zJ-5da+;)$MFY;kq*9C53Gk-oa)FH116)5>k&mCT28 zofRPxW!57YEChKz9Oc*-{uNL6z7`{F5+9}0tUw|BG>Jw+t01~ave`a&zQdtDk)Fj?=(_|k53#%S($zjCguVUjNgJso2W*wyx_cr0jp?4!+uZq1rh z2wqVYehXQ>&0A9H)R82#+L><|GDB@E%{B4YS7kOMI70|F85p8l2ZnX(j~R4saXu5h zHzN{AoSO9H|M_yfE4%gPMc|1wNq3vL1Tm28j)R{z8JUW&(F;I^yjirHA_xl_18U;OQE zqJcsX$JdTyr!hJ7TnkWE5F&mV2Kf80-xI`-zT9so=%o2gbJ)#;JGNqk9#lh=T@-Gwi1>p zx*x5ocOI*Y^^k*VUVglJ@4Hxw*pBn9Z)HnBV20!bYyJ!EMMjaX(rJTecU{ID9>EXy zleA^i8Div%@kJxj?YnAQB@aF3s)C^hkHs(klC4#0_u3lR_b}$WQ`0ls(dD+?GTPiz5*;l!1hA1@D(630^VhFqYNJTfd zHzGgo*$5Ykd7mOlC(i_D01T)cj2g?!b>6LA&c0F+>*2bD-SRVQ$Mfox?# zI+0wO!I%>Tj6# z#Xr$>L=|HaR{Ej(ejLYbF+jQdxcnt)_sCJquGQP7F&XnD#U+CBG?z_YnS<63@lhv) z&2hI52kI<)OLfn4C5@!CF=B4#Xm5-&gC5Kcm4Ja1#*MYu#?gr11UcPhTDuRS^)6Tgf zK+MkbZfqx>FSK3cgooXUGjsl@HuUl{Vh`>pf(Ah!v0tQ~nW5rDI8yW|uCz_fR*p1d zsLbw_Uhk;MbTjXOY!^9RhCORuIh~Z~p3Ly@ek+8www&e-`1=GTK-U9*|0%(l+6?Zd zw^!tC7AYE8-k!d1rRO3cq=nKA>0FeHA#q_puaHFOL^7VD$_wvrmI#`Zy`enBh*O_S z@=iPJ)0ZeP;Y^L{Z#cAiH!YykP9d*hQ#R?>-ef#z*0n}Ib%0LbFZV^ZpzzlM(c8=` zWW2gNhcHeTTkKn&^*MF?LXz174a~U#Iw2T=`c@#jn3u7B0CAQ!!9StA>0`0J#8>&Y89AbF9x+Nz zgpylC#+D<%PykY>fcbR-C-w-#0OALE%NeY8@p5PDT48eV=MH5cLiOyp@7ISd^GOfE)4U^&vfki=&8u1*M+>c{;h%+x*OTncOz zt>(>*#ekv7F>F5f9}T3cN`>v=V<|YT==Oxa;U0w5jl5mWaB$uZtZb)*G?7qJlT}nw zv$Z@

n}GM~~wjFkcDQQl~}a^Kv3sInFI?RDQM>nXa{lW&uxwRA=2K3CN@D8C!DA z)RG5|Fwb#f2m1jcfg+vNvyk-M5F$VM(M3W{okIz4V=;N;?kDfWZq9s5TW1k;m+6nI zp0A@QRPu{6v8xsJErde*L|z*q=${yvr^wXF??1bkHDHiTTbv1olrBGt)J1}^y8N@aY^1S z*3KFgpLl;7Nx+7ZM9#tEU|z%I+>?o$hC)V34EJZKw-SSmrboS|(ZwOCHG@;B0jBHD zx&5g*J$Xqz#E!?7EGJy!a+P0Z#61|=@@hKg%zk)O(?4{y&&y5Hz~O1|!%M%gt;&6f zy|jGzFgra0{xHoWMY*RZrkkv?2L)}Lyj#ddgI==xEsfy1yO{bwGH97c0^XFSVMP(K z7BDuDVQ#nrg>-m^9wUR6N1|XFC7mB#?R-O3w>59I#sfEX%S%nA4ZW>1#M~yGsrRaA zE^$mQbdy4Nc#%py?RxmgK-^4BhLdZ5q!*K)WU;j>J6wCJC4tDH!itJ6D1N5iq4-J9 zvQ{|b9q1!cWMlqa`t!8v+TrRfmE)#4 z+{2ZC0c6@jhQxIkx|FnVrUwAF*I|ax6?@}BY?kmH)=O1>tfjR$DW!hJ3209i9-Lco zOC5cxn|Lh9J z5#G)reBIbYnNVdU2`d|Oq{~Y*tRmTByE&Cio$9Tun#FjVbMl+S3!a*0bE_Is7m1@z zrj9vpJuIw*58(#Hh&gy#{KPD*)QPQ9`T<}wI_Uv|CF+WPJ*^vNtc z0G^Vt=mj&UAIScK5(&1)j-N9bE#fpObG8zwRr59|p(=}s$JK(YKedfPN@^a|Z|%jKnmq)C!bG8(Kp_;ZK+@7J*n;st$T=7O>+M5(hP z(N3m}*V05Kb@1AEeBrN<2&_aJIa(<=B_1gb(N1s8Yx8T$Sj2O!T0Z1jJ>9w>SoZ_m z6`zxW+e`VZub`=Yd#cE9{Qv~Z%oaXIlQe19zGU67MT%9owOSnKfUZ#O2$j;Mz1n`N z{p9`+Wy$jHRr&WPC588Og&)g-TbB0|EVq&RaoE*+P5G8a4vK~a!$*J^oUMrWxiK~eQQwX+1nLbsbrIdFdSU^R-=g6 z#XTW)dz=#>K^A@{&*?LU(WWBJjC%te{jVRYIs{S(DHWaw$8g}*P1}hy>slLoLg!;$ zRt(Fp_)I7I{4F#^$amCQJ2rZcu9AJx6`qx`&-qWC=_@L6J&HB#OOw9s-Ph{KlnaGl zku>G^k(H>Op*tSvh^xfrK`3Cobw(lwrcps+t$b$^8c}|0dZ$A-cxA=3iA*-7g{cAI zn0-eZzm*`;(Czx=_Po?=y_1@wY42-B4Dr zAloF)#Xt{V8w}z?OPrXYegK56JyAt=7;S8F+8e#aJ|~yl0U`CLB-$2OZ_Pd&WY6n? 
zoY&wbG|iLx^7QeZC)75UxU0v0mj@8D>E->q<$R21MZ!^w$b9`nL=C+fxh(7D)4@p zCViED$KCx}oJCcO75gjNsU-*2_c$NU!o0d4micRu+P;>Y%yl@o(U!35 zDCO>r`6a1yt~K{}8h4M9ArDz5MTi%O`l^W_AjNkiT)P=x44WM6r(_sS*6Bhjs&+3M zTRTlQlJ4croB3@fCA6_8I>{|Un{ z$d2N%E?B~!JV$nLRbDNskg1`nHMCHcW-=F!V?PgGywv&?_H+b^cFMUrm2CE5Y1@kmrmb)@-FvU7@eJO7fe)3_j2eN-ovgEW!Tht0Cw!6uk zw;G@?pQY1N!t;YFM%yoKi5^%k$omnkN?#+XTjCX`^`k39=^RCii6%LPJg~Q7 z5ZHAVAqdhqyM-}PijMHfNcn+!ptWNmE^S8` zak**;2BV608h49gQd2_>$fHK@`8-UQP{yhFmO}c>=hhh|x)=MR{U)2(({5Z*YAxzT z6yWbio44SuYmtc_XtXU+v*e_5k2V@b=TlPO)0OX1PO4@n+Q)|I7R|+<+fS%g;L6MP z;R{9ihQZVyZR3Z$X)V|Goa@*&SE94H@{C^z0)(6s06YkO3Cb9H*WA*o=ibxa$UQzB z%L~cMmZ|_VHLxVW{A&%!N&*Q??sN-|`I!PEy%JrOa!TIPOZY&F|KDGB)u&)_8hJg6 z9*OUM7&3*~&r?b|BrQ+Lr=xv4cYQ}y;63j2Zcb@n za7ctHJ-KpK^``%bKWUAL(v19g;hMBra2)bF+Z@jNd({5Sl*JwMG=YYU^V43xEV`qs z<9k)sU)|P7_i`6$RPReVhKW`k(hP0t)jHq^(4=-j7>52Pqo2L9yin74y|eog3K*)w z07HsUm5mPok;Rq*(vef2d`BLfU)!!B-*BuWzg^u8T)m*#XIFrJ0>TeLu$i+WmAQ8> z2^Mzk&y`JuPSc%W)b}iGo+gH>T+2@N=W|d5N>XSej9PmtM>W(iU*rob^H!OCM95OM zCs|ewS4e5_PC`FTxEMT_KVrPHhH2fpDHWEL-h7qW&%Vqa$eVA;1}TF+^wC)j@&ELm zy={+J=6}}UpA5lDjlL}wj@sMMlb$47E@m`yQzTpm5txUBgtk|`^$hB;V9CqK-@rMS zZ7*7@K~kY1k|uGCr#qF2DvxO|f0$6QRttdMGhR|W)^}F)AVjjPw|c$ML(_-5;%p<-}XKs*(E|z z^`>7k+~6$lmJinH2K>mBaBH)Q#h<*rs5>0Jy?;^3JeNaKVw;qFw5zev{{a3ZW4tPU z()lPJl9!G-Ykz)GV9qn`hRzp1Mczm(P<_`=t?t+HNxw|siiLZRKEKok$bPU za?H4p&L5PYPBWXQn0N60MNs_3RM^t~2297>P+R2ckNFKTXhXxCpT=iZA$Q##{ zF_<8rU;eJf<%wVx5E9N(8-d=LQNq|v4EQrUE;&C<9W_XVVT9!BbEL-;G1aRWK#ZX+ zBOmER_baiBVZxfuNqg3@Uy@<;y~{OU9sf_Gb)+%MS?ICRNmGcnMR=Pq$nQWU~y*WxF5u4B6pA4k%x`;UzJ9e0L( zP!StEZtF}bvpUiNs1RLJ;oJ)#wdo71W*F1|NX6b8A%&{c?8d&4p>z?!Gn?QVqwOxs z&qwtrws*g-+@Ss8%Z%6-wkV7`Jp{z#oQLN7(XTiJj%A}4V| zPRb(rwv{uT{FtnHodY^|TEBgmIEqBZ*RRXe zW{c`LGZjYOF-ue*zaNiM^%Y;$exy$s>vd~IN0)GGeSfLydP^&|&X35fBtOWsHgwVx z*zAdd^azeh9inL%Q6fc1WJq%zLX=k2lOw9fwa7IcPQ4?|-CF^wbG3<9)M1F@eo5p| zajSLx;WV$b-=Ax%oU4T|!sT*ZtS?WK$mCt$;b4No((3g6_4rw&G*+#?AEso9%+e?}P6+i@pd=_fn#h!NDgMo%ed 
z?9s)wwFQ9X^WFBFP-z%ZRCpWp_gcqE0?_G$i=#5TIi5#s*v+`x1!|yP!RmvrQxo=# zcXd|L~Cn%qjA@ji0$_N$_maA&|~Y z?i{n40HaT;_xQCXD*X81+j|m^WR-ysUW2I;?zSMr7Zv0ZR(YcOI%9K|RLvuUXHtB? zsI~?F`6Il;rbKTti%u&QQ3bC7hTZ--yE8wf%O2gP>5WV`wK_@OoUHXn?G--J-pXh0 z6$z?txkrB%CBxloC9@VSybxPXc~iAV&)ic&6{grq_%Tw zXvX28z=YSDU#)U5hYsZo_+^@d0|l`u*7V$+wW!{>%oGtuy| zguvzG&YXQciqNQ(7SLSgklw*G{?{x3O?9fbM%A$E1W^JW9CT)U=W??ip>Nt;@y!W5 zFdWbEaOx(=GBWyAN_ejmmYXE{cE1uJoo?>hg*25{9J*uRPTL#f$!6M&FUTP@xT?&V z@Ep;2q8AmO^uDrr`X*%k=+PHaml%!LMMrvO<++p^h@{o1b~?3}fV$_5yti5MXdr0R z%w1^1Ef23bM+tK=c0(U8RNXY%-r(O~Xe0Z-6z%UnFi<{KlN?^ewWy+|$mM?|uW`FO z?Ncv4pJ(`q_}BxgH#)OkdvcU}!>Bm_u)Kppb7HyNc`Y`lnK-N-jlO&pDtxse=RudSbi_@EmT_E}Xe5>-_bbZsSi@O*r^ao=%)XgwK}o>D)uLmgNTPCuL&6A7p-8J2fFc!VVrH{6y|!=#Pt zC2i2l@dH#>Fh5V)AH|?rZeBu>A*Pt#@tM{4hGd!Blo0vgZM36>6(RIKy_Put)I`e7 zJ&gMwqS|lLry*#wjUHpC!M^aqGbxd`^V3mEt<9vu-|*6w_U$CSxYrZFw2X*1O*>F+ zoo>>w)ZDFGf9Xn6TJ!W(S@X24`Fg&8WidwiaoYWYbJI(^J15fyKWH0IYNvV7yBl*n z@zbfS6<;u;!-Q_E8Yq;~50IK9GTBKc`CUaNlI2IrBXgj~vpb>N$ZAV)KI)I~_d$_& zGuWxO-(#EkDA(#xLyfJwi^&z!K8|yNdiBCn9RCk{Zy8kAmPL&Q0tpZZ?v~*0?!n#N zCCEX8yC=8=cXxMpcXyYAySu!T+qZAueqXx3zpv_b6-Dhj1^ev1)?9OrIp$b%abT37 zn2_3<+ZPO;RJt9oV)S z*mpd#kmoPPlj_7W;$%+S&^tKr{2i2iP+3b~sv*nF#8rAalSmd|Co>tf8~^oejYWPeNj7E15!Yrmj{$ zI4>wdGfV{nCG;yQ)J_6wYl9bG)x(Gun5PUbMWiZ(xbL<5hAwbowCkL)W9==FJ0pegizX)cc z5F^zn;!uDNoq}3O+M`)W>PKKF-rsADv$GQ_k-ceMS@~4UC*^M3w@litrcmhoh(~xE zCCQ&6@?GGSr&zsYzP^&6d||%QE;~C}@s&Lx+$S#d8wQ2^$seV+yQabK{JbCh<3a^5 zKXIe6d3EB;n2;(p_2Ek?a$pb2V++0>&}GXO4YFUr8IckspMfDvLk_IBib>usoM z3=69*3MrWrCc?ST-?AVth>KXS_jzHW6dm*Wk%PMgn?rqu&Udz?Q(sraJ;Wxy(*_;{ z(duvvxnp4UW6c+2;YA~4Y$+}ZDHbAG)u+&+0|0C}&jhg^h-6BdJA3dsLt18)V>gb; zoX#J2nUa{>qR9iD0)>Za1b|UHk3!73s;TFJUwKfP2{NKz?Lo%X1`mrSsBw1Su7rel zd%R?S)5-p14@73On=J>&!Dtg75O-?n2+l`2)Y40CzLjeBID2k)XH7uny*)JZgMqDaAE&OUH>cKJOio40Ns90_l#=+ewfnf-NQU7 z?5p`#3}rXA7O>mZ$))z?wYZl87z#h^*#ZH&KK%uJH9!vL+ zAE9O1$L~ouO7b)yQkmD8c-+pdFz#xRYIA3lrWh8Xv=I%^!LJ53w~@s)^49D!0v?#a*iVn8%~ 
zNq_Yhi3)do634>;Op;~2v<6+X)QErbmVXhLKlx&zWzPl~D%}8l9h5rd{&-A@J}MIg zak?*3v{{y#Qts_4@Kn>cqA=VXbm_fadurD_ErWS|v%m%|g@_whmwJ-0Mv)Fm7ax`I zLfniTGAgBNh@HU~4Mt+>jFi2YH~{R7Km8m8Z4%_dPeiD~FD$_;em&nxEJ-?pR<%GO z(^?=o&oas1FNH+?bafgr3J>62iL~8d)4@dbl2+MYGRGo}tp@3x46P0I`MaXY96U0< zr8fn^C)~w4uwdhm9{=1P%U~}PAm!=zL%qFvf4QvU_nfn>!2_`C!oDPV(U!hdEXB-wtfDbQHk`ok+W5l)Udn7iYa-#;P zeXUUTgelf8qEKwC`*`()sb2ftm%VR8*8x%alLk$!x)0@gce030)#?Jo?f_bD_ktE~ ztiIM`tSq0W9_@pO6jcs00uowAj|EeB?ErX}caZre58WrOnG2!n7WCCcXFRQ_jJY(> z6@9Ap#?q%hc>l%4{6)%q(trR7Ek0LS~{TuFAmZ4$H*S>ij%8oodS7d;i9g5 zNf1XpbQJ*g#$e^aE>{kp9$DN8A6XK>Ykg8Ax+A!n+^g=6l%2jM3LZ1VIUskaIk@LK zQjcvtyxs^v8Yb^mR6nSKy6_bU8d)T+(IX@nt-PhQn_9;WWP5VJGJT(kUc)JOB?dLY z$>vdCbvCA1AKNVQx3bqig>xHFvS%q_6;GBl7ZI?TFoKgbn}%4wY)RNMp1 zIcm6%LSAizKEM_w6pI|=aC#!$O2BmMjJ=}OhDKSUIdl~DJfyN6utylgEkJ9l=Oykj zyT5YQlgVhRAoe_HKlACnvK_Z7zxWCI0_;b1rjcHER^|SHv*%X6m?hRnH+utIDH)!2 zO^_Y};*3dTbitoc#Bg7|5 zX&W~dA7ClKhj?AHPK*0fR~E$#&S&#$!7@n4rASC;cbmwCxl#h;Xj`#c4f~B5f~bhK zHbSy%ci}3lW3PIRkD#V`RKN%K`#Vs*uq}fSjq+B+ ztsHE{lp=U2Vp?ud#Ky&Z$tJ{IT0ZTEDq9)#K7{|xsn1fOx_~9u`?xI2L%l&dhY9?b zC8QVvz>yy%;7C~1rN+|5h1oM@J&)Aztx-rq0Hzr~1_}A7@YQK(_Rfs&glfl*ALH1$ zR{Z58lmzJfMB;t3NrmU}7c^{Z@(;tTZL2ajmkc?#O@R2jdWm*nnL*`YPXm_T7K=i3 zIufwssP`GCT^4d#n(1^ONY$D2G!~3$Fj~;(>jrYs$5M&&T~GVhajz3#qf>iTtYo^_ z5M3M~2AfGyiQsn1Kl_fiuF}I6mw51Ya(`H@HPbnwlJgUOEp+Wp@T=6sdof}c7F%qs z$*p#O393oD?e+4s>qud4aUH!MA_Qen&B|`PaHwC$Dy?NgJ2thx^uF*!y_4x0PnGHl zFq`OpvrCK+;0q?;z=C8;@UXW;v*|aCX^X;79+A9wVXL8Qs|R%_70n zMUKeT<9>-_y9$&72-Jr61!d_7j%dLv?R$M;#`7jA_2aDWFIB7bUj~)x`m4A%)K!{s zfrqfr#%QMrP)^c#eHBx9P9C>!)Qtu+D?oB=L~Flk!(3tKZ~J1rq0>68Q$8-|xNbdCjA^p8J|rv*J^o9m{7>IYgb?-yvh153%qTaEHe?@qBk=@Ub197y z<|TG3(W6t$#N(QiDfWZME%>N5n{oWT5^-5~yBXdu72@7@L0jGz3GaH&W3P>Uv-0ek(!q*mX`9`Sz-OZs{Yv1YO4Zz5zV@vqS+&G|JN(bzt5EJD3Xvu?9TNBUF*kH6FQPI|7|DGY|s#{FR;Mdb4n>1DSZfT)e3lb-&~fW zk0A4JwXA>QrwcF8cyW2*{P;}fBm}0<)YMPiAyGvSl0hkkNb$vWvs9|gm5_muaS$4L z&|71L^c+^rZ%&vVH#kG+-UNxTUDrY#AL3oVL6aQ26XN9JtQ=B}#i6M&pN_x3zoCLF 
zK8LDi3g-Id51Pgd<)bINaXh7MM;GdUivtb$>LY^N!)|E^2;D^jtAyn&=OW()aM zRaLNr(cURQiy*@}9-qWOi}m$E|7mkVrtUejG()5F54qvq0lSUOZ>{kkA36(o`KYXZ z8{H>EX|@rk8$7<0KMShd58Dg#iJOxO_vC$N7Ul_rN;914-+`_JEe1d18qCArKH8tp z==IB%ie4wv z)EdZD2jTn+`Z*tdDlz@|P3-+MO=`CQrAF*sSt!jg@o8A@v(URVPB8IGCv>58g=nRY5?z2(qGZ}MI?n?gzjYY@xc9P5dV1MmZ)6WpTbv9aVoD1(I$e+QCkgT214N^Yrec-o?W1%E%!O)rKMh=!&7F@$*-}|M> z^*1Qas|aC25C3?++@xm&l7iK$}o=%Y2vX_4G=hb%|E9DmGg$H=oxYQ ztKhO8$J$TkBgt@cFEGc{G?$9uQ=RExIz{8Hohaj=lb3xa-WuyQaY#xM4kygK&6%z= zPtLPS4A!1vIiOy;#`d>_16EE*C?_WOWWM;86G<&bN3ctd+~ZU-xYQoxT6C2d{rM2ham`Pb<70zt3RM_*tCEDR=g{AHuZeL5f6M<`6FuT73#S5hds$|9xE6`3T zJD^Mz7jCdmRGcFT==P3+sTF=jU8$~NfItMHz8;?TI?;=|u|nx=I$kf|Q|O9HC5;$H z`*S_W#KHV}ej6N|h1%f`8=ZS7c>6&5&9p0ahYZ5?Ty%rM2A%SYJ(pt63i>`d9d#y^ zs*BL^LVaX0EhlGhPZ|cnvx8ER?e6roJMudPes9)Jv6dX}us3tzfEGGl=Z-AV`Ybwde{D?n(u%VJc%^;B zZHwSP_TrPZ>nUU^j0M^ta2OWelFhcMKbWq6J|x?)wVf~5NMRaR;{7g8JG`=|^Z2{c zS!z;ri~^lg2fV(jGZ*7E09duO26rftR`C`b*duGlc z|GK%a+7DyAx=XJ&z-i~T_&DXxn$QD6o>|2Sw|#bR?lp_9jjp!O0#!5dkYb6Vnr^zKAU*7%#ifQOZ0~HWwscXyd`*d+{lASfcy1bZ5#L7EEPaz z)^$L^B9wvuEnnuBt2dRMO529GU&QsX^hS&EI)PMZ#PFR+Qs&Un2^!x^qZB&K9y7@? 
zo~@xiq8#s_bo`-8^b;+}o~eUy}hs4q1Gm*=A;cS4oz z8>ezaS`VJU^;o&=t#Yzlfldzzz*BHte;TEpKYQlUVj3r?Kq}JoH?t}xi2O1euAnpA zV_@678PjGTVi&y;tbHCE)9rPRJTV_9PLDJ}WR5!qAHVDg!-CO!7GCNmJ~)s-YcJ3& zZ>Kwef{1+tiJp@fKuR0nlqr}u5Kqg^(No77t3KtVohOCQHC0fW@huoCjCS8AtBvg) zJw2wdF#R>Xp)v!-L*dt$qQN2Fpp7a?IqW;|+T> zN)h;;<(33?sZ0b?F%*5iD|vJJB!KP8@c_ArW6J}P2%F_hY&_!u4{|Wnl4(R#;fNjt zE$|ZE*HR`7*|jCYLhZU?vlYb>=ow$Rv4l2IFTx z9-PjvISuq*Ng{909ihD__9c??xZuGMhmBp<3GGt8c|bGOv9nVEnA66mOkJ#Itrp-s z+uEQvh3+I4nz3wkG=h#j;%4jNKcoWo4}~gvR5FE5uMZc&1Gl@aT;~i9BrYq6$7_A7 zEpW@**-FuV%wgX$7X{1H8(ZtHG&xynpQ!mxVvp1ZNuQC(@JSy-ZHM3Bo${q<$xV1aU;lX%hj(-b-7d$Ybjt< zHjikWNhCHnBGlfPUQ@gr(><37QvMWS!$>SoqumC;sf)U+%G#0H7F}b~nKXMyixG|| zNBrqvQT4ToA+DQbAbz}wM?Q9d$K~}#1PzO@$FJD1Q+Rp>O8wQ_gXI zB^7BVITBE3J$vs&@H&=X#td?3VI+_P8?W!*FN#G+IN+=_e6%H{g%$1>b*bIeaql45 zn~!4iQ5zUD2&MVjo#B7vK64;U443XsC`gd_y)a97<*ouKYQzfEChyWS5yf(D9>DQFO zUK5NkOLc#0kx}u+KJogT7(tkWTU1wplP{#MVQ|sz1Jb6T-v`mw21JV%tH>l36Ravcj@XwJbRAY#u!Jg9@Q)R$-@ihj%yxz?Drs% zkF?!h%a{x5S6)P_zK`_FfZHR`g#z}sk@W?KT}+=oi?8MVax`D4gU0& zGR+0t*1n=$gFaUim8tEVgQ>oNr?jQCkv257B1%?2R-IBY<+u`m)@02?_4q+bu+FbH z0JjFdt05Z=hpGVbnk@Y%e|Uz_IzKKlGggL3ibUw6mDmWzlixl5<0P;kGt9fKZm7F( zF2g#7!(_airf}wGb%1xo6X^o5uui!f&Gm~c;!l1XC@s=xou}9(_d+?v8W%<{q&0a!xV}BZi+}E+R@urA+X)ui`3Pw zyZJ$H&m;XTaoX}g0XtjlW*U~&4hcuyq_Jrxnah;T|Q8t>tQ&hG# z^iHKkEB7))kPqh9Q21O{q+%{!VUm(F`i}Y3)HV=RQ=5*Yhof{z$ZOnOKfZ8|7z%2>|ygBV1saoSTDqLe( z+-Kn{zD;6rxG`vRotlr7tmf^0zUoAg_vOK@Ls%*;B8I6qo!Ug;@_QT^Px`5$E+w^r zTrT5l0wx+y%++J*;rvkA^u%(zFcKzNi;$zq?_Qq5J1R*vLp~oH#abx4>0v8@M|#dUE~M-> z7?aK33E#o91#60D6|KKAPVK9fk>>R(Y6Y=E2W56SrutWT_q>Omu%tFgDR_*vd~Bl| zz-Ak2hp~yqbZ@QC97*6=Vy$*r?EDe5q|+8hjoADp&O-6s8^Z&G&iRjb7Y)1at|0^E z@+^j?3>1Df?XHXjX(HqBMgMPV><_vIg$7)6&fc$${BIW1O33IC^zEurg^hpaqbl7X zPv!nJI^)@Vvc<#q;~$X-R9u*ix&~=9=LjUE;XUuPt|-DmJ{yeLAoP)N2Ed3hW*6BK zBPk-={zg%c*gflLh^ecNR-tEB^-TV~g2eF!gtBx(__4mmem9Azhb*$HJbl}%EY;<) z!t8MZxruRAocu%XE&Vl6fQ-gQ=Tf^x?llM_`9VLnd?7O&Sw{yViIg9T1z;ROQ==~) zBN&3T67$)?q{*$Pf0??k19gu_zd+@xa30oH``{?F_}7ZF5;Cv21*%bDvGtR-D)!B@ 
z`^rgG%q+i0$&hb~!|nJ$DkO=!x5Nvl^mSNomOkh7IM`?->#eaIS1l69_32UY&!N`! zA5n+r^X>Pm0Ro5#MnU~wDQ#T3tr~;H-`yq;1&$QLCtf3Fl+b+gIW$=iux$FMn(B9W z+SN>|@lfi9zi{19f&L#{(;EdWjx9_OYD5s3kaR=2YU}J0b(D4mI@x03c3_l38FAW{p==w{hU3`Tjd1JWt#<4m% ze2D!w)OX=d_~fnNN@txt_;ihP(O%!i?_N4E#z)EGbBdFFIzD)u)zZu%U6QQii4^5f zVFh;(814JBoG4cGec~s-5zmoE8kvy`-jSc;Hr+C`~j$TH;&AF4e@qIuSTf3&#neu?kOLUj%Ve%o6K!v!1x2G!GK#^`A|MGcaUEi zW_KpUGQ*zx? z(uq!q$Z78jE<%2vZrc5FKE1xbn0}AU7@E)R=vfjt&gTd_JoIRk&zlPU08j({cxw}? zPKE3~Zy{tsH<%#qo8!Xugn};FJjt_C=M7$`J{KdjWFFda6tAYS_k3vmdDveDowj2?`gLdFKdTv||Ez6$2J3-%&2L+-(cW#0R@Xi{mx z#IEjuM{ri6#R56Ain_N*sMgXAp~n(NdW)0PXdP4_d}-HO#<;j*DOt8>k@Wd^NB^Wx znyn`oaIGvH_Qx*Nge3{4Z zp0XH6n{Q=?7s)dv*%wsIBdv7k_eEBaX`*$G{4V2iPBniNF!DtA^D+iRn#}BcwmVVe zMNrzc$jT;W3iNs5sTt$8;tk$a21K|keJ!ikd(gH%cveV?)n7EDEMMU=m&8%8iH53^ zmXZX%(=cAxk#zqC9-QieCgE0qCMR$+>)P8?r(NrY#}s4yTOyJu9eKU6lo1aCR6#vM zF0q3KYsJvo6t%|!oin>|`!-ChT4i5i(t1Z9r?h#8Ly>A%iX~rp59#qIRn6^yCiY6{C%bX+J)zHLSZT>wov;-+6j=*v=Vg*tdOk zyA%6e{=BaJ2Uf89J5~rJWQY4J;rl->y!i!w0n%^GpnA|U*f8e~CzvH~_h~#&_W3m5HqvFf>{40&B zPxOgnd=jM8;s=q0acq%O|7v#ge`K^|Odx9P5LN}megBosB8k{w{oXkKc&Q(Lzp{=E z(|@-RY2V-b!e8%_K92Cqs5GVi%^?l|ehc0H@gWMIfwm=3A=+VySCqH`YQb2;^iCY`Xs)n*4;jOv#)Y{;$#e4?Wpm zkO2gi|L?X9f+T;7|2}aNIRCde`wL(EE13W9MgA+k`rnNF6I%ZN+O$UPx4P%nIyb|4 zdumsBH{FDVQ>C+=Bw-J>^MeQ`Mx4J8JILK;v7Fp3KBtAg0l2hOz`&O46#w6W49Z*cc*eFr6N=xgS7q zk%fJ~t90gd0>g{YgekW*!GiwD_kvs>?;R3?(cQh%Mqd5zu~$>5-4-u^`lOH?5S0N+ z{o>^RnP-9KJC-nb4sgV*I+gyU9_O$bek7k~K=$}{PE!f>IEl5VP{RsQX zq4{!~GC$3@aSG6UJMHdt14Ldu$A;};wQ}H3g)0j^SO_-*kDvW#FMx>w8Bl9YOEy=$ zk%zFpwIt#Pr>!{=u=PkCh2474EWZQC^ylenzSppRM{hCt-x zeag45Pk({_**gEHDs)aKlp5wEPe*$N{s!ykZ`&jAy31=;LyTyBt(qauX}E}-ppMjD z@4w}|TIt$!_*^0TpL_C}y@pZ+V(1|&WN?*I;m3U-tdRJ(=^lql?pq}1%^0yf0Y@Tw2jHAeGDgJ)OL!(VgJ?A4{JuP(I47F858}K@h{-j7z$JvmJ z^_W~UD(V$aO&CfG0jG(`Puc^sX4@V87+7cV+S9f}-7y>Gz#S)8)uax58Fk!{$F86@ zm_y>?IJX6;!rsDT{b2EPJV2!S>3CAe^oj6BJtXPQgyhtu>0C|tUx4XH3?GNGq-aNf zS39c1>rJ$d8D$U3ml|>U`YHgIWr1zMC11|7PngWJA1cofe#0fhu%!5S*{B7#mko%A 
zln6^02z1KU20Sl|@&2m%Y5W2ye-ws67u)@O-{X4BrM_SI8v^n?KW$Xn#J?;@?K$0Q zA-zFEsb`XtXqjzfhy%>&SgrV_%J7Ec-@a~zeww0q_i@Up_2Mb~jDyx^uH;?E;b~b( z7joS~pX#w25b?==(vj!z)h8w?O<+4Yo9`-MewvJo1#FxCwo1lY!tIUbNm>A0co1CL zy>KDu2AucQPN)zx3nZT4AKry@_f^Ih;>NAsuu2}Qjk)SwUgmkWeK+QfSVOBO%>CFa zW&5yc_O8|40_mlN6_q}2&3pIV3ZXSS zTaR(MIJPNUOp|zMGtJ({L1~M$cGerOMeHpOhc8OK`em$yo<_Vr+9CPTTtFMrvjS%@ z;jT9dj*e68q`EN6`azCT%k&z!~}>~UBGDU*1mqXVf@Mq%TDHW|K^-f<<~|u)Z(LP zh$GOB-V2+AnoF%`qWcG$n{*h_0=$i!KotT}%t?#~!e+PIgV|#5*Wu*vMWxOB=v7ZU zk3VRn#u@Mzxow>f5*YV5&H!!?KWf>?fupyv3&SC&7sP{n7SrORniKTYWywm4e zdYfheNIimfSRB#IwO15b4`Z@9M%fD0k`rjYVJTyS5^@t*c}ZB{Dnw#F*x3Z28{m@ZD5#A5X;C?>g-yInfY) z2>lrx|DIHL+P%V%S*g|mRqiY@!tGRZPq3-zs z%pG49kkXS0wvl={tLdV`q4R5V7PN;wazrcLy&v~?l?j*QA%PPV=YDs2>1ZN;mm0&4 zTk%inPUkHy4!9|4i(wU0Yn7+&Bl1J_ASFeD*Fz~KTeIVP7K_ozGL@pZSOS+3hXC#S z{uh(?7h9YjS)51{3=h3xL)+v|$8OdPXvW(bU9tG!f>}E9_;%*Fp`BI!<+hIwZr{d( z%%=*42#GC7HnHoIAX<&W49=r2%N1N%C1k{l0U{3HQS)*A*)eTw=yH%(T{z?ZUVYf(s&W9!7gBl82 zIVo)f55bW_qJ(WE1w zS8)#S7@AZFoa6Pt39sJA_gcJl9Fzu_Kua}J67VEMXz>OV`j4%u7l=3j@~Ae%uji|V zIcl|M9F!M&n%$OTxWHXM;xI@8T;jsXDlS9;s%I;o&7D0dd4@Owd={By-_9UTv4G*8 zHsfk7Iek!sB)#3NdS1iyP%iVA4y@}^;qUT!Yn3@AP$0~GpLDZ|E^2CozsR|UJMVqV z`-Gr_H!K9VkI$Sr(JJ6-_c*X(vNG&yQ3^!bb~xiCBx)6Ys|I`RP!}F%JA&N(nq+-; zSu%gKkC)s()H71I>_0J}^)+NL))6fYWN8{aW1T;^Fx$53P3M_Do~JaXKc-ZSHmX;p zLzeOHYaCgwc$Wzcj~~sNjN{-UH>UTkbPgLUCNGuA+E{Cpw7d5B(|JyVKgL<4yazSN zS(UQzzjPRE6dVe%54?i}+Hbm08AhOO6{@x9rP0dXc_ zk+sPNO%Z#Jfwc(NN9l<2}|uHwg}r&H<`|4UhM6;Ye9qkAjEcGW}c(P&D2(dyBOWh z%2tcVW9pAC^V!vyMS5pzAh}+)#euu}$=XqO>C{gGlf{cO#3xW4u3 zkzDyCPc{5;;2VW%hlD5mJ^!jeP_^rHeca+h%j&kso2f04H)~k#(RYMizDQ!6b3xfL zWMw}t@Jw5G<<1x|E@=OiOMXxFB5;~1@wUBG&&+L;4omB%-O=uTcj@-pbO4kY2z8&M z8SUDjwJpEhQo4uLfMnKEnqaD>+0=LEN?}j(ol*&^IFvD_(@irfg_-4DKnRp;H&hgg z!dN(8rrFUyZ4G84zM++PPOL;QFPe8TK{d#xxGMfw;Q7$(mKtjpD5by513~J9x_NbC zJ8%00Lo?KOmv{j=bE?|RLGub;x}o$EA~5k5vP@Ps8x9F7lFtyWd9rG){i4OQwuc5T zf#i$}mluT)=uvw5KJeU6X-Wa>;b24(aM$}`Oz-S1?~GYKqNXx((NU7s*m`?JR=rD& 
zDh~ov)N5iK+p3o9OX%y}y+OT=5>L4aqV`<1&MI47=aFnK6IJ((VOz&u+{?+4Aol@1 z*lF7tnSQkYkgGDuP@|^nh`M$E9n)J}cdG9;bOtz)^3+Q_v!{cc-&OLsym#vWhlzKq zjl4A&!NMyg0n$I5Y8+X)JQs`CzJl{CUF4J3J12LoV9Xo9&lcPOvtN>w-NoM4A2Sn$ zYXzLTInw4h61oFF?0EzeG(&!NLQP)w-tQ9D%i7M1dv2-_J-SgoR=4LL?P1V7*Gb7# zhBaztgS5CZlaV)l7MC|o5I95Ws@?=`Lh5`nT*4(EStbnY9S14Q4_V;B3ixJooflP; zdS{%8ltYl@`fM*XNb${5lS8B~ytxW(@wsH91DVyt7t14Oq2!NT8~`|Tk?$t~H*WzY zlGs2hOoOoSccr~4^sc>hObRDTMd}g7?$vf*-Z`XJiBGGbKkzW137mw^e7Jle?%X5@ zu2vsgb_^+kS}>c8XjFQg-BMbZ;de{iTp}$Sbn)&~%p62}+O?KUu9@g_VooCs8l&eK z!I>||v1K*d-wbyA;etUi@uj6X)A#!+sSRAol`UL@KAq)(pq=FgN~U<%?7oDq!+9XQ z!7^~Ag}~kpd)L9BXW7pDl0I#OgEvHsdsKhYP2kf-*sQh7#$mqXZDi%B!rBI*+-{eW zs_QU9$~2EAH4^<}-GIpsg4zt$>f_Rmba#-iUrG9;Ss}Un3{yr>chDM7JvH0H8dC^Pzt+4Li+tGl2B*?N&1t;Uw;%!G+F)0(* zPjipD4I-}CxA|Ib=2h7~D<*jF(4DV4iYpwV&DTI3kla}XqI1^`6}CVAZB@qekweh# z-`niyF*&u2Uu9r@qy>bM;H9P5wz36nP|^Uo@l#2d3d7(OG%^C8TVx;72+RTiv;_&phyQSj$d3OA9Ij7(Lkx6RzTDDu_BLNiWHSTRaxf?x|cK ziUOSQpLb$^<~i71A$Eg-wtjw6@}fX#rf4S`|1l_#&H$sWjMb$S*?T50m&(1MjwL{y zvOy zHjCpH`n1Kb@@5WA?Q!U?c}K5K0mh*O>&U${a2~V?uO}y9ommyV|6^&AU-p{C#mRUP z5f%XZHHb(enc~YC1zRI@N@T9H+BwZxd65zsI?XD#%2uR>ZQo#*MORn;_pemW9CBjs zNw#?=aq(1}-c+$9O+V_d)=68rGxxwr9f{RQNIDylB$jus$C0V)m7nbk* z>9&B)*|3rk(cYx)$qYeTU^l_GM`X2x*l4;By^*~b!I&RNGeaM?f#xLg@(ei7CJ}@} zNMAB7x$zSG5N}!R1>YC!9=reWoTZ$i<5fVs-_mOvQX;B7Uc7kX1-sP2BR4yNtRj~r z?i^7fLNowNw8x`E1mXqQPqUR9kr-KA(`I7F5R04C79;0r!mxwdD$d79XUNd8_xY$} z#3lNTD${9^FErwg^u4#u)2kuhtRLdOQNI#=2mzCxI84^8iV8KG1)VX`H3_4gQ6J~V zk%`7~5{PR1E*b$~%>0x2geYZ$v$0;$#x8D#;;Q}5O%KiFBRytnR58cG;Mv6I+w-DFyLc|@ZhJqpSeMbXQFZ@HRmoZ zPGIbi9GHSaO`dPk7cW|kq92hC;1QdPtNzZBwFmRb zUw2S_q58QV6XlE{Wnn#59{p*pqY_64+>pCUEpmZ$(J9R6+L2I)+-XI0(eM=1$9rL+ zf*3kFkMJv@lISH*^rtVq!T4&fM za@vG^T+TI%w?xsqlHw6kLp$0Be;@)F$(c7P z!|zWcTubI(etvu1PiFu}TT+rdE*aP&Te1P-8j%{q)b;F(+GfC&N{fMx3a+!mLY+faz?b1?6l;oe zG-YLZ(t~7Kh#k zqc_OqmhPSru@^Z#t(RYx$5aeS-IdpxvKWLoHMg`WC9**qt)ETmfdlxi86~PJd4A0cujT96c zdc&>^6QTgJ0mWMS%>gFt!+D-q>!Wo%zG8sp^NQ$;qGTWe-$_3@YRVy6Ob_;n@)wb~?@3 
zg-m5Xh;69d%-FUiUmd{ANF=*RE+ukiE&IoiuBPpTc0X?TUgfA72(Jo?5v?OJB9&8_ zGmnj#tNDmosmocbUyVXtovwm`I80!~v6cQTHs^7}2^Ieq8zMpINJAK|pcgii+yBGf zTZP5BWDVPdK(GM8B|*}-LvV-S9^9MYjR%Lu0>K@EyF+ky2~Kc#Z`|GG>+G3%_n!IR znd`s4gYRh0o<8WVr@CsbTD9a}ufe;E=M~ROsvpwhbhG{HT=bL9E-|y*elhE!i)O9>&>5Ym( zK?7rt<{r|(q}K&FH#Gj(lzh5-Lqdp)5=xTgb+w1Ic(3rdhAEIV|4!Ff^KKy8r`$xB zEIW)=-V8l!Isz2v#=9Jvq#6=cb9GhAl2;v|=yWqGAB4^YVI&ydvh29!NwOi{Q)N!-PcIb&@-_g>GFAdow5 z#UoPKYHWxxpr2kzG%?4NHIjpQ`0)H!o8jssq`a4QmrVrlgLu*}_cL6XkBCjS^0TIz zMNs73B++{cg9#ywWXpxKa`!8}Cm-+1<+vK6l`Bk7o@s_YM|UTp53hRqca@NVw%;m_ zHiJMbqc(hkQnQP=3i%`(;QW^F>mpl_0}q#m9i3*+(lX0nTM++A z)E%B4@Jn;RD3mvhr6=T>Chj>-7b86{D+8OhQuJ53+ z%t^%a_od_o{lN*TBM_hs!oeHsU`|G!n)KvR*xjkY-^oz9%MNWH^Du)t$Ms}NHdd#+ z3(us!FCdwow1I&X6CqbcX*k5viN5Vl+>wMg0|_;P`7u$8E>1nh3_dgn${{co9}jei zW?T;B5N%o8h_@R9ul^o-kurMX8Ndwu~#Qoc^DUVs9Ef3M_GJXfDoo2 z(zXy7m3dtCmj#XN(gluAOeW}Sf&36lkP zI(@5Zie{?tD;mb&8BX;W7J>9s1a#4rP!0F!O}dU3i*Z-PCw~BfNTz zj-0~In$o8LVHBWwa9;*}73QDTu!<9GE?%?UhovY5h}$E~kC%jdZ>rP|B)aSBv=sYE5r3ArJ6Gh)}WEEE#JOyxo~>Lb{`xtWenH z4G7PkM)7xXdQQEp@YFE8Zx&oA46MBf+KT2GtJZW!JMT$rmtFEf*(Ns_tf);)(Q8b^Mcs0#*Oir2pPCJ!iCKbby&fR&69eQB8mE}I?7=u(}EMCo`nPKp?C$A zDPBow5vwX+N-I6l;$DQS&dL?Jsqkh%^YT(7DtcHd;8GNZRjk|`5c-%SlEP?yxY&Yl z3x5VT4Jy|BX%7+CKP&UU$8H{fzQ7B;KfXPSRK*$!MP~(T-(p3__ycCiou!;4nAu&W zr`rkFaNP!nuZm^5vm)#i%bXZT-I-02JLH(5!B^eYZ?p9lue6eBFi=FX_cq@6#py3` zDTAtC(ds|!dvc;`pIM8yH0vzhtzw5ZmNkg~Or&M`?5oZ;US}=r45`Yr(yWSlzl)n* zMOLzYvPVU*V_u{yKDAi3+{5a%NDFBrz#AoOOJ(oS7@+^UT4TETMRR2^SpIzC+1X3t zIt_BLQ<=lPA9+=!5krD!yrW-5r>KBsGLOOOzi=2y`_cFO#kp;e^GO5-RT__rSwIAMzUN;w5Ha8Veh7=|hmS29Hf z9JB&-&t(5{6+sh9R6#fVZ6&O=N<2-qIwZJdUcE&!I$+4rzsILsAe*>>x^y|C*|Xoo90>B z>(QOwC5)B%H>^unOXoil1WDZ_&y4{yhC&V4etPT=>M(VmA2YzN5dpG|K_$nQ!(^^h$8j0IwTBYV8V_Cn_EQ9ZPUHD>7 zA>Cn&d8>}uLGdMm$UtLAj~e+WO4v0UH+8>-(laH(4AkF1?>g>m9G*ROS23zIEu3k8s zp6d#S!(zHl9?#^s+-cxx4?}9w z*5K6#f4VVU3Jpfh6WXqx3e4{6AvW?ciKb*!iwH5i+UA9s6@$>#AB&nOy1Wn5Q?(^ybr9 zL_EFDLcP4}W0f^IM(1W+pY)OF5AuRZ_X0eYe~56oKVaJp>;BA5;oI2!W$^@Wbhx+; 
z0Y}~I&x;n3%{Ja;OI-eIsPDc5){ z72T(*8>IRgrT$<@duZ_z5GmOOY!==6L4*BL?BX3pTi*Q}!C2yE=c)a{8AnhZ;yUmQ zBj^XGdKN4{Jk>(;cDNl;$B2#_$EjQK?AgHz12H!|O&x84g3qr+uj*kR9Np5 zdh=d#4#y&+g*~~G^q0vAB?{o+Kri~ygF;oBoz?@gn6G;K(F}Qqd8SppVK5@_7ZeOt zO5Cv_|FUB2pgs6=C69x+@C#_oB;BnzG`&afeZ#^RXJ2h9O$5u>8nlE>`SWiVlJ-1mxn` z)59f=N1zJZ4XUt{U%9#B8tv#hEX=kb5zqhGmNx7W}YcXd^BnhFzsetJ22 zyzDfrEZz1ryK%S_N^{Pcv^$$JRMUHLiWG$Z;X%JTW+!Kr!%ytnk)(p#EZO`}W!B{P zm;zQ{N)Sh07La^DfB_ikO)t0jy4rPku6s#?g`a5Qq1s|P>*Xs{dGT#7c6P(vxx$W* zq}P-7(}WiLbHj7(Fl!C@I+I+)4R1M^(8NQh zA3I9k@G1(d3)W)|p51m`nZ^BUn2dZiSqN(O)iJ}*TDagyHg-TuKR&MTyT*Y_JvM8# z#XMscacu=!^HYcUe8$S~R{0eSiF=`2FQ`4lv>bHBopW=GHZ9oWhxGDowgqn;XnU;( zl{;tEQG{8~nF_knZ{MRv3Z1}psGi)N&%8ao_i7E6Q9nCq7npZ~p`2DjH3$^WdHl|$T#O6aahw&p1xHi`F z2Ja*<)t}s65qmKIDk|-%@!Te^*C9O>nqgZQ7iDGg@HIeX8o8|D$?Ln*_*LPrC;Y%b z(^0g#L+K!$WTGv-K)UbT-I$${vWq5mauvY zw7TP{gP=mJB|;@WMc)@6Y^Do67<&D*x$nt(sOJJ#qgQekf6Nngk^1uK)=CV3$tl-0 zDlD(hS&tTrF!%)EjX{_#<&r|@{N)VhdQp<&8q$7%m!FYK8A*NkDDd@KTcW~M7u{Q? zJo}<7NRmODO*kXsKAsUM^zhP{_>x1KU$St(IC@V> zoANZK*p_`s3nE=cJ>$&4R;c~=BPx{yLbBkrN}{BoFat`c zc)lVMKovPMHz%>+&DAeLMtQTcp8V>Sy;SQ1KeH16Q@ef@S3;*!d>XdwvWY%_lKp3M zH%d`lZ_$Sp&L`m6Nw;l*Xw0um63}9}Nh?|m3m7KqMF*gqhR#j&?W z|AS@+Ewiadicvd>zGICrtS7iN%BQ$^)9l}?a+x?jy2rV_&hF6C_TGO568@4->ZJen z=1tPmuUlP~OlzF#LW<|979`Y-`^P~@PgPkYBCxT_kL#h^`DO>gXBoM}qW5uOT#uvu zq>I`D#Q6{ly%S3S^U!EqVv*jyjS`we6pdcZ#ejB`rs93TRNeIQy1UbDCLjCg*L~#m zrYM+{t$2x)Z)#0ZrA#YLVSIptgrcT!xm`y_8ZTjRe!i8#g$5TIu*b?7EHTvKJ*8};% zBN(BU1t<-j3!Qa1pK5z}Fg8i9&_vcWA(s&vPZ<-H5H=1I!nP0C6EVN;x!246NrMi0 z?7y)eI+gj@lWfBw#*OZ&<{LNN5i(R73$oeioZtPbeQS#}YXvXxgYWYdjeOvKIGd$+ zmSBuyxypMn4;ar8P!8JR>QJYRiu9W&SM77ftXV~mD)U0an1cK%%u9zB?J+1ct3eQD zVcNP2VMXhBQhB~6A`I|yTgWX9Po9C2+H(%#9=S6pb%_6WmDvJ};HK_#@}>`kV{UoHcSmG7<>ReMdvJY1*8!l%H?>waapN_jxj7i0-O_3=in z${s^?V7c2O$R-e@b|}>c2O$gzv~?_?y6`K4aJrQ`D!`7RDXYnF=gs*%%4nOm%NdhP zNo>v7GIze?hF2=n6sqG&x`$)wot)NJVcXoB#hg-Os;F=p%w5n*oB4MC(=P@Bjr&+i zS#^;J32{LCr1eV_VDO8hzi#%JXdx&bXG!AIzVSY{sOo{%=quZKXEyd+{l}!fbbA?^ 
zM{1hf%o}boUk=uJ^Zpi2vfLGPijs_ZIqI1cr9-WI^)I)UjA*8M9sQ238b)3SMZd$Z z9oGOlwS_Y`D9_W=Qp5~eK|pObj*Ao{M5+VNWTl}c-I?pQEcAqf0gO4^uA|H>EkXk> zT;m1I=Bg49JtKq&qkuqLUu*1Hlj>Y=IhjL&txjJ#}$tDiX@ z2cLv8i+s=Tx>RwFS4-MK2VbzN;_rQx%~|V9+m#R>7lhB`czsJ&s&0MSHAmIr`#D&(eA* zGE$FVKEsMcFcF69R!HAOpW-RKnnFSl`UMkl4KpJAMC=F<|a)-@u$X_48;9I5fxr0Cd}pX`LIA9h@7Vj{VTr!P8%#>!2lc1a54%Y<08_4m7OFA;w6S~BrC{`rTTV)Q zDPJ+Z?Gc)Q=&vu|owuT`W(x}Up9#CEcK^5$z>50NCt-BaKwm7s5`I1vGFqtp;VyCP z>(fDzJz;Dl{k3M+e6HJbmJYhzMxibWGSd)`Ow1Ow;32yV@%|BNVk3-)_S>1J zJ7R(dc8F7i+n=_%5bP6T%*PX+X9lIb)Ly{E6=#(E-5APHd|4g)H32Dc`fA||InwkV$Su* zhCHIbTsHDK?}@DIwPh-}qBhkrh44kwKX$n-Nipvi5BH;B(eok4K z6oiq$Xc^659e!siJ`^c{R8FA_*3em3q>Yc-DMT!eKdA_}6o7Y@cYC$!-}0!HKAqVe*AuQSbo%vH(FeAI(xUC%g4vzNXhXq(krfAF`EI@efPLic4m=C zh}JH%e^=y1@>78rEV?k|$70xfqn*$q!cTwxWI7OiIxFKAd*yMu=QfgM!Frpa7#VVC zp~M3A&ef8t@n#~upGyNje)XA|!b(`)HPtZHI(j7@;)U=Z>`7>b0x zoqzJ<%}kyC|4BrMCJuW;w^A6R=;F;abnP}_K-!cu<4+}+b!%+Bdfgb?-Ozr zu$WWKsANa%sw}QBd|s279%H%;=7vz`qiIt8Iz)rtERBZ4we%b(#VfeT(}~}gkYpc$ zyJz+zTpB?dO*i9es@X*Om3DSW<{4>I{1vHoko>ozmr8oXVJ5^tqv3RVANAH4Frt4U z{tnrC?;{M!4xbBQ&m~8q))h|s01l_nxE$dz$+X!@8M1PTPZ5y`(>sft^lHLDy>lvh z&TZLL1$N9nzf9$Ag4e}P08`^uE>xM4n!y+gX^+|C1$D8LO0KxXlAM5K4g!n8hTvmv zOd2(~=*^-~7S*1F5|UoY{vI0fesP*~#!ZPqaLa}kvAOPmiRLsND~sU7=h81lv1;kwrDlCM5NB7gU#}TmlbkQZz`KxmME2L$an6h<&LiSx`i>n7XOj1(Wc^S zy=RN7hB8wx44}|h`z1b$a}jrn7Cwt38aTlxid|e)$a>D6-UOWmaPu&)UPi|9Vdoe3 z&Ww~U6z$MBkr2q@ahvumij4;Wm3F9yGhPFJ^=NU+t2-^ zv3W+>-ubfg(E>4v(BykNa^wxAq&Jh)?)58 z$`N_)!_0fBSYmfgb~XH{a}nj&jdlp6I&cWw_nJVyrW=qs7@CccPsdLT#0?K@-{Flh zb?yc*hv9fj)d5l6;;O~z?gP}6sJHRt_zlbI99w*5jDw6UK8sgUR2PLP^tyQ}tHLqe zo^GwJ^mar)y@ugDVuH$d?;RKnm2duw**G|9Dbhu#eAh#ZQ`V2BfKfo);4cb>pfC}) zlHV6?fEG8PS#*|=yFd`==@gur_NmAsm(<;U0^w$JgFq|lec&&d!@X$60(+{-f}@VB z=b?`0P2jkKMU?LRm7c=u&cTXo%ePvI#x~vasQ9dOa(ENPck`}8Ws#yTX0^?}1SOwp zDbGHiRNw3WP(&PzbF6)pv~LNI-7IhY@!@fz*d&2r-x*a!;Ik%`ouHQ#Tt3cp=HvV) zfp+(NMExElb_N3w9FrtlXXSDWQC9L;{MF}4w+FN}#>iEQ5<%_7w-F52kHB$W+iU6L}>%KG8F$jk}4WQSqMlkan#r_>OmE{*hvc?Q~XbI1~ 
zV`#$k_NAg+n&{qQ7kj*)VZdEtkiq3tjVb{ws#vttC_3T0{Oaqv{gX^#-q4*Df-v(s zA$Ka)vAlrM7wYuld}+S5aynekm)4f^_^Y2O#`G1W9TJ|3xOR0dBM72>Z`8)> z9q!)LmUz8PMl*+Yx9kos|1kDO?ZS}4D&rkz$bR^H{`t&dbc3-xNB#||>M;=AyyUR+ z5jV`YC)O92y0=^pa{(HSp7}%k0#aG}cW%flU@BrS?7XW)@D3*z5|Nh%dVKg%mt8kw z^<~o&^}chWBau%Scf79F0KdoSo2RQD@q=o7vrAQZ^U2th@g5$BpZp+--8UodEm#)0 zB||geaP+*|syd@OLFQuwhOe9@Jc5vUZbxK`m+B{4k5Xn*LF*GhMR842q+S0&PVjc^@>55I8}wzBA`IBh)kgx^sEHc~iuSK1T!&Qv7JN z;4XCw5;xn!skx49=f@y<8YrjjX;Eq(`B%xoQ1 zJM%IxM_W(jxSV$|pQ>5V&x?=w1hT5-Z3KMPjx~r>+xxv8G1S`#Jtr$L%+(mW(({RW zPR{dA8x8Ka9gPo53;Qq@>{n=hzADmlZ2`8UY`3+4HEXy^(tTW*>KgP^JR>Tml_zTc z8tc`%K^*e%fyb-xc1h!OP5e;lq){=atc(O1IMW+ZB9);62Y_Z$zdB%Q$#h~|oWM6X zEa%|wkcn8w)b9t!ey>}`{KlU=J(m@us)zL>SgebX?~1^3wo!LJLC|~5k3nQ{JeGDS zu2z?>Ct_hKIAW$5nhID8{N3{xTu+`gtE!`=O3xXC-aFk|jJdfgYur~) z!-Yj@wnrntD%6*}i(zV%R?vu>H z;K84!GQHS2tYSjKOgQk%?DH>`o1sK}PYnbG8C~=*xnFTL%X0C5gJ!K63pXY+cE1 zI|B<%7>3(6PJAcMGc{M_(iS~@7ofFp_g@Q5#M#^0iZp8c$1}nZipmWRQzNPAknb!j z!GV=Y^+qMt@c}`BkrC)7ay{moYi+9Cr-TkXA|3fdTM(yDq* z#50poH7LxwtuU{a4UFXTsO^=UY9%=6+b_cIVV(2EzPC^-y6!R2iAC!1y5=>+#T;)b z+6`?a=sECrqpUD2<{G!gi^@v7f*TkoxsS2xtmIB;E5enQ>^W_YO=J`G#yzZS%_F}~ zPDaT)y|{A@k$)mTU6txkybM{zQjjsdtpyFi}2_yUaj0tiCc9pq1*5 z(UOKUHo)M^qsFk-hbQ1U%Fo_UV1~O2XH0t6aUEb2$ zmr}Y>ICpN-qu2I0i;oxLXV+iRWM)pc_!bgH`$;s<8t}oG+Z+}}f-N4zgfY|Q#85#r zh%0%bL^d%gS(*!2M^zVX?%anaD>da{oVsj*P|{kwWA9HN1)aS8&=)HTM5t1K<@bxsBH!1C|m zR%ashao3@74*Eoc(tu$}a~}S%&mn1*PctxOmu)H2up~sYmC_MF9<#?)`EkoU`V_Me zI`Do~rNF2UCwXwMeJEp>b1Q-f0=f_@YnJF?$&Lo@bn_jh0GCM~P*tA8A~i5GfqE9>BrJ&bfBeuKcN-Q!HtLYFvGS_z|4@5DWKui|1Mrcu4mx+6N zMP+MjRR74B7_;L$RQGo}ivcxu>1`11HZ|3TqdQc|9@3!5v-y|{Zw9yi7QQas?XNlF zsk!;$VeO$%2W?m{AFq{e3r2 zD-4&}rOlXM*{CDXNzC+Q(eyNGbsn34&HWW-N#;heWI)Bgl+_E??r+qiOONs7nZ3ok zsLgNV3eVjq{~&kT`+;Y3m?z;RWGnFObZ)NDo(u?0IYjKASwVm=+c%X&A%DZ>Es5D& zEo_lBGti`mN0W!&R)1;gQrMYY0GzsClN$X>tI_>8pOEIq@3!P#MHwT|CSXlo8V8Ju za7=PAbd0V3>N8e9&&!(qzez+aTvn&{j0yfs*bq**m}RmBKlEzG4)2FV|Okx7b2$L$quWN7zqp= zPAqd2;+(np>}}bYF$t8%PUZC@3s8VW(=%!1Q+}aA 
z06AD#{@A*0)TLqk2poogY@8{5xO)#C89mAcG{jf$mw}f+qQA=AzpVs+*;*AyxfO@g z({#gLC&^06axK!Rz-q=ROX{{ON$q1<#w+U$kJrcjD<>-^@vFP(FPcd7t~>O+B0ymj zU+2j+(RdZst*0Zw)F z$`z}ga^>>ydOkbxPQ(UTL#GzqgLC zpc*MJ+Evt5<8SePsF;7cAH{I++`+)cHhb=q*^Yi1wWk|gUei>u4R2crKH~Q>lwf;Q zn|_q$n0YMZk{`!{HX;j_`B}HBYWtY`zX0o^WTEAME`yBlWhV(#V1oB>0L7Ew$RDTN zp?jy8YsN^r^F^({!YQqT3NAZ$UfsAaS%AiwweaeSC;j6<#$uM5Wm@(YE8t62wNDy> zZeF)=Pn_BVhxwOOOi6(zNY-GoK0;u+g&U6DxlWtKQhxN_C_xGE{bSzmh&7HMvd

ZQQ$QPVs~$+ZBpFMGDgZBfaZagd&!#%2;C5Qnp4 z|E3nF5_h^6GznKfda~_uIoJAP=2&U&OZsn9=c{{hDl@?`$ik_ChwO1v-*dVgK=;?1 z!{48&y`b;ZpKePHhJq51g9Xa5X8yqfs1}A5{*6x{FF;l*f#xduQ)wB+;u_~#-L*nn zKf+P-pCRvDL0ld-H{n6#7WTWUF){vN?`c_@b37y`&FOGL7DvkE`G?Fg*<gUE#)(F^r#1^ zh)JBS*E1V=r z^5;NjM9ut@@9MU<^8xd$iffKB-3I;+I5y7$pP)-5{_(e|*i~qkKR>F4JM@VvuvF-#4xDMKMT`rL5F)6~lfMvzHnh5Zp)4HIgi0LD9LRwCKH@76ssO!pQ#%o-w|4B`qjRYkiir_UPm3iMQEa|iSibya`hWfX%i z&7lJDme1YGCXc1K01xpI@Fzv!pEOUP#&11nXb%13d-=u&5Pvrh{YV+z`5$&i0o(8C z_NqW%FTyn|9s>%dQTLT45BN{8O*1HI06F4E-e1HBF;NojKOri|p^R!AVr6KLT)wR! zpg1CQ?Jzz>z57qJ&D`H_&zA?y82TR-4=@67zy7Bg|I^I8rGGakbdhEMp92fp?5j`I z4*#!FmVbGU_5ZDV5}<^xH5P@74nj{B>hGP$0wsrb7^dwQ`~IHV zd#qpj{~`x}{is6t$HEl;cVYf+N&I(V{$EH;+-gnf+A*1t=zkJ47$i{+Z|{{yEt1u6 zU-l^eQDzb%_%h~np+*wmZ{0BCKBoQeW$wjqLxy@^_ziVe6(R$#l$B3`hd()kNs z`25Aw{nr}XuW*2rKlI<{zZen7Z9&CJ5ANLd9KRIo+6j;zA3pE{I2?Uk=__v^^mNY-R*jN>B}i|;;6fla~{WRZT1@Zq}u+fJoKq$*)MoefiT69q)( z{LD;{<>kJAuis3!iu^LM4kxPAZhCf$$#Zy6U&0Jttnf7r*9j#Le~h_O3lTwjsc7%~ zCR_t-0s~JiQ3zghm?Z}5moaF@h-sFDg&?|L$cVv22fRkrwNL!^Y0yjNqti5MB-P>4 z%Y^$gwUmqM)nI55DmZE_m2CvodvgbGX>oE^9^a)gEQU>2>CXxd*HZ+1#?sPo8!5R z!QNZDFg~goNrBO7s&P|tKkq=Tt@h;`fB2>-p7%ue7L&*>L)$@-OIK-Qk_77#uNn96 z!*#alwitVB?T<2>2%UQl2s%e)TSyl3GlNPdwyk?8;+xH?B|Ch6cpO-)Z}`JY&bjNr zZE48L5$!kUW4~Se^B(Da93Vuz?6wAH(%*Yca(7ourgxpxSeT)zjCa(MFVN@|c;~s; zoCah{>-kEJ_ zrG`nW`v;6p|N6%(e+tQ^`jN5f^x?=%fCxf-93ePggJRm}b`Yb^+wUOjgH){uqx7x2(zr zw}}ig%)4?B!qdj^kFyFv;VNConfTA$^f|H`M-0~Y_%LHwKW;NM8>vz;%Ay=(_80QRTny|{+ z&b>E!SE6L^dRNBPgP3aJLlawIg5-vJ_P$xD(<_-Mh%Tg*VER=oyF@N$H1Rci&fcEf z&6I*5c>Xt9UjQj$mf8nO$L%Ye10n%bvt?-Hp7eU%L8Kj}Zld2uef` z;CB~(wFQA{m){I&Ewf@)1>C(MgAMI&vw1-{mk)bW7D(+WhPfdBHFe!1de1fp!+RzR zUCl+@*{?7ihEZil`8~eZGPHIEte7V-SkP0kbMq39@s`j*AA``5c$G}6>fo5SP%>@Q zl$;XPJYWzny>pzNjHl)g*r4x}0zU=f+2b6JH;?W!xL4WJ2e!6^W5J3VbrSI^QD2_^DS>i!ZV$un*`aU<>ArS47=NYvSkgT_UdNwXF`4r+9KmZ~5| z^8G4gjMp4orrSw!W%T*-scnZXpwx?;{|`^jnHcas-tbH1fiz_rBOqM6ViU8S94$?n5AlFKb$}*jtlG(4J2{sYLp8grOq02gp*#C>3~eTP^AoMs4JS 
zvJj5Z^at*>Pc^57u_sI4?+dF+$S%M+G@m;;SWWO{=)rF*e7+CRu6R6e0WW}|yubo8 zvg$SnqeC127ww^w)3REF0f}^oDDfBjZ(E>AdfcVsrnfBYXzv>)gm|NWwLz;BzomNx zGH5%m?%RJLie4}EpWRIuh37wNCE{Gc9`$f{{*18vd?MoR%B()$PbGEQ<`HnUbM?Jx ztWopwYnXk6r{8s*XXwaP%H+Z{=uOxbs&@9ol5;kMfs=RMJXCPOqk}PI_Y0L4=S6M0 z8wkOPh`r;no1n{i-^afnVeuOQfHqevYy6vw*Uam#9YgxAw80VEywRPG%5*nL_r=*808x z@bG^Qlm~m?_A-2dfr1}mcMvJ278?!$S}n%G=7$?EE;Jx7s?CKwB8!U`V(KhnZ!E|O zl>LyLz2t?B^KTkwFKQBVq+~8*Ddi69x*q?*P^1$KcgU2t!a($zg|)H?shPQ|ki*`F z3^RBlMe554e_B83K?aXNV5?#O>_+TWiqGO zVc}Ka;}O@^wwP2M^)VCGbF3ek1pB?v|*Vcgo$ z*=Z{u@ecC(!X%ct2sANw$CInfR);=R!ZNZ&G)#YkbP(1b>~1o)x9Zg^QcpOVV7d)Me9 zJ#Pxhx!J~iE}SV1h!Z>tH8H@^ zJT>|d z>l0jABl{O?pv>gO%7u%BNxBv$>G(@D>Dk!@~2<^lA+NON(I4`ER~Q3-NEi<4?J7x1HHIA z_UyteRZITrU=}4Duv);RpvO}%wmO!ZHHi;aGqNrCn)`s;Q_M}&Kk2Ny=l>_oP!lKU z0LchM5dPLXj4th? z#kDEXb%PI<%Vkjd+O6&p;Cgw<<&r7FU`Zsqe2o5r;_uwSoMAI(neF8zjp|eQi`H&! zjgQOkn|U^o_}YdTM!6PXm(sOD#j=mCaOI)2md5S0_6tWh{^Z%-!|&UljivQ8L`H^W z#by+3=C`AoU(eA1TzG0fYNi(@yLm8j*Mau&cC1WaQg=1#FEK9}Osh{HCz*;vBnvye zm;4PQ%*r6jOr#B;xha{h4py09E;C0<=FU`z)>dqC=A`AGa#hVhlf<))|QQgCF0wUPWrkvAP%?^2u_-*O){TGfgRg z`L4eiX(kUyb33U4nw3Hk%WiUlBzH-QocHNE=(e*r@$da}z*Du2sTD?R%x(KY1hKHs z{8i@+gL<_~BZQ)3_&lBfFpb57hUKHyWkp)tN~0a%Snn;1jBnL<0oqHj5#R~Z#K|JF zy@&lsy1UcUuO-4gKKuu@QCjFrD{9XQrECvx(x+9^eTt7`pGYk7T-BWQl%vNLm2^DI zxn63X<&N8j#P7fFJ_ny{KDUY9=olfUr*T|Bl*(1f0>jJRA>XT#Zu@M7SlxIHWI2{JC zV@XSSkijLj!@MD{goOJ#mK`p@rTG2MuIaLT18zLQOjg@uK$?|{>_3b@A=piY3@DX> z%bxq3nXs9$je4rioU1v9J;~*voMAg88^3k^!!oPI4<@9rLc`8UHIRW|XVg=EAE812 zwpLARZQDf<{%eKu*7EK37My&RRTKuy!Cti$KV4-*jQmFzg}H}Z8G2}C>My=v#WKBG zF@f$J_wdr-2v2*NK4o5>JH&P9ed0|dtq=~>qw^Va=i}!4PXzExbYW|>Qid)+Bt2EF ztp5r{M1n#Qp;#o^5kB9-&ZC!3B;8XIK$y}#CiDKv2I~$vi!==W4ESKZIUfrY*g%QA z$5A9;g9c|ULY@su&uKfRRE&W*rr9M*#isK!I%MV+FJw{eSiFZSWNb{y^8(8hTIpj$ zk4zn1&j4~ugLWDkliP_Q&oT_sW*g^t!jUT6q1eTwl4n<#s5B|A)P|3X5yo_I(oq!QI^@c!CEF9yCC3 zC;|j6ED)e@cXxMp_rfhW1b6qs-EU>?bJm>ee)sJ4a_`Ii_FFxT8ly|=t+oDdzb3;O zF544BxH*2%f_|or>CO46A8!^%ABAdIUM9uI_pJiDtliIVM=+Q%pxAn?13uUy$Rl@-l5f-=e~ 
zasx@Q#Dk^nN1qa_L!cZ&iQv+z@76+(js^WSKt4wSakbsgyAx4d9Piyq&Q4@jmF&dV z3Qg1*RE4QBbAu}rEgP;|H-m{O4i}PyiN=c=HZP_{B~~ z&lr?_IUb(+bCA&gR*J0kVKbzy!EG6A$6B-Z;S{DC;MjmRKh>`0N+`oE6zk7|XYv{H zE>8*u2I&n?FVGe^nq_YP^>;3GZ*V+e8=++D!VEUOA(4SElr;OafNTz(SC_bJ$O8KRA!^%9|&-QYjn;) z;BD#?BlmbKMhn_DZll4JVj@|t{olaDNKT(gJ=PHQX{4V|+p^*Hs4fD2`vVO* zYpW^j^D2!GFOW23*RRucFCH`~Y+fRWqE4|=es4_?HE=@bVj{;{}#WLCG_3Rw*8rFOfHg1vOt(oSAV0VbTSjQ z&Jv7@aKb=*)!n=m>?(`%G}9mh2*`+0?L=ibBspA=nB2}q`$$s=C!(S%*#3Qt3MO@D z6}m?)*-$kaL!8&;jj)s)@zWH3$tK24@In>HTRu6Y|3PA7Je~_Qp{BlFPcMHz!ucRS zwql970kOksxfEn>oTWAWeAC;%^64aCr^T{n>V_6n*GxR7fP2kHzUjxMy<~qyTC!)n zi#6|pV*IoLs09K-1+7bas#y*iM{I)izPEap=J?&3>_|Ey$do++y8AOL5Quksx|yEm zqZB%)ge1#!GH=q@I+=`r|GW$a2bR{>4XM}0CMHvx%whk~L+jC}2n zyK>Fm4dTpBv+v?H;0QQ|GgT34x|Jme#R?||Q{%=>InuHwAY47z!$~`Z(LAuu86%;tcvy5j*>^~PDtVb4a#%ls0WjR}xQ z5Z7s}MGh**FvFZcH=IzZ4rHa=T{A^s$u?`!)-R-Bu@?(v z6VfsSKF3EX*)+oN+8@-`%QpMk#WwtEw9c6c6=N9zdB9TzH87jlt`ErasjV;eKL+4D8FZ5Ba9X;Zt%O5uhZ;X8o zR`u|q&uiU~&Y;JW%ycV~J&C`U{}i=bD03`lAn~I#5BvBTzx>Ue)cJK&@1%cVJc-SN zBH=}unt9;F8OP~2@;Bj!T5-aTGzqJ0v{Tb-tLv1h0JsNQ>KB~;2i{GibOIZHPuc+_(Lsp^^HK3!eWaD27aK|(xv z8P9FI;anB*(_Cg18Lkv=L8?AFy`({;KY+!*dQZ3|LEhKPWb=WRRIs&(IVD4N<67r-X|GM+Y@@wGm*_$(O zDh5<(tpF6vme4X@6J-BzX*B`A?9*Zd^o4|X(xZ7s8_RWCXU7M0Bpe0FS#OZh+k7%Y z(<-JKoI$u`OMz{MlcC4oDk_{vjp(ntnI%?#goxqygqVA7NYLKp;ensH4XinI$88={BA%z^fSEPZ5W;23qw5acch?ncLro|yk+=NlKeXMgR zkq#WS4!Vn@y8%?4M_jWi0-H0M0-NC`pP$QjQQwc_q^3@{A^@VQif$+Ej5Q=!bo5hw znFJsQiY`g@#_$^_Q%9@RWXSt3c3Qrl5i`b}QPkLMoeKDdfN~yGa;TjOkVTg3cj3uu z566<%ZQFY!n=FD)wH|#=GxQ(cziXaq%-$=~bYG`nMx9M2Hd8l2nAJAB$U6nX-E z%uOR(_sz6-yj~wTU)*8y!>YrkmoLIrv!MEOLUk#09B{D@)gp!0D@0!?kCAkC`!3?6 zyA;W=KFYLL96iX7%>|~BhbV8%`qc_&4}6{9tDElas<;n9L8Wf$LBxTlFGN1oH!t|J z8Fy)`Z;K^Pv3}4fbgjDSUrw2SxVA{yyY~E`Kt3wdT^)fmQL98b?tp@4njP!=CN##z z^SBw~)2=7$9>G=ZYT3fL7~38@q4iT(ucB&#*-v9m}}~>-L~D z9c=?0pjYV8BD=`T^DZm8Hb{sXRsk5;i9bW6zBg z9Th;J3}R1Bi$1Gh?bJv!7XJ>R_&Ti*S=NqwsgTNdK2p9<<2Q4(0=G(;B0J?Z{j(Rk$U;DZ1VOm2+uv6RB4XGXsG 
zfT0IaU+FWg&tZirL0&y@Vq=MI_yth&rW#Qx&V>!%kx$DSpYr=r2;(0cXJErCE{*W( zqQ6o~Kr9_=3}ct5i9CYy3?_vE@NLupFYD97J8Ke2bs>OiRyUdLn6AWrPChcAt&5_V zOga8RfiNXtZv7;gIJ})y;`AKhb>UQrt?=)eGoJTW>k1}0ZG9$Vlsokzwi+zqdh5=` z^>90F0_Y9%LMC<9v{Kyq8yiPZx}*>edQs3jX9dB4zz(2WV25^e!MAQ6E&D~6n!(Kk zjDks8p7=>*8DKkQU!P*{sgSeha*dRO!P7ZU9Q%iv_i9geQim$5GOR(f*>(-(AO`cU z4T&>Jc?YMQV8=y*Gb`fa5?$I}txFB38)U z_ycH6>Qa6k|8xMP;cLg^v)P$V1Mf&YJ3|3Pk0LY4-)j)$Y$39-ws>fJV|)U9cQ2{l z3kaCt=P$UOvM}o{{nUqHDe2~655PTZM|jD9wq6dTqz2(Z5tB4LfN{&So6FU+Ic_#2R7Heubk&KkJ;? zqeMK5zn%@TRnrwvhkZNbq-}Kc<*b>Zg3ZM8T?Q3jtinh1(8?e9RfboLrOPRY0wqrq z;E(xqLKgnRc6w*kIdk8x5~~v8Brf)=P0LyKN<@9sN@WZ_WrAfYX`Li{dxKwDa-1OA zvmT+ilEW*viww-~$YPu~qQlg#78uF6ML4N2z1tSB_%dYn(2F0H5JR1zxBDZj5J3V? z-DX_|b99HS2cS;S?%8irTecGLd>-@pcBmKY`8T1JWD|PGb*sN`oQrOmeb-!~LUuS| zSPMYnfv?uSMfPX3Q#{ylIn4AuE@69_=57{(gBjHvsV!M;$8fDvV7!S^X-eSGq=b$` zkikR_*87Xsx)>$*yWw^8SHHkK+dx&5feLPyN`wdPDpwnho6}#(m0lI&Qn!Jb=GqJp9pRrA!iG#pYDU02*;07|xld=(3A9YkS*iEMF!`N$A zjPn;YC`3`XNlhLnqIglBt*isC0?6>IDG=L4CelNxix4kvqZquScZ+w}#%$-_U)H>4 zk9so3@16H>iN6@cl{=)YcjK~v~Jon45}YU^nE1~&(LH|SOov<-s!uS@brTfZ9jTxG?n0MIHpQQB)fQc34G~9|`HKMK9*Cf(YDl$lLWL6R2&Vy{$QtOGz;|yfD z%l*B4mkVM5zfBuA9uTCQvpaLUlC|W zn>4|a2a%rWFkAaMO29e~?N=EPw+dU6)tPo6Ke-Fc!dQtN&CRJVST4a6%Jwgcz9!n9 zl5k|QZui#qtWUb}Ewij~X?O9OQr}&)UA?QBSmkQRYho=Hji$nI`ifeZfd$cPhY5Kp z4q&-7730G^y$$_n{&GV@VX1?@8GLk=?aL}RBZkkosWiX9cq>4Hl@a{IE%{(}`uRao zbb&3~ZRW!1Q+nOI3Gn+vAV776M>R1d(p`<<>1+3Hcmr9;^D`Y*JmJfRqr`$W6eyUQ zvL|FxwmsDS8{8fy;5}m#HKPU+|2uoh<0H}w$EK19v&07urqx+Tq1Ms==CEj|mvl4} zC2**Vu|4QmT7P|29wQBEB{(UeZ-uBsFHI%c(t4}m?xby{$SWN7zKX8sQ|YKu4n3pHt}atE?dT1tY@P3&-DqWxW|C7vs$Os^y!{!%o;)^M zX-it7QEmp4-6$Ga{o;g>Mc*ArX}XXMK0|IC;X)apvLu z?sW(NzLP-Eht*|=3Yq~WTzbVM_)1)Ot=WJ8wT>${1Y<4Jc4ow*gz9ci?hjBG?@oKN z*_LRT`~`If<-~VWb08Xi2xWhFMD~}(e6tDaKD|K_K00-oVmI02Xl^1+?UXI z56sKWXj+-tUu8*zvWr9GFu*Xaj^HGUT^P(7n0GncdX;R%)^5>E>sN!ARns!KD=%s! 
zybJ4{dVQHj8oi7qAr0|Ugk&u*(m@6@-}eFLtn~03f|CsKc35}Kk33y2 z@f?#+n9995Rtd{F3P|^dtE*gj=ys!}c11X6EyU7psOev>^BJ@14Z z%I1b^n~D$4wrv1Be)7Z*Z=S+*3}jw#+XGnSAB4XM>rLH#8B5@B*UR81Bb7}tJJ-y2 zNqfC%T+tCZ8eEX?S{>$Yj`x;(DZ($LCanp!ZpnB>+=L@ne|v?Tto{u4??R}rNg@(( zZ105G@Uce^NL#wG7GNt7kGjDAoSeffjXz1(w!yBa~eUgj8YV_uxw$Tbk2g)o4e7@v?w08yDlIJuwVIS1w2FK5*7H(mhiqU zvWP4|;!Uqgj)lA{i~8xHJ6hgL9og+7;>iw=rSC^a$Nnqfp$(g%e1ug_cvFXvKpprm z`A%-Qd~c4OX2OnrPj~yWFDWqxA1$USL1s;{STq5%3hKg9RaBJ%Cb$}@3BeUvr^!Pr zr*x*jrAHSIsMKIncHg>aSfG>gt~D`#?+Gq6uCN}H*fuXZ?KqjZ*$u`|-uP^cO32VP zBs4&q2dpG>m{XjC!?bqFto_&<-Eyc^1ZV-|&=s;BEgtsCpcTQ@lPGJ22@Y+Z-V&*h zXi;=&lwvuLF{Xxzt6hH^4oV8{c6O{p`o1s#{G|~4sOzu5!;br)hBDh)tz$3Hh?dG^ zB&jtdt=UqLxQ=LyPld_#wxW%b&}~uXnF(LJWEmG94B=MWYn!wXrn{RZli`qr_^Es| zT0({G41cEp^5?8Y095YGSD~mMrLNfWWi#X5Zz zCLeyZT{-qBD-$=9eNZ?!YOXUO`?~{b#|Y(@Po0NrpE-5RN{RjSSECm(@&r2y{5dUV zUa2d$tLtApAp;CdvJouH;k&Wmxjy~&S&ggoHs_g6>Xx;foqNGkAH(11$%3|qR`oUN zQ#RbZ|D`we8a32_=MKtRm46EuIA#E;Vv&?Yi5MZkLUCnbs>o)I2dBi1=SQkT;~v*f z1e^{_-m4yW^%9WZNVGpaaALCE@nQ`Uj6IyZ6DFC4k_2>_jyy^@^n5!5?%zCw=UkrY z@e4LS&WwxF0v=|pqYj{boYrCLNHi*rhs9To&1dV!y~^?p$LjmXdv++dO3IhD_cRen z_lDzWz133#E%h2(%)KrlmVs5S>P+H0w_c!6XWSn|vCOASyO1#&3{rgIYK3X_0=qNF zK2^E$GsQ19Sn`~~U?25~&6#m&_Hkj_4lG%4OUk5IrA3O3PS&zk`v^dC1<+ zj@+nXVXL!o>-H6aaf_~`DIS4N zh{5mLSx4$N6Tv+AvSUT@pZq5u)!WwMnp{Yo&8Uj1;6NuDqH07pX|?(d6DkBKrJ?!? 
z?BL`LU;Iwlx0S#%l+uscFGvKu`ebw~MjqtSA6rIbBV)?~94<}~_-aV8Rk-&<&cjVh zHTVXGRJ@}@_7ci<#Gmn^XnDpB22g8P;T$f9e+m%fvE`?1vP-ac;6w^Q&Oh=M7bHjF z57%Te&UB<3e#S*+#6(lTeVzhK;xUaPPtxG4@UwY5J+$Qi{8nAMuXtzO561f`o*LNEDEqt1>dTq^1_y~Jaj+=I28JKgcw zi`|S^uzyKHt$kjz>A1#|JbIz`tkebnXE#ls58SF>E@;`^?r@1%CuiYHdb?%GsvWGZ z${Y^pFRJ#`P2P{)*{NebqqtaaK%tT;2rWpuN~gsz0&28g!B_~!owCG-ClOX%(QklC z&db}!8*e;!=C8yXctcOlF|2+RmMjS|PK{5x!&L;&F{cC8`SAt~RCQy+W#GbhRCDXc zs-dy+=R+5@5d7av`S{Vy1)I}18Dgv4=Mp)M82miBl=?yI$~3q*isfv%R;jI44p{f@ zir;fKE`9+%!qt43u9uQQXlpvgaOI!$r=pz1P^iti7QgrZ+H+~jq`OOT=tnPbE?wk} z&+6(w_xj8#g5j6S4l>~7je$#Z+fmTdyzC|mx15Vx00OT;hG1@HY7j}f@9=J4X4+DH zFTQPL(ygYsBG0*A_VqY`Yd{*bZCIu>yCZ*^?3q~mM)EaEVigfvcvyU_ z<5en|?SfzNlA{ChJ5W(bJCW`{cO0uzY@BtrC_k~80@p2e zht`UPdswD7-ZdxQQfWo+<++@2WB^Fv9jClx$P;;g!21Tz7bk+Gg+40)Q%E6kg+I9? zCA_M&6+|948xd_dNO=SFXPGh?C^|}f?-BDL2n}d(QpT*by^@IAf2K`z9|_JaHe$-y zn06!OTB@H1yrV=t;yZ3ePxqy{ORcr|&{w+;)S-M{ptQ87TJUX>iRE#Z6$79R(%_hb znQ)3h9rCFu!CsP$;yI1r2t36xn~>S-o2Yjm&2Sknnl=yizSeJ1E)^pmW4Zq<_a$!~ z!MKV2QOt)lGWKfGMHZ-BWQa+qyL{&Ncb(3&;2%O{fyqn*@7e5R;xjy#qZ%nBUO;<7 zCcO;XnoO)AV(?PG7U=mnzgpuYs2gt+Sg^c~!6P_{dBX&Rhu#Hc-_SctcjaL(Qm#8d zfpK!M`R}OM`LruSx|u%aAA-x^TZR zcqepwVYeT{irsSzEu{)V>)iwFR9&*xR8_r86=85GFd*Iu++XD{o$uD1(G}U=h*bu> z=j_XBRZCgpq9IyucZZqF@cAvJr`%Hb+ua@!ke(eiRH{ZrjF9m%ukk;btrEQq=pj`WYzL zT;r@om_VyHV-n3aREJ=KK5ZI%ewCcwstukM$WR-6vT578bMDVc=6TQL7!oVbIk>`d z&Fr|iY1L7(w%vB%Ce_m@7UF+68;nL#zX~fM&RJ|ql^s@d5nZqr*Fvia`6{lQgSZsN zT-B7)H$K~n-QgzHDSNJSWp$8Wd{Rh;x{%}mvRwd)J6iZ6OF0_c4VH~YdW}|Z2v(_q z=^Qt&%0r&8YhD_dAcgC^|gkELk|~7G?uoAuN*2E-+etD zAUeaFaBB~Rdp1l_|K?r~kh)r9U%0$44ivq=!XtRtWB3_V8UlP-h)^YVZ1O{?Sl@}g zhq_O-H{MAJ(ls4kUfq%g(yP~vk7I0=`kM}^KZ7ir-RdQr{yLg?b{>fXzvbcF%%SGG z=fSdV8Qihqe;T`_qcWa)6-qjX+u&M&ZFOkTZY?-$o6fM%B5B*Ze*^9Rqam9x89}>6 zhB473O!ve|G_timjh$iDt2H2-X$AdzPJO$l%ddjM2_^5PE);GfPWWst#}j_lEWJ_g zfaWayRH~}n$OZU}UnNg7eOg&{STBRL9^aJhx@nExPIunTsSDQDQn|g2=-PX@&(zdF zRDdj-yOBFxJ=V6PGd?%@RFg1A=xwy=k>PmR)k>@hL|iF;M&qO(a*&ph#mUlvJ;K4P 
zh0)&T@g`A^1*NOX70&fk^5#<`!%)wQ)Pc?nfElenIzpuL-L#pQY z)Bz=op!KeHcE+ahv>KrJ?AyefZ?GS^(Yup2@vDv_5=N?iO%A9VH>fzbH?=^+D%O+1 zYst=%>Me~3SjJBjIIwJ%b8TJ3=Cj^{p;_E-4NF)%OGerJsVw@WV=d>r4vUDxc1j+# zEWHynltHJYm!$mdIw=O}L*mXBv3WXYRV)PeU#j_;Gq$sB$7KcL0k{#gn^~5dzt33A z0zY4NOSt^(C*4sLJ$c6mzT`%VisQw2w{nS_gFOpu28s7FC~}z`>D+JvNV(L&a)0)$y3CibBj2X@c+83Zf?4Ugdw?^5i@JD0&-mqj`eI3m(I0ckZJ8t{>%F z0x9q>;XKeDQoC-;S=*LB#p=7;K>aXu`u2U|B7ju4N<@H-e{MCf+#rCaM?0CRC#5X^ zp;B=eM~-x2+6}UT8IcsU@jmCp0?RrgZ^I~Gz^^bN2CUT{3)VW8ISAkFs&k%h)3bGD zGG-oD^JQ^CJZgk9W=#w*?cVQW$Od_WkPs!aYyo2@qB6?)lN=*1pO4@zf`)_?JBNOS zY)yE0*27V0I5o%$|H%~aRw9cE9VP7Cv@ZCLim^T zoSsr++0);Zv!|!0X^egS?i!zrm2K}fh~>I=y;Gwjk)fY6ZoEndeh5o#Km=$F#8BPl zH^S-x<+p!LyfI^=Ph8P0m)#E2GwXb?(qx?()A^1G0pnlp^zJcg_KuON{seBa;HUhw zyUsxwb`?V}F>R=Ls3eR&ls)%mOd+!U2?DzUt`X%-Rg>3P|4te#y@DxT!xgy*Bb^h> z5zN*ez}5!Tm+0*nj8{`N@U$gRjjCuHJN9Hc)CX8gd_SP4ig^x9(zM&}soyxF-AJxo z{<47f{em7;lx|i`Kg)=DM#n(45+vV0arEZJOmM5}Cq$dZX;{GB@U4N-RV?%b5hStx z$pl^Koci^OFi2ispq7kszMI%zhCs}yxp<_zWHbhy!s1-<%k=?#V;d-DK_c$D1S*6h zvRy||V@r)N(hdjB;m*6zC@be1?pdmJ_Un!->WZUE-P>`{$YsrD{vM@6LbvLyX_x6! 
zkeK_8S8Qk^5TI$uPx%yXd#?7`;(%@CfhC`=;))6)%^zbzA!OCa;h3;!Y*cW%=D-Yk zCj9P#M%rpnu<2(Ok!8n`=UuH0k~?@#xM^PYSA=#(7xK4?s%J}(abqd3=c313*lXy4>I~9y*(Zj!%Y5|E!z3D}?mY z{TGHQ?aktjuoFU3FQ4|fi?4o8gMe{x)u&3}4w3D*Qsyt{5!DYSO>ckf%UWn8da3 zf3%NcQrL)gBR$vq!+1`@F-gRMb$hLhw&b+D%tPv;JnDdt;Zb)O+>ui|=Ap!|pd|s;D3g zT>*P6o4i?+o)O^SqRAKY*XFw0sOkQ9vp#|7U_g4d0S6fH?fzKV!8y8yQ#usy~WIDaw?GbMDpMZJUNeC7OF=# zdM>2Z4t1y-h+)OWE#Fx}q+bm!JdY4zKm;ska7_TWJ7<;XUIfpdG7M}Z`oFLZ_fuIo z*_f|g9h{$bbvsf%Hu1)`zO*+!51$lU=1#MMnwXhb8O8M{By{Wrhp{KXw8@-j^m7bi zwll4Cd`i&XKpPU7X0o+Fyl=W@`KPHM)aiutXu5Jh4BfG-q73>r1R%EVCyIlk3s)Q9 zL&PO9wByyFqQhXw7Fz|hKjan>55Y6VTEk~F(fwSjrPn5lrvnZSlwO}U6Oa}xXpay$Q@m;9hPn#R1fQ)n{`wmV z)+uW*$!a~Cie>8)FUggj92rqI-nfvV1z|!7`-y5hAR>~6U)e1Z5-V;e<+X0(S8wBn zddZwut1YT~)4FwAznyd#9MCLZo~`CY?1_BYQ+#1tcPRTQqbI`X_M zewGOZlVw|fTg0OU7k{1cAlvKOka0MKH@~dPG)yuJ)(AlCMcacrr zdH6QDKmSigBr_SY@Gi@OlCq?!V30e3o)X(0U1-2*w_D{4D)ykOavUwGX=_NdHXEUK z>K!AIfwA+)?AAa7ulhbHnBnhRE1FE`bz~fYr4G zPaanBfQvw5MI&&cNUDlu4Uz?F1_DF!n z3ZAY6Ijoa%tk}MG7*~hdL3U^^lTaUn%LbJ{LD}p!7CMhe{mg{++5O!E-VY#pkKi@?z0{fA+^B#5 zzfCTN9JCG+uHQoG6e-*(JC5=X3e86TyFD_Xkl6p@g-3WL{KEmoD1J61Myw;D;D_6C z0It7SXGzyD4X^+8F;)a#(X#lox+a)bMJJK7Or|qnQX;N4{pBkCzmX1qm}Oom($JnI z^1ix$<uA^`=%6*E+MSl^lBgG1L*Vov>4+>w4-YzwF)r^A)<*m)B44*qF{ zL`DhtTOso=9l(G6@GAZT?XdUf$$^8~b)OFp6tD6B45mHP5_{)R$7wfOA@(%d?CJ_! 
zY#gD<8+Rxugq;!ZpDj`UZ$z~}k2CwpQ%42O?FFZ#O*=r>miHkVLW|+YFUof$+B`pN z!);>S>M#8rI{W@kKK?r&1uB$#1$~Jh_)u=#KZjOhDWoGyg==!}_(&=K&SIoCOzrPP z{{IFi{;#zyf`IvFjl_KZ_cdYO`)%6|{LJkNGiKV6otP5j5Q{O3ndnEqi$h8IDRl_VG9Ok#hKl@zJJmh2ExuFOXe zEenHm_aojMQTbrd$^R|R_`lE28a0$@$GZ~>U0B1|Whj_`2Gfx$Z5_429gSq`5*9rWq;nD_YWTMUqdPSAMk)$fAD}dfAD~7iS$~8RDa5z{{tSd?hhVt?GGNX z^Sxs22k}3j7WLob0nPv50XzTT0sm)W{?{7+pNaYJftvqs>=?1r!Qe;xz|g?|j`m66 zHBN`&a;!f3+8wWzvKF3bD+g-Owy>oyd_w*#^kgyg#!+m44+50-e-sa*G znID{X6`B2{f^r3F)J}@8mCQ~~Q-f~VbU1)9w{+}^>A}~CW z(ElbzXdp_-UsDZZpT|Lrm4f;uPQKdg+g?adFtl*WiAm1S5No9nM=NJuhkk@=*UWfRqI$h+pYXg|t%EDpEZn`o6GT}_O92Zm;s5TX`*M2$Z%yYvqr99qqtu`YC9fj7Y5%g8 zvV6GL)3|;pX=H5iAu|hUJMvb74iNpJ_Jw6o5fS_qpUhgQeLgK_VlsPb$r>fq z#hA%M#rP2-bn89Dz#kuQm_Y{TK>E6^3=FSXh6xT1rzqDWX|q`tS~mVp1r+^ivnw*u(#qWuD_;JNTT`F&7nbXs~#FPb`ZD8`lkBil|{t50E8OLDg zdSFZI6S&*I(6ts`lE61VD)b32BPIB$$Z>guiOETAN9?>043%N-cqqrle(MHbLKgm$ zOn2D>X2gR!U9Y;#Ih%z}yk%w})x>)f{jY8C&k&+Ks1%AZS@rrTOzvCR|MpGPHV7}Z zJ+K*2hRpP2hlofjP(Tb5Gb@Ox+pDv%+x; z#X`7=f{0x5JCHI?bzj+5jxxV%Sw#MllS8b2x6#o0O>D+I)8>4GwTVKHs@_ZWB zq@JNu0TaJ~Rag+M)KFEaa!_k=hJ{ydt<&C0mm=mv-bG7fbZk(}^+o+A+KpV7_m7GC zn!a@jizH!mVuiaG6npXBZ@t6%gSX<}5{2IY6$y_DD?Y^MeZXUbSqb-3-fdh_hsPwx z3%#PtEwXf49P2&TW9w?`LFMlZEPnd-eVEiJt^F?<*ht3i^_of%!Sdoo%cqqL zbqcc>S-CdV1-ZIo~4gJ5Oi7rai_Mpbl{Row>G+Eq~Pb<)?_i zDqW-CszE|q09+^DLDe4T2R)sAlJ4GYhT-H3f{(L-f&R0Rd~M`_x!vZt?C469O@b+e zNk)+4iH8#&-U*LiF}D(ww@LprQ3`k+?p})vbWB3NuoTyjxu9ZBTdG8OwJ&F#$h$Vnof)2v2&(@SkJJcFPmd7K+Nd+1A+cgfcLsTeMHGZ)+#NvWddC- zC+<4Sbjj&Xd%#+o7U?F>;d?~_BRY$PsvNAH>JTIv3tZ!R3ic-}XD zWjrgi&Q^t!vuf8|r%}T$nvG!4x~)@Jfe%|FrlbVKm=x z!TI*~nN7w8*S%lcmUQ1S_C7Qj-8+76c*4T7aR@L+e{tNZ%h`L{x8Ki0b6`90Hf2M9 zXp%2n8=YRVuH-MkL8VOvtv)w9Cd|OXRUmnPKa=^d+*_$M3@-hbMYxLU!`ZWx?j#M5 zOG|q24_&e@?Bxv%wE~4+{S{v#TrBYMzSk*j42`pzAjK{)mURWz^^)gB4egP#E5iw4 zj|OIb8p!Z8ZS56Nmnq?8SqG)@MfxLR{f;C9%sgb@u1Q{!rR;p1hZ$N~_0O^_H3EIr zgqe+m+MQrZ(0?VbQHpJ%Mx!#K{4Q|}DM<;=O{w}&Z4rf7IVGg3-y5B&%^J9f$;rz~ 
zkx$E(+)^l~kJ`BCqmsk~28qRSZ`~U-SxLWf>!UF|7(W-qfH9E}NHz1B2iHLa+OSbC!1G%APOyG7(;4ED!3|=9)K|XlhGN)Fl))5s;?NBWO zJ0givAtH$h1iWVkyL?Wz_^UQ1C7s`-9(hRqSpu1J3NO(Oa_YFUVfEk2we-QS4UgvS02)^xnWL zz;f;pQ-W|ahC1upE&9`+`jwUn+n7&h#adV=_Uug*FI&sX^jd=1Qu{{H8w_^~q*Cou zW~{y9%e1o>j~w+y6kqJ?{O!;!`s*WvdiSX^KVQ_F&gWylU2@@Bm>&D2bE45eA9E7v zKWpw@wnZMe%5;?TEw(U)B9{C z*qpT*aU!|PP4)eL9;yAKXS>A<(>2Mc-Avgk7;Zt*LGq;KM6%*U^U0gD&jl^rp<$-H zKW&d>f8Jex)MV}^?)aFkFVm%+AEU^%Q!t%Rwd=Jz#N34Dx!Rg_fv+Eg8sO*_Tr+zq zEBWBZXRsMQW>W>g)_FMK9p6@X+-mDIHQ^f5EaE*uxF!)S;Q3O)^7irsHs^Vr!^BHc zDG@_hd8co>^;%gckWJ_rgI`E!Ew%f8dC@u(r%IRlD$#y0rCssM4DlHP+g(yz25$PM*CrF8G)^aY`e?+Pl{w8MXjNc-h zTSMyS)e-3GV_aWDD@*sQZEeQaxY1XM+~R78VsuY5@OdXv>-o-sK{SANYK;(+R(Uzw zcNqUirf$i}yUJC5kdLv0dgvGZWQ))^v$rP`vv@Nu&H(vbKgZ5QXO zSlv%6S!_SfOE?;tUur)Ram7Lv612r>CYmGV8zH|QBrnK-f!s%aPV*9}kB`Wyk#@AS zXg|R&1XeZj5-Y1Wn|s2FL8xd;SQA8~+0EJVK>64v@0x;9eoNp?Jr|Lm87auhBm&Z3 z<&oNDRCHp)I}T|{8*g=_M}J`CavwYPlzoWebPGFH@-zMZ=n(x*hjIHR_-50#;3mgO zkC&~4s)Et@ojqlSIB~j6AKvaPVU7Dl+iYazZ^BY6sN~@6AfOp!6s&5jS;jZ{HdEf? 
ztnyK!HV=Qui>mo!w&Z8lrZpseAR#jP-*A9TH7qEBukR|Vz3>gu?M7rufD|5}%Xsz* z(~+y2e8cJr&|Hc@-^S1pB`|==#@9i_+cn;-VkqwPuv-_3(JL}}Cn`S9DM zO4J@(J70Nl<%jfL1)D2V!J&-H zj`eN_Yz6T4{+kfmu6qefRF>D{uTws}h;Zvi#GTXTv(Sk2nzgte zd#I&+=TZ%Gd6Xs8>AH%JcJ z(x&<`xUz#Jb+c(i7VQk2&&0~9YfGpptYdjvB(dALD!=) z)T5aSN`a#0A(`Mct*7t)AA{G`r-;IiHBym1C znq())G7moeq|TR@vAZ!d8+IDU-nXt08Mb^2*D79xu-4koYLB0BeUH|2QyV|9T!;DN z%#7;O-JG+cAJ-QB%$cW*qnyL*BLcXxLS?(Xgmr!(@r znK@_Xe3#$Fd$+qAcCTK0Rn?OE|B4|mG-&50ZMYsXRc_g{-VuKBXoHu({?TtwX| zh8=De+I#XOw3S1-G2_?L0$fpz5ZNPh3^~$O0)hy?N+T1elaGl$3#}Y>O(4i{God|m z;H*fZ5wwkvT*{&qj|)JQfzh&wlu7DB6mi>n0)uu>k(}-m=cDII`^++nLgxSpCzoi- zYf}1U)By<MXKOL$p12S`!kk7eYcig7&7bbp*y!imG6ZcK=eF*q`_-!ojCo}L2fC7Z2 z3#T3XIUrHO;JUxYR2QzsCSbFP(@z7pmnW}xI^`TYXiaH%J`joJ1-Q1RLg8^mx~Ach zmB*M$jJuAL0hzUsedN4o;}J}7-aqYm4Gr|~Phmu&geXDaL0<9dd_JGP2YkqXVIN)j+oB3p&s^n}ZcdJVe51ErM%F zr0+zW8XUd*=Mu}08#?m#l+t`LPJ;aJF5}za(?YqZg%1G1Q>moa!(h792^{~<=lAS znCp6u$(BTy-tnVlL+WbU<=(*OB>XDd#NpIpW$|Q2$vXMweYsArD5@Aj*?lQeh$Ts& zO3K!mynjWzAha`L1_za~Tfg9Wf_&GWi*mrUzKO3N*HUR4YfJp}7~5yt4iq>G%Y<^! 
zW^(?ro7HK}-!&ciHk++`#(i32N_m3(hGg{uh7YA*os8H6L`<_(z}(d)q2my$3^u_# z4U*1G5+T{TOU70beRr+sT348bIdsISjf}?z)t4b58}W*SCC89E>a3OF28D_9rNE>r zi=xqAAN319>Zj~+D0ItcpT4cJR`^=Af@p?y68p!2{gmQ6v`@{16(TdFFR(I=5L88& zkZJwg5mp*sWT?C}w2m~U=1IjVpC5l#o)kFUsUcd1pD@0BJ^ex^`VHeX^*uRoA3|(u zeT8Nng_I|WDCiyxkabGuHLt7BXGNpcUcXJOPmRYLFlK4{^icb=D5JRmoRMQibt3Kz z&a(csry31GCH=h?8o1JlX*Ttx!rXm!B57%pVG6d2mR||?5WYyRbPa9d<2peItFo^v zg$D`V-Qz&v{I4qFP9~u%Dk|aq7Qh-R@O%*nmO1!00g)rKYZ6);;WNYhqIuedpqY{` zLX?JOd*VCan}Y?XWvU0l2x(z)^%)c|edxoLu+Ciy+e0%pbtZ9-KY~anOOA=nz(r$` zAk*&Qh8UH~04+F0U^1;rA6$2608@-|&aEI#syn&5`1fYKGRd9;j}RA@3O%r90^}a< zIPulUj!KW@>&<%PjK12;+g!$|mzUij)9lO#JxAOPpd1Gjl;nTK+HIg4XwJ;_;7WLC z`=}H(ogXRcW?nJ?RjYa!>Ds9plYJ6PRHv4MkOTdf*`if%-H?+?=l$49Kt=1SQD0DK z%kcPwH{?@6IRU^aK}sHEu)Nr)sOF5!JwAXs#Nq`BDAk zj|D}v{AQ?mxzhY~!Wb2n&+Y~Vw~(4nVlBlO+Q>gDFq4y&g&Ax=8LB;Srnt+TjMZ~o zS()8wJKDOhWe=dR$jWh}E=ojLpvajv9fs|D1H6Stp7;yra;BJ_z$>|V_d3>O=Bw{= zz3wd(4V5w1nrbNOr(y#*d3z;B4D$+IwEXAX3jV1GzT~>@-E_mN4&(B(9^>`YF*}lc z&Z#Fgztx3y0SFF>4#)MJy&+CSViyeCY-6qi)<$K@qqf0YxT$-CZ0Hjr{F1$k{F3O} z(P!T-vQO&98u*XJGVL>+VTja4yM)8FUV-Qm{jK@oQ62!JUdhfqwRgXs_>454VlhEYequBYK?0{O0o5g= zcjPC?8jxj70I1vpy{I^w>QmqO*uJ&e9eBC8SJIa!ai|q8!za6A@170sX?b(X@$)9P z>lTHx0rRBFQA!apq7MPxVSQ(B(``bLMi<-A%va{NZ!8qJD%|T#TC!T}bUv1r0%o2Z zW?UOFHT7Ik1`St_y>$GvEly%M(0eoZR*U@do(8s zCKk2(=&$Om4@#)+JPhJ3*FZbTJeIEL{l8Iksis8mhgp33TvQQM(x!$iQeoYMF0KuH zs#q`x8Iy~%8r{uv38J_b+#Ra69fy@4-SU?^feU9JyLraNA7T*bw)8Jk>_H?V#x}3V z$a)ggDr2m#{ywuK#=vBJN{~_j_F)t+DfARe$B+i50K69etw_2qoO$J3?z~#*+I5v1 zQpQD4@ymb0(3Pfr* z{5C@sI?wS|V*--8!=Yf1eaQ57YX`7F=;yhWmYLRygfeI1Ax_fPxmA z>ng;=1+wz-6HsM+KN!dh7rOKbMW+8M1d?wVIKov#EbB`C<&-k-)a-|!X5@$OP-A%= z$Ha!}NyUFeq7;M|Tr9JbmD*{ud-0WoLKlCxxdq3zE}4kFs&_R4b#tZGZO-)hLx*y; zbGYAo2Y*QcavD{x_jsJms)Ft*q#FvY41_{w@-076D+VYES_^-CDA@>9A6(+8R_~-e zY5AGC$0=#&+XLEi_{C3~CrU?ln+*{HpV1jUUrT0VqE=R{!9hYRC3xF=FduriW;l>o zvx=JeBUnEe5K&WWcTSF3Nc*s;VtK+uhO7{%{=~klBsIoqNVlY7D6*o0165j9&=s@4__%hb@_>-4kO4`;xWr|&8 
zR};;UkZl=U8fu;&CWJN7&&mA|XZeWe{E{Rr>waGTBkMK*_lp5wDYH1;R@3w*NofOU# zt|=l10g*k>{-xmxK0~tko8zc2`&{}01~Ohv_p+rLZpzws9r`D zqof|dd{t=!3h>m{`~B$owddN{bGOdh47phlH#qjpSsRn9CWYocn9OyHoINc*z>okf z%QTz+`!OcXxFBWZnKfV!cN`Kgo9+18W=9d3PLkV}x>k3TR}HWFw!~!~H_06Sc(>RZ zja#u}h^oGtxA(>p^Sjq8lhoH+1}M0U1N0F+`KqXwNofl7isCvU8jbMc0_UXbG&hu! z^LqmeYBQfaA^*FVe!=e5#hGlYoUQeReOkm>&_I`A5ECO%6LcbWRzQ{g5%{s@5I2_nj*z zc~4O9_rXIjq#uwWNVy@lg_;Jo$i;poA|W)M=6J`9PjZH7K~%jvdo;R`pYVloW_`Vl zUt7_J5)D)J5|w2u_>RYx19rr&{6Hu*2}mx6DmHezWWy?ukvD+ik!wJ4uyOQPRyO8J z;SK*B%fOkj*eBygB%|{fDmvQ`%!r4SpUM?dHS^kYeFsSHwInhV_WqQ^U(A(oXzm;1 zer#C+eR7U)casr(Kb5zeBTyG%B(d+hVT^TN5HHB+^8jr(j#);}oDO-QsXcW=q^B7? zX$clr;FDErw9wMy_!^y>ZX-Loe;+3be7vD4N~dINMT3TgXF$SavLPOcpe3oC&mrg6}+XJ@Pf;Hy-3+QhcJD=zOBaEGoS5sfMODxwk_APY*!aK%YGPJBU z$5>vK3LF@q5=8$2i7fKF4mwba=M~--klWXkC+%B+Bkh<*Jp+Urrob()LB3|k;q$Jy zQ#8fn8mymVRK6Pf;A=$VhI&S1CB2=^bBl~Op4X&5P0f1a`p-*V0zddyhVd$5B%*%< z*_}tRdqU+R(#e{;9I1q^8D84S_mD&)9Fvo5X{K!@Gj=m-J052Vv!0I+{^G2*@NiD0 zA==+CJi4K^=73sy4bv!>f4t}C4BJpdBo_RkVm&F5uWC+x`UZg-Us%|q31hqp$4TOa zqN;Qm?s^V?@LcWu#8vbBgmG}epSUJ%+G+A~1i$n%kh3hWfwL^_c){))*1L>}{N=79 zFfs39=KDy~d)tz8u$tgxmD>M@z(pM}+!Q*yFjMZ6cjg+$RXE`$L{30cFS7m4at>ZlK_` z8X1Uzht?44U76Mz2ucmd3SKsk_fF5dQ;h61509y$s7?vx*{rTzD@wZQYOxt9!&14z!76#j^5;z8gzP5!WdOVnZgo+N_w zRt6v6i;yNA1&X&2uuQZ2rA7OI=n>UO9nI0AD^t90Zea5@h=B3~Dxrwp z^%vK!G`a-)*GD5>zFD#R!SvTU0O$TgN`qMzNO9T9)fAErq$R*NDwOtB!RILyWwiV! 
zs>(L=YiFJ6tzj}#j@~pFdk6AbfevFotkeQ#rTXjgM?&q<19f!1OZJoahye@zR{vM$rf1>NX%*ZZhYR24$t{DvGjZOD$|L8H3t}Ughuex0%D_e zWGxH9KZ)gb;yj@G0Y$51k)ac?hHB%13_>{xa@;j%JTT9YPS)@C6^HIK*`Mw6mRmi* zgBnKepr!+taRbgGsUK-Aw4~B(w=VdcFbtef*`ichNBA98)gJGs=lTa9(dq)iX+5g8 zUJr6UI0-$DaxU6c-SYWK@ntSNH|GJO(TEX{qnBOO|!9{Kq# z8&5-VC-`okOI&G6Ztt9>kh3*AL{ca`ovmcL-i(9m0WOvXAsz^w zhq&75-H}d8$$UFrs5T&(g!qTE=8gNf^1%h%ov5Ha(6MZ8*|MkOBkk-q`{Fyc%#>t zA&Y&^%-bYv#z_l_PPEY&8~^*4uu82ef2}W%Dknl3XkLLrnW7Y9j*nVG3%WZ#SV2~_ zZFkdB5X0L8SaXN+8w+sKyeTR&EGnd-s@ozuN{{77+8yZ7oAn*rQdnG6$V{a49ZdsF zOF!^vdpufJ!E*=|EC~V5KQ$FU*?v#Wk~>k?M%R^S)?th>*J0k{q;uWI&s*UQRINbD zC5)fCrfQ$E2m8&2o_n%sDT2Xw2y}l3-(8$P8i5srd3`G(ty)qOrMOjA{f1zr2ZVVI zt9VDzLUbh`%9#|QDr6bY3lmP;%VJUelV!A9QuC)Dg9D_-Fd@jW_DM!KehG2XX7wukHM7OuyELo$<}R~+9SaDH z6e9OFo=hja-X|^>k{%tE-{V9lG_xVq zd|W5Ysq|pC0Fcd`FyiQbyoABf&SPmzyY5WJ(ktD=173}T!^!vfkB`S}=V|wuEp_L= zaJ;v;iLNJwMW3tfGlnMGI(}4p=h>S*@aQAiQ+1kb9m((!+%AqUcLNL{uL^`j6(<>$ zCC)0Z5NSY=(yN%VXO$7O)S;L*&bi3Sq{iR!Jzw3BU%%lpZxaiGi{wQhPeYF}63+6S zQ7vWqS`wtk45KIot*crWC+606ho#C6&)ODx3Wp=Qd;>WGib2QREf9j_z4wUniooeGMxh=kWocv^R2TbhlZ`Plqp34p+dZ3)D6?+ z-(+q76dP?4$trY6EYP$0Q?AyltVv0jAUEgv`OoEj`7)ygx@U*tnnF|X+aAHSw_ z*I|1Fl6vkXl2iFHGF}w^#8b`2p6C%unqPXTFa>5aMk<6pWmQwlvQ? 
zilFr6z?LvSpa5$oQk3T}huH_<+3ziu_@=$J*@t-ZUt#l5d&?-_n zQiRdws4+>Na3C-}$_Nf2J$^6}UhJ(elRq0&3%k&^gnF^HGlkuzf5W0U6iumfVO2eJkV$?5qfBo+Rva}2 zMQn$B_$YD8t?+KsIQ-O4$s$;T5?NV3bEq+1^tkkw-R*pW{AodanDsFZwMXK@)w4lO z&dlN(JaeZ!a(wvVRpr|0q@fhMv)8`EDa6K$1r#Bn_fq)o{;2K%tz6c!35& ztpP!c#IiSOv0rsYlz|y#II_r6TtEU9wRh9{@e{JqHhlrTKIwIC1sMg42HV4oIaU)C zB_y+?(PNWN{FD>Ugwh9-hx{B-_d~YH%9gtsw`s;Ul=vM>6v+?9@viC@YhKww;wNo$ z0HCPiAj`yIjpb4^8YY_6ZCx`jL=`E1NAdfe;5VrIk96B7nnPY=&FS9V$0AoNd9Lz< zzFzIIPM_Ox4=}lPd)q3+;?8Cs<>jv^JJsiTsy;hTb9*3Z=yrO66db0<$zwiWci^*> zd@F>=6!4Oaq2RBh*eLa25jM+AewI(i270UB?R?1UCD*imKLp3NsX~pU!JMe+uhX8N9J!fFIiUQ&+LZ!uiX+dhkBZaxWHA!gJL0 zPRD7OHMVnt!}9EAxq;n`V1fG6*J;9 z2h?`@S1;;l8}#hCZQg)4^l=v-aFcyhJbrDdlE^~7&4&RlR943Ym?OSoskF5isc&!A z^$|`@Bb4F0bdfljh8tbEy_@VLaLxhm_G!nj<%$@ZB5O_0rI3bxzT4B$d1Z*>xz^zF zDf(rlUf3;93qyRS4BpOW)B27o-bw9N_4#Kkdl{*t&MnBj(jKtzD6~Fz%RJ2=quWwb zDVP+DgSlJs7D`MzA*1ROL*AH~P=5-|{{GG5(YX*PxICImBdePG3K%vtM``-D38dYG z7-Dnhbs-1H92MWXX<_5fYF{w-iDYYpUp5|W%;TrKX9*40GB(mHo5H$0j}!5yxTg% zs{Z9popCtZ#<~!?X4FCt$P>e>n%`kJnu$xX&p}vpu*PEnd#)+G$;6W=uj;@}CQih@ zMPcrMFzvf6lPOYM)wVZ7m!${!4`PZ6S%S=1i>w?>pRJ2DYz{E7$o2bI84k%VB`=1v z^YE%!6ts)3GXp|?GEOTiA`{U9{l1K0l!&2Nep$k?R3xL^*l|_phAd?^w89YGlJke4 z91RjON8#iTMyc3B>o6b{_o(94FFc%%7|O|5P?X`SiIAI&V&M9GlJ0&~~N~}wU zof>6;ybJVg9VpM|semV}8}^W#-CrP*JHHg~LzJJnjKkSxd5!QJT@Zk(r1eDwLo?2q!Yrr&cfW7^@#n z6|2Ayd|2_t>N|dqlEae#4IUQ`TkAyh%cybCzG+(d6khD)y1 z4Amtr1j~evrHD(1_}pB0u~~2$)rT>ar$S=$&XSL{yYld1v<5(GG6V`YroRL!?6+T2We3K%0d z{SrDVbTB#;npXLy^qo-%gw>h*(EwJ6@KHRV@>8?B$CsV`q|dx9ti_j?%ztES@@@jY ze+0fIu4?TGMwBLgVXkpfW^$)-tOW>o6tSHrW(R@VIz0fz%ygVyMWhG&Avpq^$5Mgy z&kaD3sEbon<~1pl`cw3Udnd?8rOT%DkOpkL;!)3H-=QGV z=s>0t(}HEpA6OrST(L0IGJ3~V18d7zJIp~ODX%xCYwfmiJm)x$A?$dwiEG>R*e=5d zbTWBk$vtGpA(XV4nQMyoF?y3E*V@%&0U(ypZ~AGa7G=U8O^?AVK>lA)>PK+;OLs6#Fk@kcJ~Ot}rn?;e&NP(IH| z53|QpTYd_wMGWF9YPkV{QRmIgD#wU|VH|Pj$1TcQ9TIV*3L^}sZ@dw%*bQ5&6XT(Q zyl<7mF}<&Mhkb?MJl-tR;q|UO8)Z7jQFZ!c<%bv0T=KF2Z? 
z`}lH~JXM|cG^iHAivHRWnJjmRVY?W_sMzZKxi5x^vE5m^JybqVoS>+@JDAkfLf?)a ztU5I96VWsz%suR*Fy8nfu)vf|`%BwGB?PQ9&aD@YU?g6sx5w6LP4Dp- zTmNL=ZI~K$2dwO&eS?VI4{eZkb&DN}3%Q3R{LsjYT-L@FR7$vv&K_dp_oF9keEsee8lJQ;fYk^Qfk7(v{!mgINI#YWj@+u3Fl~=-n3U587t%H7M4w>O!ORBes{G%{pG?U8F_g5nz z&dPKbJ{13J$xa~yY~$?@GYzrqgd2G~^)^p1z#&TEl=GH$GV#NuroM?oDg7*?{xEnU zP(J$I3)k{Edk$(Dd3t+XXhw&C;ZdJJu9M0~7{eosRhkY2ec=48(phJZm%fF3be+-c zUTaB(fuhLO0j^KgMGMj8HFaxEe_3o9H$mR;N_ik^Gj@WrrE&I}l7`)p$h#UsBsibM z+9Te5_!@I*8U3Fn9dic~XJ$85$RCqApwKjK)?7o_7J8D3SRP$JH5*)H`FizyhaON$ zL9Y4=yI--M=R_)E&#*B;;)UEXB@U<}(c4E0R;`54SyQ#5(xZ{%%`07yaYmi!q5_#_ zNjP=N(hM_SS9(O#A4^+n?wYvP^wfEEAc#!E3*O6q`aVPj zGW1B>k4URzAn56D0QqsZWmvhamd{iPIio(C_v6CR3EL zdm^+>7Bwf-;ZC^UeRRvjT+G7ASIdP(K^!r`N&W(n9HFAU)a@T$ww?$^v|h91l9^>KARZq+`^ zNh$TqE7EQK-`_xiq>GT*yYgu|NSYkHV(4rNb6~3*E$*&RgKuK?*w6PbwC{dxDY5I* zLeg6fGjWhUSQjBb8z&0^4l=M7-RS|FpBHRf1aowiW%HI?XqoLMW9^@%Nb4kCDO;8x1jecin~)MlH*(kGlqLwm9;8FFj; zNxliW{YUD@R^$n$3X2y(LC~+WWOi3=lLiG$~(hVf3X@UH%Hsjatl)Q-JS_NUh6=GU8xpxxE0@PP4_89DRVk z^a4OK7~%F4M-ggsweBD(^UhFPuMa#yp>Jbk(`q%a_ zN3E%ovfT=ujq-0EW1-fwB@5q*xdy}6%RkcJR>GE>dqA=5&WCyl=;!GknAj`E63}V( zv|gL4nzIHJvF^~OY-6lvh5yJS#pNaygUG<&`2mZda~UEvcU_~jGLhogo@2bLclWK` zi{q+8T$;#v1j=HdP9qat>kHN={1fX86*t2W-cND>k6`Tsu55+%mo}Y~)0-4QidkBL zsE1Dd6-6)MMe$p+7AE8&7W$A1OW@PkvT76ChYxgj^3Hb{smiUCx7 zco2f`JwuK*NzEiR#k<|tdWN7sw(wta_L?IES3C`owWfK!iVG@$mWdECAYkUovsHR` zRjA%pX4;;;q5oKJxao9suYOcJubKX+w=lNx`RkIMm)e2Vyu&qzVJQ)L0bJ~^-lmvy zl?gHxxe0&nGTH6xh5yb~)cC55{r0oLiH(s6Hj4ccO^GSt2LG0ZQwX z4qlcB1bf}yereFz%BXkB-6#e)%?`^y=(RUJW8C>|60s@FroP+vCPAxt%UOgBKC>Ca z9IR-~Ulf=7pM%NlUm?yGkQ-VD+_i9w9af$C(CG zgeJIZh%c878yjZ4>||nRJRYIW7)cnU=tVM^bb1^ky-uc1Ok5GGY?^f)Q-YXr4;zOC zJBsM8<`N;yL?WQebad#wd5LI2T(J3kE0?;OUlQ-+!)RjGo*ptxXxZC^NZ$n~-n!9g0`QjE z43NZp_Mz4!JVLASy9YCm?7m0txEb|5jT&elo81Y`%Ce}%Wo^!Ctb;MxjXdc)9jiD`mtM))h(of7O9;~b&gCu)Q-X?ApIu}fE3unhjP{b*c(rI?~R4yzSz~voL z56Y6cCbQ5V?ZoGau+sBge6Wh1P(EV}_+-_C5n?NZBTHjB+W@-&^^mP|#c%@-E+#4i 
zt&%4tO!;H$gEirV&WAH&^1(K6dVDAs({b=^9I>#A!hEt6|PN&WHBU++a2-by8Uu;)3@WfDc3=qC6 zxhm?LG~pb0q{@}O9=6#u95IsJczpTUO=oEdjvfQ=XLlAeKF|S$@5@PCQi=*F9apWu zx@YI1ubtt9Hl8w60mBOiSPyRvB%Vt@HU;*^e+062ma2zScJ5VVXKutBjuCOJvUa+Q z;oK{F-Rq2oL--`|+O9witDZ{Zqx`g5aBg%r-&zz8cCX5VkFNGAh`Bk9vZ6Q?TQ>1R z+qH~w>rLmb)SvodAX!EQxD?jtKK~a&+4p@gL17y%6U?Cv$TTsGLi#A&K`pAO-xT(H z?sm1q0eoZiicdw7yY47vsY(Eb`8|B!OJk6{ZlaPX+I`b;Xk&z{r$Ql~coJZiBkd5; zWI_q8$iGIlqFMfC@rr^TBSWq6ov)kXujUT=c-_RDt;Hj;fe$JeVK^M-ab{Fo=C0o)#zZ!FugtNJSQ2+6KtCyy>!kGd6Zl@b<5rqbv| z+vg$Z2>LQ^5yZ^|F-~w9QRU9Xv$&A*EsL0`C7mT6bnrAA_r^6TWtx+Ky zyCWKtjM&tjBJvr4Eagp=^|4w0%uxd$ivElPs~W}6zodkM=OW?MCZY<2;6rUSV5v%n zM8XPvBVFPFn{9!$^L=8j8NA1^p2Em_o`E93|(w zLWiqD_18n>?<&sEk}j?+U`UBscqt!;{F#~TQd)5VSW3KI%&`^#4TK74vJ$8|`>g4uParF& zn^tlI-WsfttN3$$_D3fFZ`5N7_$jGpn~z6;pAsr7x%t8GegHE7u`m(irdxYw;I&GM z*r>R*Tm2`Ro0pJY)N`i}%EKEZE3 ziA-r?;J)(Aisu6Z8WXE<=Pf4POls{!vcEjIKAC7{K4pggV!{5;H|hA>O`89_$tCVQ zMRqgh@^&*$F#4Z5hChjWUlV?}zdi!of+s<0UoZfW{sa5Pl#3q?DEB;vWvaXMD*sz~ zDT1F^?7xqrzd(ZRE`J+P#((y~Y_zLumov}%R?hcv@vKMunj_vEZW|B%Zpppnx&!ts ze=HCF$nXSP&)>Qe%>VmqY(fSTH^BYJKPU7i1N)0aCQYgT={}XHuQ5Y^W+(jHwF%?j z+6!jc{qL9j#)1BCcm3M{`Jb2n99_?^Z{EClebt5U^ZjRyo1+%QZxKo>udigAud97Q zhT@9YN7(9}v1b(1Q&%FyO?f5N7TJEedai;1837zbl*vj1^EgZ8!72sHwmJ1~)^PMH zltCn2tgg>YVXM0jUZsaGOt-^aOe0(?D#?$HV~(S{X_xG~jk_6_t=yRkn_0MPeD6qP zDIr`9;HMAoMRr-Ka=DfH|D#*^?Xr;Yye1G*^!8y$Wx)s||BGB(6r10( zgw77u|Kiz5WVe6QTQ3B1SfjwX^LO?`Ida;7a321@7_r1}B4s}&m`EATAZ=mD42~5=DR3lB;9vZQtNoX2n}OeWO)LR0 zUX!smxTKg2{H;o{-Uh7yqf!3LL;drD4htMY0Qx_FV`aWp#83a`otiO59YOuEjrXr% zBC4JA_k6?v4$Rl=$0V<}1+|M=Is1&Pl49kjn<{=@SK7>mCY zEXTGDmNz08&+xxv|Lsq?kiU$TeqhPy_z*}fa#U&JXzhfhygG#hZ!ULLnG|>9sM1S9 z!kuY~e&~BOw)?|8`a&8)^~HyG&ag{@-skG;yd)MHCLc;1;dMAo!E_<#` z+_nEop8xE%48=uSD3^oxJEnC1odKuT`i%wZ~G8n;R8dQ}h2sF?Vk4fWt0QK#p%-JIw3mibg3(b?Y!&sVkb+ zvRqKT3TKf@5Lx(@1k!D#r~MKAyAVzJVYifOl@ZKt1e4wL9StJZr_-(_587{^=J#01 z-cO!x7&}wE2ZAY8y>0afl~8cijnGW_aQx4*Fv@25$TC3G^R0H5@1mb%^4K@TqMVMN zCKYq6MnvK`y}sNH>>?9r(2JfNxR~x{k`Xy>QtoSVJ7VaYopTX 
z2<-sVne1)v_B_)9036%B?Z~1c5npVO6zsoc-alqbIQ&pXS>7#FmfQ1EP`El;%1Jvc zgRsw!)EV#Abk|X4E`IgWS%02*XtGy(oH%0Em$F>Xw){*}%18R;r+X3D3AS=l4`PFB zc>*MwNY(H|D%h1Ja|q(`|mr9bdX*^9@W(9}q1aXN*!SqtS>DYV#O z=vf|QC%qUFsjpESX!77oC^W;~_ya)KcF@29(?f<@RxULvdpgu3wP35;Tq-4mzMbQ2 zS*vUtT>>EH14ZqRaGEdz!MgLZ_4zvqXI;+SSVfXtD-lU^!d(jtnn(MPNZx8z4@?x2 zNJl~PTwMWi#6!z?SI1EM+l_2X1*ER=93`_bsvL&<#po$>%vQ#5@=66mvb!4g>WC0>CDeyccnQ9adCc=gA2!j1@O(QsW{#*LR$RN z{gm79WU%ZR_AHoz&rh2pZ5miKXj!liVHCk0QedR91840UJ&V&h(}1`7W4@?z%nui} zS(4Yi0O@xCfS?YK`Q`<_f}<-tc9r09sopGm_AS&a=!2!pdR$PrbN;atQ-a5A^V^*= ze>oqrnVgYN|8ksedzYsb=dQ;$J90%3!%$uAic)Gc*&a||_*TV*w)I<(8e(Wt&4^^M zRL4%dfV*Iv&uQISZsLlN_l=HRQ7VIO_nR*O;Sc(`X=r-(4;C~dRDzrCI%{*bm>{W! z0WDt7TADEOdDu;@zII)L*vtjE74v^`}8e^OdRn9q0nR8K{wl9 zNwx!l&0h%6>7@?2WuD$UakGAZ8lD7~c9AU?@VoetAiIKV&!ZaZ4cNG#@a8?Y${`!; zlP1*=)r*N~`z4L&6|^2Z%d26>uAOIho(G0( znKG*D4NG%V^$J)l&>L5VCIt?BLZ9+HP-H)99{8wKP;iwUi}zhvzZ5FC>@7T=ne>rB zPZ~q>z!XzCnE5e&$>67ysyfhCeqZ-ojQ&vl)Bgq_KaJ*1sv=l*)C{ZYcE`zr&l9O9 zApkpZre&FRW>nU1|AU0!~oq zy9F0vZG%8Jx#Lyz5E=Ix82Sb&>+cPX5&0x5FO?4lD4UsdQC2u~6(7dq?(4Th+TrK! 
zE5D-bFm)LMyL1oP>i!%2rauh%*{29RFf5TkOZj&oEd^ItEi%VVIriptzqccG6+6-Yw9Rd|@XbAc$_E=I8uk?$y?S z>^YI6w-gH#aon{KIiqVo14NxFBkq*5?AF8yEM;}z8#DKz|BR8w83dnM+!a7yd0iQP z_(ddCnRfjA0cWBv^tBQ)u(IMlRbTkjypC^mm9;Xi*R_4Vo(~M zTXdklD!T*R%-qvPg-OrH>iVb;U(5m{j^UiqoVmg$N}MtJU?rP1S*+(j2>dGcmG*EY ze0Z|8c-i|@{4D>FwP+QQ@qF6e?s@FqUES=lSaIxTNdV7wFwk4XdN**{vNdnKQ-DV+ z!`AaKHxZKPuRD4}JLlGE;gK4_C{0B^Qs`H$a49&HSCQ_~mrW@1)%}5uWzM5Ce6O}* z&&dPV)#>~=R=42`4{U|`CdOAYza!yIC≻hoxXUCsCdVwdW}Ip2x$v4BERr>&*TP zDd8V`wOlqTog*d$B^Fj1QGr^oF%5P`94N80GDxN>ZKwue$38yp=M+th=*OhEez)Vt zUKh}<3#ozZ3+{<*i+T3AP&cQ(nE9qFdJm6j$A{%Nlk_&S&C}}DW|1r^)dI6CTu2J5 z%VX%XE8ikYN*b4E^)?=YM3gdY5?PM=d*v#oD^`6)k4hpUS!UbX1I8-L56^6~X@8AI zq;q7=(z|{4Fn1QXY#kGhSX;R1n*b3_4%6pmWVFFOZ>={fj1Sf_>|`3B)U^Q@PFaj^ zv=+*rnf6bO=N!v7&=KFD7<6^2W-E zsA1W5+zTd);TxS!0GZk^kJ}mcy)6nhcA4%G9sw;bj^jVqenN;_`g)$THtiHHvv_$l z7bCf~-N7BQ9t)(igxg%DW(V!tezH2I&tx0yt7W-$f|1ya8cPRWG;Ho7s*Wf}pv6mb z6T1x>=TK{SyDOK(jup~Z8d!vn9qlPwZZTPXJKZgvsiV2HSE4Gl&s{R!G0s^gKkkmD z&%1;_ag(mx8ZcfEIedv2X+prdm1b&i&RWdY$mD)e3uY~KeGO#4)xvKsG}gvEDrs%( zTfG~ef9zq$YSOaP-u*EV&#G@AkH~6WWE1a6+u1kStAGEtnG&XTRDnu8;6LuQQIdR{R|n!;Z=V zoCyxP?=Rkqm}r!#M`|)*$8XRE>lOMBcLNn2NB1JOFSbZn>treqC0O|Da9M4Q&H&G! 
za)>34N75!rWds^BzO5Uvp!Cq)cmPzfy>wuD`69vnh!)T@`yp83fuCO zGr|IVtu>Jt%$p?Fzp{-B1Q~P#Nh-!qp=(Qc4s?1KqV7&25WnQeGYP^yR4Mei=<;y& za}w%>P4}1GR{mdIM5`Nu5n}B;&O9|ISC+Q~gip?;;VE3dc7zoVsl1?8WCTx#w0`UK zJWguugDuSTfBTja-u}tjWmTjsIlulzQvfsf$CESEup9y=hM`4m*F|3p4LZ&%gJ5#x z3Y<@=68NwI6OLLT?rSLmyIGIT^aNim|C620*-V-h6j)Yq5%ciKDBoSq zA?K69LcZzno&IbMeM%(K!sI^cSH{qT?Yj}aLr%#PrUl8y1Uca;UKG@JFO$S%U3OrM zxUGyA3Jswq1(okFC~0WyEBKSKZbg;02-{CRW(rxKMI|b3CO7^CR^okJcDuzoTH1+; zrqA2QN$fx-*CGQU^2FEQw@Gk=I|V>*+VA&mdcMx-;cP=jH!Dc4Mx#1PkHs!R9XkGD zHR~Z0wQO|7QGLy2&Sz6V2;%v-)1lADp#h?4b``e69wQ?)SaL{^UODNp^du1-SDRg2 zK2TyaLM3C65hP7ARyzcJSZ~ZYn|BbSXV$0g zYG&u$cbtBF_-{R5^DM|?Xwm@AT`!^FtN4Qg@c~Xy$Nu1V@?mrGY8%9-IEW?nfxUB4 zxm6HOJd&GzkUd|ew&xr^G>t2mS+YTggv=S7kd_Vr02%`W4=8Ua$Ow zUZR9{!i{)=ZpDnd_5NvI-jdc{3z-&O3H3ALQi&>sS#tJZp*O_&MN&lf{u{2i9pnp_ z>wRYMe7gL1%o;_}Ow{`8?xL0(oFiIDV^$QVCnAkDC{G-Fq0vG(l%*UP&1E)G&);^2 z$U63!@>bdQo8AWLYzb{%qbIlQI@6D4dY4b--1@$*g=eG3UVFY>L??VY>P7inLwMKDA0m}b=_y~V6jAQKyjAni zx;cxbn#A$M1EA#@aP8U|wBd!!&NudSHq}N{xeCK*!AI_&|>gST4 z_Mt>gdt@S<&Se}5Lm!vT=`c5 zn9YbG)>4pM=-(nnpcc%1GL^$0-UGg#nIfHN{`kFzv4FuU zXmnlNf|8vWA?2=!I)t@ug6lI7WusTWskMJ$*;J8>$UziQ1S z`koCuH2HVp&y^?dxisghdhwP=uh9|uaT+xhd1wp4K2OgmIb=F(i2QNFy0&`}h#bjR zaAtG`h+Mo7Zc2iswu8RdETk*QwAAr-pl*IAc>0MS|0!U8k0ShE4d{c2EFP6b8}r^j z<2Tgx3vtgaY7q_Hm!CvSH*2#7LT|N`Q|^D$enSJaReGb)#@Z^fXos`iButgs|7r*A zsUU!a>qKXKVl>ufSr&HVCnnIf!9MgK=;cw$(0x>AY8jap?A2SK$#(yQ{Dh&KwRfmf z?hStNBeh1Ez!t+&qAyJ25UqmGR-?pCFr%DWRav14Xkw_6N1+mGBPzX++i z>;PoK|9xkIR16w&OBf8A!ZzIo`&fc{dqE9MTLAUcp{Tp5C-XCgmxN<1YKd) zFcdC#FhAvc%H4SxGKWB8d#{}m!L?mA*n!8?XAO6(Nk>^K{Sl?+cglrcV1yHMybz^7 z;l2?^MJN&q{iw{)pyFZ#{(QktcgfT2{#GxMyS1humBDU?^pn;PXg=n=JF7zZtL_yR zCf6i=a;#sg64n&^TdrCLb(2*3Xf|H%SWujO8R3hUtGN-CLnF}jaadZ)Ap;JyluuoDEZsw^@!x<@l~`;{H9nrxYUCkAHVg` zup5bf_{A_U{@?I+01q&I`(L+b7~bJT;aVSP6i zrMn-J%(6)bCw6uT8H`#zVjtAai*>o)MHZu7kA*r8u}8H@{>@>9e|3i=J?qNdD7awp zmj3S!tQP$Bzimb2yHWbr-$U~LBWzM|Pm5=gL-ON^%u6b$tL9C7l1xRf?<7-%DFO{# z7rKZudsbC3jhCk15ufido*7JGtmHHx 
zmGm-JEZd)c&{$c{`~sr_i)c7hgJC^WP;{_Nt}OyP?D-fFNknJTy9uuuaZ0)RVl~+gmk&(EdUECCF6h$V;7IE327^Ycv8GU4|1tz&|OkSak(BYaa$K`P|7ua1Z` z)`IdZu!CcBa1E`wSz}0rT7-7=o?O-bWVC~Ya znsx#ZR2rd7D4)ZPq7f<7)11Tp*ECf_i|{H$+uRG5tR#xutoUn^Z(Ppi5#+V?Q=MM_ zdvixzB?j>E*l24H2QwcCizfE-CkQm?bHN7jPU2$b4h)^MPOpa;_BuF7$=Ac%`rfFG zjsJYw0t)*i*S;g(+}mEgO|JG>k{3gK^L-YOAf7?KtxvXZya6bC>l?5KFh&d+cd3dL$ zplA9oB^&*~(SDmLaPb=J%^SAa>l5&fTsu^;S=5XNZd%rADbCnC=5a&lvxBsJ4^3x& zEw?k5{`FcxbuIJ2S~IhIR!&r28_Z{4IHU$P zV(FA1^QXg`DKYUN`?<-29oiRUnsxi1S0WvmfYmjA{cnQE_!XP4p`!f~iHl3i>HQdg zxw{p<+F(CkV_yC}SxotK^Lw5omXVgVBS1sv>ell<=E4%&l_J>nIar~gqE3lydic9_ zRNgR+<0!`$%BlMMgO|nl$2^)rm+wTeNnR^|{v`8$!RxV6L=H%0h*fPaaTp=HMo|w{ zpZqpLVuE2EeI)V;ndaiOIG8CgQnNoYsMYWV8UTB-k7Jmz0-?ws68!=z(I z+2(N|;-anjucuGyQnU>-QnZ~ zZ_dl{u3rvPvLC0=S-4=W-oynpT1;)Q*r&Bx7O!>t_*KI@BKu?ZZH|{Qr>Y#hjxQ*H z|JplVI7vrlroz1C__JFFQ&3gpqmXn|LbEhR-eSY-0TfT>TSy3yo{L0bkLq+ySafGV zL&>E2!IidJg2tBQiM!9(|LW7@K@rmQo6UH2n9QWi(QPrzo>sebbqe@Z8NcMVCbX|$ z+8|rMTBn|29p&|`V`JE zB&YTFspwcBe?T&j)<6%5!}ZPwr?o#og!np+K61smibt(A8>}m6PA8a6!hcJQ<3PE~ zTJ4rWt^W-gVM%8^)%OAppbooL(4yC;P)`%oN?K!}poW%GP*W4wVb#iX$yqgNDCO@cDhEor!arD(OZJT=@Uej;py5wMb5vo~A0Pw$l9$WF2BCdzZfqFSjjnOTU_T2< z>5ZV0xDVu~xX;LEoEg5cuNpTS^9X^BWqq8y>kr^C0?bnMVx*+SCVJDR4)jBfewu`^Pa_I)sA_`Qy`XMIg3NlfD&|umv=)3qLldskOywtKC*}7Y=z%hmIQ& zR%~{!?ts*wALBq*fMB^7Li)V)*eD#faq*v=qjOx^=M6il+4dCy8;0NhoRHryRX!Y@S2oM~Q`^dC z#QHt!D3ZGVHBq8X+mQ2rI-JAdj}=5p??+Ei&RgiiQ@1Lrq+Mr&ju@Wn$NR>l_#T`r zpKRAM)lWU4R772sU&tbwd5?%p#+p3rs$SI&8^H9%>1tPm)aH|fT@N`;WoXTd%N&4WC^&_B2bmzWkoS@8`TXZGe zX6|C@;Lh~Y>v)`J_m$4Z#p5yM)lz?=W--Iu-(uoL>WNn!jE}V6m-nIMy<<_%*U|5e zcx&h`!fgxkxB<#{-Drw!%$`Lqkv zwKAZ#{wLauGk+?|-ghcYnyV&!UW!R372pH8mNDXQX{?$xU8?+$}r`)Cy`py$FQ&cuG1jz zrzzf|6_9EpwrUmBPSK2h*=x0V__>C0y7N8>8hqx%ZR~HS{J(mLf-r{5D>0eh$!xBb zj7p;+Bf%5f{F>s!X*0-=7G)DEIML9+)~a}oVSJMs`c4ZK>J_Fqd+04QWG0?Y8@I1_fza^E%r#6&eX3-tEvw736Ef7wpjOziW|?x&)90_LirJ%o7QG` zG#_z6=QA4`>iAyV-+p_G%zB@mBAV^rypfn*cwM4$Z4qS3M{Zi=CkDATU`hh7A=q#T zP##y$?-EHy&u_R9fQ_GeLmaqp^FYI|s0_;rWqd{3;7@jAB^zm 
zl5TK*beO~A^~$-@Sh0qFzE*T&Csa!9Ce3DVrk(sNvA%DF_<}VFLf(0B756GY5gAl_ zDPaa8E=!-r1!2f@Ng(kav*~MhN6$u9?8F&3Fx8jGmP8L;zaJ#M+__c6epGEHId?a} zy)sM)f_qkB(ZDqMVbK$gRl@pDuKjTI)nHJ(Q!DS`mrTdn4|xv0`KA-{J#?GW2og+% zKe0M4M%2>%8Gnj_yTIvIn?(0eQ9MvR&!BN9wRTG|ZF6d7Z1gAOm?PRJ;Z_8m#wn&s ztk~|cS?%-@1#A;tJJrz-6T5Td`CD}2XO7s$cMi>Pr_P~PtkGMIYQ2gR+FmlOo9=Km*~@5NduKI z5%~RovvJthV?+)$+FwCMx??Xd*oBtex^z*O+i!M<(5K0GH802cp}|)jc7NFjM4Te= zP*=3r=*MRE(si1EMV;-B`qzwM>y%nvI(I;pwd$Z1%uWUToWuZ)0dDqK5*YS|B)Rg* z9}D?g+t%G6-zD|om{HWr1Ls`DzUM|SJ=U=i+Ih^sVRXuUF@ zJ7=amvc6XEcAVcZw0Yc=_Wo|i?CK3%i2Ci(Yks7a3t4?L^Z-yN^s=_;P}06GzPmla z4pm%ikf51t0S?dZTOJ8 znLwV*Shn^5=F8BSh6d>=lv%)`&IkJ?e$be79z(9CVYg9MS~((5L#@?*8ZuO zdD>M}_>TW^qu$EXNFC+a=+Ng=l`&L@HP5Rl=Z?R|iIrC0 zhrMX8d23ioOIqGRoX&RcWV(8wVhEQV^_|5XP0741hzdh2V1#nc-d=k zlZP(}15uD|DLOSL%^@@nIjIUTBNWc38nH8uw^V#mwTF>coIIQyXI?NKXMO>8E0&Z5 zqja>edMMfUA0Z#Ng?>NHkfOe$lF(H~1Q|w7z_1CqBk?|(K8Pn*#_vuW@Khw_$z{GV zlI)S#tc}!NM636^lrf?`U};$%1|8396P%^>W00nKV{mSw*6A5%;1%@;jK==211uAC zA4(}KtnZ~7yFTqiw@3uJ~Hln~CzgX+gt23XbE6WjU#= zY*}Ym)4;Z0_L4-?5g*bd%Av|Nt!hb~D1ESvH6OL6NMrI!E3^5@Vm?WMm#)R_y$4zK z@f;i*?Ofp{c8j0l#yb!3?DsKrr}FrkyJnj$dA^Y7`s+6Rc|kdW$(v@`mMIN%Q7Jph z_B>?j+~bpiAAz4%9<~LYbMS~Yt^&?zvwv;J=<&OTj(kU7J{fZ=asbf%$FVU>=As2? zEckx$Qpl!H?t_4FE~^Y8p$pda4yX=gm=8;Cl-J9?JEz_Nb16K1Sj=5{wR=Z{IAI4$ zeETIHL=LY2w}dv+=oK+DaS@kc(q#F<_~ps*K0t#?B(04yFh)=>%tjlxPjFGv;R?R} zf&{rpbBJ7Rcv!0yCgoyIb=R_iIfSVHVwHwB`-C+duvq8U87SF46YuxR8luNX6zl5! z7j%RVd;If8ojaYnnK6z94|Uxm0R{_0xkP?`fYHQ^!w9GKO~qv|w=y%e z)sqJU2ixBnIwaj4RX%-lr@oT|j`sOGu@`)*!{PeV`4zU0S=o3xzr=>ORoyz0$PY>? 
zhazS0->pR!6yZhQs)saL5?4}O_LjAA+WLP?VSruxymJBD+-OB_JvV#9z1LaQHHqpK z>mZXu(@ziPLpS52{aLHPLt|o>9w8tpx((ePvh`-o)}8kCqIu32Iq(V%1v|88C3W7z zmcg3gL1(I2qmHev3ix%-;o_|C-^Bc~DcTdtuzlXwSjqKc5BFCqa9(f8BdqLY!OttYd8vu5z z&A8h`g89lWhRvO^$pm%s>wa>RMG^QBab{XI2Fczn9UKJXEe4p_B@34{rpv)*C~vBe z@$_~TrK>yn1hB;}?QQx+{9X{?XXl7_?{?X_$_cXDiFx@)Rq79g#nI9>9%-_6C32sB zo}jXiFdByg=ZnU$;lu#mYZUtakV%vU&)swy!SK{&mO71$SGaARi@1+jOvO{jVQ}A9 znI*tK2G{6#Ju`G_-Y4d07KUb{O)Thu@fNzT;ZxG|^*1V1&`lgCg##Vk-!MjsIEjJ) zNJ%GSsupWxD#LuKIe0~iq8cysS~t2`qrTjavJHT5^L1v#LX90c*3a}>|9gaR$74?2 z#eZb}_hbG2-;Y%~4gO5H1BF(18_1$-WVwEQ;j_O~MfFr^5UKg%6|uSyAH8X`M~Zm= zH|y+%-MdFlK3gQBbyn$<vPm5D{YJmlkbi>WK+7%P*ePug&%)!_!0OQOD zJ zPGupC`inf3;rpGWpPJ%#7m{7C;ZX*VD?@|$=3j=^$ z?n=HHCutXI36A~Pt_UTo66Y!bZ*y}9HOiTnMh8)`)-qO{-`9}5ThSnZ$5RdciyfMJ+9fKdBpBE`xF>sV|B2pytX4XxCh|y$pF+{BeqO<}Nb9Qj^(ij^SPxw8QA- zC!AC>_0Hr!ec7e0G?xi&loIoy;KW3P#mp;C$f;Us^4(q{XAHV@YhtHE&;96COeExw^~jQM3ep}G%rbDJ{D?n>v*(pNvwY8I;0IPyP%|Ji`3 z^0Igqg#*%qtk#;Y#cX}`2S4|b$tkrcTz2A?f&}U;ma9gx%^;a5+plw^6>n3J(!`f` zR`QxnH0ziE z2Vt?P*ew0-`L%X>qJFY=B=T+0JGXCR zo?{?VmhEx`cTd5-DG2L%=tJe3CnZJ}q?1q^n)0&XySmzd1^egX!(B7J{JexK(cR(> z)pMK#P@;a%D#|)!_WL$3QKFtbz5z(iiWil*AQIMx5Jdh$?lvQCcRW$o66W=3ntat$ zuUF&vf}&4=-<9wV`KKN{>`YGkHb;iWEu3>!6GYKtqbaY@O@+>e(OSZhe|SDkx}yi0k4TIu$Yw3KH*+W?8vhoN(20)xC1`_#bWXtw@Wj@a$l?n9GCVC)0# z3c+hJUC14VAKRF*>WT)6ei0pHcCTdXZr0JFDwWaygcfKeQ5vov>;8&vJ>uu!^CUuW zc3&SRI>FJOPL1>bpP7`!-yWc0aL`0~X)NnFE@j8dfog>!-UtO@rclicaX4jSEZN`Lv`ZBRzeiEaKl7Ci+4SAMe%m zl*2m2*}xfOsXXE|SN7l($sNj?n^rhag1b=nHx^fp9A$$2z{4w6?GIpBK0#l*qcz7A zbJoX{A-mOX&Kfa28TA3=NovPqV$Uaz>`$PK>`!h_m?POg-37dUB-hmFI9`8(T%>S% zt}Yl&2e{iQ(T(diL|uI_i$XQkM-V!ht0h>XODzGhQ~hIMzPBqV0c`-p&Pl6oV@z@- zy}EryHe(9)`_8bkKXNYq68S@q`g*xSuls_Q7p*M@N{d>e(tHe6Ia-$L-6y8xaYV)1 zX9IdjQ1cE|VlGhUHLkPW^poPK0-Tz{7-CpYJhfoN%nY-mm#9Efm3j-Y3~;27IF0Ap zy#Gt%fnJTkZYzDZPS?|*tO5lGx9eJ`&ri|SN?vP_y#pOvNA5{9?M_TsBrdCy3M5L$ z`I3s3R>$ju=g`8e{iRQ?I@#XZcoS-hFPLm$BiqtXu~gqOo!G$u_pl@On6LS&tf%uZ 
zzm7vY%-MkN$Y?_Y@I>Gt`~-k^IbKf1l%{+qsup0u)uH-C)Ge+%9Zq%zF!ENq!N$WQ zRoF@3d_$Ha0=s-)o|VQQoC`kJ2~oh$8+prbYvYJDFgol1fc)p4u+OWyGl+*w3zcTS zbHTbBK9M>hSIdAk$F;kphpI!(_M7ZxD!>U*|8-rS#&jRQ*=RTkA+#i65mb_?msNL| zz~W-`4ts!|a3!4P!@C~9$y|*ihC>m0byJxuiwB@LQ`J2zAQk7wj>bANAS?hXJr^sl6gkTM}0(HuwuF>;w5&0 z+Zg3ry`VMXak1k$Yl&Sibf1Qqlp8|JI=U3B+AQeK2IBBL|9PVVzLpNN`tRx+`IQL~ z1cUrHDh>prD%H%+W%`ifnh+y;}2F3cTauI2(NLA}<3D6fq6Ioq?!-!0rN+G*sv} z6QetOuCCz9ehl^4K8r}F(?{-m_HH5;&-o$pDs(ZWoBNY?_TQLI)_(!rj3TC|Zxz^W zJlyKFZ^g*w8DvU=qLNXCzfi0gSxzY&;9NmO6%cV`hlNqt_lt}d|p&=KE)+8 z;g_LKoJX4oq1#l3{c3PGiTzrqxAt2PWn+q;=u6`G7Vm#f|IL4?Il*77x_nFn?I-Qlm z!9*bzre}eCO?TYo6hg>JIXJOU{?LZ_66&rP<70xG(5^`xlOlQZyHyon7?Tm~eMt`O zZ~=hE;2$HKDO@kS1`1-W>uAQJ<1jjlv(`qj?V?yTa zPXD|v-}wyKIK&4*R53xChy%D|tGGZ1QokZBtDXK7%puZAR|CytZO|tAC+wd8>jhwuui8s{N?r`w2>P}vG%kaY z(-~S|W51*(JvKbu7zy4RA@}qOWczMI!Ox_`I!Gm4Vv)lk2~KeMnwG$ey*PI(naj?O zw9<#V;nQTxUDB^(lW<&xFH_1Seo%O&c~ziZD)1;1qahGSRoHp{Wg1 z?cH2ZizIcw)g(%?pG@C7@mzh47@9SYUgj=tBlr!WJTvRiW1PX-D)-9y2dv_)qKl^w zz=Ny}#oZzGs~?#bo=hp{UKE)B zLs*_B%AV~iYQTYH*m4C^m+{+SHgpuKI%c9(Uz|m&MjiUDiIGLl5&HvglU>eO8N~w^ zI=Q?{=|w`hCX;=jQ_2W!0&oaz0rmeD32+BcE4%L6f3oSM)T)lA)?KVk5}MrBu?EHh z6WRGXW!L}W8ELUAgMZC#3&FT@l5XSw>i7gS7GGRfW?y86lGDI_SUW8& zUm^0v26G#E?D<7SeqNG8^1dFVv4M;T8ZjYPRKV-#%^RwtRsIei53`}xd%5AO;PiUJ z(QRR5=@G*iuRooRkpc#F z4EZJh;-MGSJ%QCf5h?9nGNw9(P^%oHsY1w$ZX1+dyt`Cykki+(Zm%+(lQpQq6X)#y z(BkBvDA!AaT9<6Sb64`>Wu3Qya$RF2$t*R|WGpAkJCJzF!j!Sc)=rNo48rrz-Qr&u zBaTKsF1Ks`w5wkO(Q8;F!3V^KIZ>B(=Au|zUD6zf5a7IiUvY@w>#+qAD$Yy#p?awGS=FR~Ot-IG1 zr^xAyHYIClUHYMJcg8WUA4~gZVv?<|K3rX zz})bxs+PYI$~{KHH)LdX?Rdw{_#P?Oy-Si|;_sU{V~J&pdozgMz$IszL%G%`!e5m zHPAd=_ZVixzUmo+bFItI2*0b>^!I2oaLC7CRC7XR=(gR|Rab~s{&40^v0pCbFe#;Y z%qK@Vc*RxVR)l^q{M*mpAQA1~g|ZX6Q`HsQh@H<3rV%>P&Uz|T0_uKc>+IHLm6WT#LRZVuq#1WkR ztsov62GtH)25tR>u52IeZ18d_*dt#Jhbt7gBcJ};-S-O#;8s@4JN8b`ySc6Yb>K>~ zPQ!pLZeUxBXhlKi8HSeg@7eu)TIGEEgP>YC?JNVVS0fU3&$_Uq698+L2N81*niVi)8asjPm#^ z#rWcwJGL;4Z7paN+NNmn>h{R>7eT)ndPx&5{a#qVl(zn5Q1I*KaC*{UC~;JtN@hl! 
zpd$$@VWz%K4YIrZpg;mX%@|$Hg&wd4aR}6$ZYoe!2PZc zopz2$ce%|&lag%di$t+qy2_F#F;v%0o-d@o>FPhoj?w|l{v1ANVaGG1QP(|ZeKth+ zvG?@*6brNN=W^n#oy@C#il2 z{I_5Xb#7@?2BV~Y8cqDl^bvMiC~{T9?bKhcT^pJm?fvhlP~~e0>V6y|_IA7n&__(( z`?-HcsCq^v*4GZ8AKW-a7WvG9Z`@bNDKrky&olSLX#I7trRLLxAYOzZjw*dZVJ2g)FFR+ zun%KpSbQE0_N|_5qJq^Bm~G4;9rP1PoSFv#EpEe|hI+6d-p0j9?VO!(?Ef~AR#t=u zoliHV3ShsBV~Z9v?!iyn9%|2EgNm$j_D9MmG#{iWrw-W$dA9aXSJf1M`EqDXI+@iY zh(7J|$q|dLnlmzp5q3yud9zc+QYw*DXq{|W4)L;M#VcW>-pbb=t#XhEE&3gp!^I92 zEbfk%fBI*mg}@kyt20cj>iH`2zJBaGZ~UKI?na zZUl4Xg;|+T=iX7`aOA8qEGOUJUB`2_{imR~Sz>?_X&1-tuqE|X3V=>KU@IBtdRbNl z36tBL=a{8laX>7c4KM!s%hV&X6?`4?^_kk4W4B#8$flXz$!hM|%_>O-d!}g3KcZ5V z>fMBtG+rZlW;L280^8HOSpqiR>`~bX;J&$Ct~u#|-zLVpf+lg_=*!JduF#3Hjf0+1 z>8Z7%Cka8}@x_bZk_bM7|G*8>#cmZb@Sgo1qBx2qxW+%)O3g70KifB?q=-cYZ58d? zkgpO-Ah{$6H!5ydV5)C-cBUU0k~!7+Ehc8av{%|FOQLbBpyyZ~)++)~cf+hhx4RYo zHnJ5sUayk>(|w_;yJg5N`Yqj2D{m>@KT7>A?UmNE4trxt$x@n9eVpsoU#x|?pv8$LGVwG_oG;vTn@&8+}F2M9~Xvsk@&4+j6G2pd^v$$Q>V6MuL>fsdt zkV41%&30D1rP>B{u1YJcnc;k>Cb9S`#P~@A-R!bD{U81+N0-L$dG*d~lIpGCL{Y=3 zLV(v~wU^-Bz)z3APvz!3PQXhhRw1x`JWxsij{|t^y*}<)2Q#P8{p;68@Jf`3vNX4S1deqJK%;qIAmz&tVv#v|u ztwvo+=0XmiOa&MsbARR>t#`+YvFrH4DApRcK*Ptlxb?kO+q`&{j6~nVz84C=4g=^C&^(Zr4?a~)K z={(#(W&8W8{ZC@>$TzKHW0m66$JxcsgUmpyH_k@i0|65`YeD7uqV8Io{!C+e$oB4y zO3Z_KW>va>#KNRWhqftlH2k2f86q&dE8F?$&|^n<{e}-!w+uFv{wxZ+Kf46uip(1D zS{f-f%cQBM9{T^8y(Ejr3MZxgW~}MxvQAo!C_HXi@kf?3dN^6Wm0 zAC4`oAPP7G1Za0eu84gvkUje}=KJgSMI#R5=7I)&SZI9X6kE1{Dy_>FzW=Yt+=R{# z1qQEEw<4%XO7+~TXGEAJ9?l+wi*_I~2PqGO2B<`opHnoo?ZYr_4vUgv4s+hwoXvMe z;n=vs1Los3?5yAP<0n+mpn;KQ)71wEQEgxDoYkzrSLYi&%(MH^w1+XZBT5sIFmp4k__EY6`H5Y|!C-;D z)v)H&FdO6c>?Mr-kMzK@Y^!pHMRDw_5GLx4S2ft2VZ!$8QX|nzJdxxxmU%cHXjUC( z1M4ghY+6Z+w$v_Utl|*haZA|)QWqo~fPE)8gUaNcld)N;Q^6%F8gpfOSJ79S1~~%} zmnieBkcYqOjd4x|H#!=yE@WNxcWT|__P4x))O|8-qP}Cjla*|}mE>JS!s*jp0GR-x zHsd#kG|3@PA`6E4c*xF|vGH5In&=BR4tgbuQ2#~C>-ZcR8Lj?7lLhD9_^IV6uR=Wc zNVeBb;x!MlOS}QJm~*XANoPVDy*rs7!W|nqw2=)df47^q*E0t9d(72$#&_2PrExoX 
zS-m#MA5}O=BP|HKgY-S*;y5R?X(5|RN%4953SB`CUau-0rkBU2ya4syikC~q%*S02 z`6ad}1+KssQj2&IE`)IgD{(h>f>@0GTZn}GG-wd32Mx0EPc3M3~2Xxzkk1f$~26W!D z(KbH9ub=tEtU0e(|MDLzi;@KG1iPs5Cw62Uh^)a|y)`m{FbaZKrbm zBvq`_TSn_H*VD8Z3cL^&YWGcG$Z4K^dt$0ox!_`vmTq;|2R@3qeCcD+K2GPnYNGAbivA=y$9Tk~#zWdL`P>p=Bh z!UIov*OTnuLkSMf*;2fqih#wfTIzhW61jWv%Nf^)s#ZI1L_*|@mK zreK(O&-nH9A7HCWsdvz|8@x>!=QO{?%GBb)lxtA_UF{h5o2G6= z-vRVb-ytQpI;_mjfrN=V)7aU4pWP*sy>RZen@Iziieq``(lan#uhSqHxkmGToV%$HJ zLguT@;pF@FG`H)lnyC&r9{(xVFHzXBJxExa!hc(Wxh9>Pt9p7xeZ#UA-oL01s6;!B zzPq|6T@%{!A-Gsd?!2g9dMpV}YgW19oL?cICkvz`3cqJ*HokAbyvtt7u~6^6|Nkhg znBO1uRh?l(ihsJhl02T&#sc082=jVifZ6+5nl(Kq(n)&#RB>g$7N^)z*O;XNZ<5V# zsFgf4F5Q=eTS-tORhWck!&C~`=iI*kczSWFJ>ezzydH;%9=|zzfPNQz62v>m4 zjWQx4)>^U``!>jiHT$K%&fhkL{zaS$1;K>8?EkR$R?%@bNw%mZOSYJqvBYFCGc#Gt zlEq|OOcpaUGn2(sVrFJ$X694==X6g`PfyS5UF)8;^q_+LA~JJFRAyx4j`gMXQkLHC zF`8F?N2LXGVU2bsO%ZmOX1kMyHbyLro!w0{5TnJ@G0gM9RJGbNLwG5T5h4E3!t?2f zd=h2q8ivFz`6y(#Mh4?_f7w@^TC+7g2XOab`T=fw_wmE@60LQ9y>lTTdzh(4&`vs< zqHMv>vq7QSC_9fi0`2@NRU#fKL&X^0a{Q-P^3B2T*Z%Qy2_MR&s?lGb1c4K~^m(bt zFVG2mySr=S!!!cc$ZHWWAjSc(9+#FZaZu7urS%qaiG%nv94&3!W2_f~0}kZd%XP%< zG!5Hk^w<{#O|2`~EGB-6kMlXn?g-8hf-Ts)_9VAEgFIt&aZMT}Zv65*G|x?U0wDGo z15?|tJT*SG^6gGHF62M1XE<4cF}~jBzZ>9jX69|p+0)3vzWHmP8pazCis_YVD}ss? 
zv|3)trwJ(v@{J9EyG##jWqbZ|(?tiKw04GsMrFFen8J*O?5pzDn$7YQsi`$Fae81h zP3?7&SgZ0&Ez#XRL4hj57rO<8seiXGGUwwS(7uJ49MxvBfzaa9kH|Hx~5S)*&faro5qE1m7z!0|Kn1eU+j>2Fap?TSL3C`a)C|^{OOlVXB zKf;lsGN8O92h5TWzJ7h`L-O$G|tHAMnQ zWG~&TM3E>k$E8T@WPt9UICS~QUAf|o$Iov&d=l4BJW)JE6G5d33tnN$`fNtDph z_6&Xm?8TS326C=>#jlc;@+J0bAdR;Lnb0(M9Nn$mFjNvE0`L;UeKTzKapo6#)OJU%IpkzVRy5MBrh`p~!6hK@=SBA=} zrzgBWz*hN3x*;Pi6Ro<*-JF}W%Ja?p)5|S4MWHpERefU!561@?#B8BWl4uYsoks+j z_hSL^WvA;K%U=1|V}=hZ@t34LONE)0sDr9`-ryfGK?gaTmqvsbYVLNUhg6`0MO86G;yU!pQ* z#%xv5fG>v743{XN`rp<%f$7hYq{r9r1J!Jfw){}Tnb=4K@(KUQ8(HKCVptUIi&FYK z#V&|`kTYsf>-uu;eu=cbAcy?Rj3{!3#X(3SsWhj#CQEB*bZ7QDB&gUPwK^ zl9dHIf5PYV&rAN3lJbrej6AqD3@mBoPxqhP#yEC!6m=luY$A$Tci;Xjq;A-F^3&y`(kvtkqgCr8&EUs11keJfQtj$Q#(69ueaa zH@3CZxQ6xA@rpck_Z`~-=O2qcP(p&ZEAxNdqJGQYxJB(UolPY8LuSdnCw-5;albqk z-}F(9y(swJ#^M(ia%(LqcO}*<|6sRs|^QEWz8EB_}W?VtW;qy8Fym46)n>d#lv3|Gp;~1)C2Whjvd$|m7xeyy`f!JE}1HY8*itxZS(K3UwVE&26`|mH~(m@icYf49A zKzplLw&Ug<5G{Df2~`xsv+e5j{q)1udT;f9!b$xua^_3EgS95AS8Ek<%DnxT_TSrh zx=(7}umLrwNwHS>I`h8)d?)J)cVIuPIGUWr{<$yz*)@e|ka?<-tzOm0c9y=VPoY4^ zNr0&0f`dxj1GGU&Oc(owRgVvl$Uqjbp_u#~88imc(F3u{_oCo<5=YkH?|vn8?`^-9 z`9^&=mv{4UP3pd%)v7N-NWl6pln~sXekr=5_#gYLLJyj~_~DkaI?!5)hBJZG`wNI_ zT=kQg>hGYv!+0PoVd;+$@cOfMGKkI8R@|&?6yJ-R$Iyf4LGu$%d`(JWy0Fcw4ETdZ z^r$Zke;>tv{zsq}RK7y>GHf{a6DajSFq%z*NCZS!C&}O4!yWE4WDN2G)6aVB50es8@>*wcwB`6-uzI^#<;f}q; zRuo(*uiNo4b!@>UEUk^-7Ng2aEb&Pxf*8)@kkS8tgDI32*rO z(I$tXY@HR+euGX-?HsyOK;8o5X_!yvV^3*maNgy=p$IQ>{e#C=rptK}7~Qub#&8Jg zAW-*Uz@=;c&7hMh2>xk?wD6rxy0bLRdkPO4;t$a=7QQa&e`6v4PwCzz;-JZz#YOd^ z%h}kCgqfDe^7%u&PxPDR-*U#I|CwV*Z#vT&5Mu#06Z* zahCTAiTu!S=A|?mp(fFqaq)ZX#Tc^e(^umu-WV~TXuCu&e74Scoz6zmlAi-%S1f;> zA4WU9rCsyw^Nc4qC0?i8KQn>>Iv|ohpz4Fm{DS_^7q^igV0CXR<=fFFOWp>=M;rc5;gAA|b6u^Wb< zXY}tI&hUc-%(nAYsgYs;PZT$Q$aDyUl)XYyXB++(gYpk@|L?aGFabJWKLn46 z05b4x^_dPazT;c+e}lr`oBs7lM>+&YUN*P6>lg;CZidltLh>(o(7ahiT|oaYf?GkV zs(S8FCbSWL5Omix=Eu00kA)Tju{LWG;NDS zo2dWt>wiN9qqnQ74o#h>JYJu<< zL8tW%oDDQ<)q-6Ve}DMD#`0|nsNrq1+sshaMt;r)K48P;4}>$A691Kv{exIL3Xo^xpqq&i^3k 
zZ(ZMz18LS2Td;3cwXw@tZeu4r_w=_n7;|{YNvol#H5QpTzxzY7>CIFbGSyWbQuyesB~jK@C+aTg1!p zV0Ecs)(pTiC_t+2WB-8uuYLMQpZV7(PADMF(ph*-Lm?T)t4xMvIs!nDV3T-&`1ga8 zr3>m(sjgF`CMO0^1r68@!6Oe+b)0z*<$uB485u~k|7Vc@UpL6@CzY;93a>6i{+C~R zo}9i@Q%cUP)*)G0a|d&EG*5To0(N$(PbxYUu>Te)j(^<3XDMEuULQ5y6SNAYZE}!X z-9FXIHz6knSh<|@x;tIq{yaa53`*^`6IRE4Xt0VHR#m0s@&co%@#^+!dxk849km}{ zRm5U4;l|F>pzg#5?FD4KEcln&+_sf#CW4T& z&^HgRuQY}_Kxb&|OwY)&kD!F0!NbPG0)pF8L2cZBD+1JoZY&5|XVx&!EloBQ(9>CC zB3M{*fj~Uzw|Y1ax96?57TYTA*TX`0ZIZ|eSd(wU7LUJm%s$ZTmi2_|2pdqBca+rW zZ6vfg>A!s3J}KCbb)e8^OV-ahGee(&X_Gy+iKLqe5P+0{1avf@Yt~&Ahl7o6+kk&V zg^@v@A@4NXJ+|4lssupbbTbeAY(x6?K$PcDn!=Jk{lz;#zC$}$%GTD|jrFBq<{3i; z`B=(ubY0`Nt+dhSKm36mJSwBw#QJOaxveJ#5T`HcTW6`k4eES#G{~uzM*qy42!q8O zVtn`CzwF@EVaj@k-W1#O3e%~{A=3dWQk0ckeaA_z>#vEI7A?>oY3gs4Gwc<4Y zqSI3!iSTi4m2sCL*81uqxEYxvfe&*M)=51aPIPI$bRu!z==f7zL6dAh({nwRvHG$M-%*&~vkiD-8J^-3jN7dC2dE6H$LHLMU0YUF5{g z2;7-Pr&rLpuqZcm-cVc@TQd-$BTq8=qCPf2el}fTt-mz7@z$?*VqowG(_#Ic1G}a$ ztcicqz_V)e`^&~^SWni3{!b``qHJ``She)t&y{VV%4Am#58V)9gamtrea#f5BX4NT zi6&V|G7eZszIhA8C6h0e>%9ddHNWH;0f3$?VffyDAj4Yk-g_E3aXBx5?~zEzGD)u- z>U<_mK$V_Py$QXLntGD+%cuKSgnNZino-XG@QodO-Z1Xb-*JrOw6lk0FM4z6vCvjz zQhOPW zE+vk2by_~7&_Ak#w`i@c#r3@8eic&kT>m;dwmG50+!G>>-Iq?&QW z%LJiWqrJA)(o;e^IZXGBm8PxY(ef)!;T$qj|I7y*nZu*b&wrU%Br0V$1w(qL zkM;(ao7c}t`S!at*@>oB_JH*@9ADSZu0SyxhO8UBzEP?^im%0 zS@c?4igw8s$8G`{53>(%O}kn<8IOcENA~zK$M>6Y^CoZ}PY5UP@(7>Sm(bhP=yO-$ zttVUd9WX)z+r~wE=nS0AXWzSpQ46>Ga9h~u^$E;nAsqp{5xwhQiO?P|@`ITJ&A1 zfMa&;{J|&T)P(I_2J^nMB|5m8m^~trg{TRcu36=3yIF6)QZhAvnj!Yct8~B*)*(~M zHnu<)ZZ#r%>4>!1RKFE9R^t1e0!a0n{GDA40iXFP99Uh4QN7eVI|3T-b$c0m!oL>9 z74+U;fXwWevEwc37M6r~?)A)Fr*8$0lL5ZQ`;M7HFZ=O7>p+!}oCHnXz|(wzW1!sT z$69yV-vCX{eA2C_;jXz}(i@-q(%LZyYg%Uwu**R4Kl?`HGOIY~g+J@Pk=5S8SF zqXNt@=0P-S7pHBgl$0%D+~POYwGS7Frglb&NX{-U28-+WhKuVlR3aq35<8>NU@4TI zVLfHOb7Y%nVwR!SSajFp&zD=F%lZkrv=&~Uzsci!e+5P)qE=)ys(5>t)BL_vfCL}< zrFaRAH9H!fV}vc+h&TsWc^;DyUNZjvxmk?8HEH&IgLz8kVA9x>gNA&&}F32nDejg|BnRWn!_icK0?zsXLKTHW=978W3S);0y~ 
z*qnVuD_@X8&xx{A*uy&(Wy)M%KTvBanQ@)c4vp<@m_@TN2_i@Nb|Fp50gKIba$(o7 z?ws*HO=xxADqi?#Et!~mzCIPQU z@?e=23*DcSBPuyqXEB5#E{+x}co24iMq=1!DFm)=lmboXdG$Nqt0Di;0UOjQKSa7u z9d}-ZkE+h~e4A`q&74v6;Mj4A0qF=ywyIUIbbm8_)q}W{eFFITWH;H z$6`7G)OBONoeJ~Zi-2|>#`izmeCFe&6Ycy~n!H|S{p=NAX zz@r1V=?y~erkj5t6o;UWlJlA8F(Co2yW_bVS+&9q?iE40ugO9TbkkJmv3x1tdjb@Y z()D%vwWNlWjeo12HYKvyRGXtl$iIj#wc-ID{dje7U=B)l_4dmIhrBU!9POq z;k+by^V}RV!cM%rx@xgpuXu%x0mO^mygvIA^=JvM<^^o{b=1*1OB&AOQml33X!aN8 zNWE>{Ss(cp+CI_{96Gk6THKy1oh54nWAB_+e(aAP*y()u!uLopA6VCy-QB*iBflT@ zqZ!Gz+!c$@yL=+gTshD79pkam-TLw(1^8s(cW)Lsj5)f{{!T%w-2|KaK7(q#45`G? zeSCSmczu95iH%9GvXN=dzhNt#sHgg2&j;`1#ftq7e<+~Ip8;B&59*k}yRG4_Gs^}) zeXiI24eks7diaQKgx1xkhfk!n9+a+k?eNhaC;P1X`2B5!u*NNX_{f}jurpjQ&SHG$ z%aFN!>)``7hO`+lI|PFOejjkFG>(th7NdRor+dXvtybWg4ua$K)h!`yX&-KIr^I3N z#dYbQ@Ad^8%kD)hvp*t#FGhOid&c8#&#zav`fN0D(ZXb*j)}`O8utV1vsZna=&3kt z<(dV>=y)8UKc$btZ|yrwLlr5yGHyUq;5})}faY@qZ*iuG+!rjJ6eLZS`UY#SB-x(L z(8}oI`o+)0>0OhRUJ##dtE^+KngH)#H`M3Y2NH!HNwZ+XOL1A8a#l!ZBEa+URRAtt z=xHmF<9u&vb5=3T{y%Yo+;YzZ=8+6^h-#qw-=vLpy9`A7sbddRDh+}qBF{hmz;=mS zMT)3EL+EPMIPrSpY#V0lRKq!E`Nn-0(NVN1j9j2Pnl$=7&CQwJ(eo01EUH1%?78B= z*#q+i{|k}<-OVZ7sV9xAqr%Xf9&9L<6oD#WOU{AgJKm#l>HcqVWL}XILh~{&-;qj$ z&4opwCVG|ej8-?p3&Kx1mY+RDW%&uk={LC^A`jK8yHhg0^R{rIcOUFFqqq*YQ{1-i z+LCuKoG)b_RkE8}bI)#GsLiZZ+MR?YtJeuhC{PNutoTWYm%-Iry6S1b*1KbM8vwkn z=yJGbcCm^0E%V&&Yj~f7E%_!$@M4YnNn~C~m2kA&+J*MAg$L>)1lO}74whRGc~Ni6 z^7iN2Nasv3X1U7l%b)BmtfQz*`FR<3>=BOqb$t(AgiW|S#Hbh#njBAPO5`zS`)Suv z+XPw8NJto+^$Gog#`vT%xZH8h5sS7X`pUTq>cD^AGtOE@`R;o<;sjSohG3N<%2V-? zq6J9t$3N5x^^T*lN1PSAo#7ccl^q>TD0)U1d)45&-4W!v&H8<1U-53l(&Sbyb@W|A z^^ahZr|<3YR5}n6PsQ&nErFnDe?>6$jBSh5%raTl|Jh6qo;?dj#T%OYIfA6J5pHd5 z?ZO;c;PF$I?}pBH5k^ItQwr1@mF720xSLk?y<9dSwyfs{JR?pOCl$O1*o`Urq6kvD1 zL3bH)-&U=WCZf$$wS0O1&Td@{5E%ExlIF|8uql1d;CbZ4LQLzJrqZ+P?YVU#WRM!) 
zWej8e8P?ZrmfrTAE%04yN!lgS=l ztm!}hb57tt!qP>F5D=Zi>zJa$+}TaJu6ui%n@H5-!?=lc%zet`1N5O{an7s7mXWO>D)oX&pmRftC(oB~@j!e@Vejs-_xjMw)WfH_Omn=P$z(dm+oZxX4mKr;n zo=Gr(WQ)KF!yI3%*Py~JqKA)9Z$|Rcs1hDFtYQCz>%`DYM2F)o8)6}^fK_wQ89U1d zo173bvcNZI6~F}wY19ZuZui~>o~DFrBuDI*Z%&21nlkvd(MlNdI2zTogUd#;?;r9Ca+Oh+ADZL|avxQA{Fz1Z{6wa`MMm`H2Vlq zn7h(^ZNj<0v2PD-6i1bDm_H=?zC@kZgMjWw+P#39(f$Pd6bWRi^_>5HY}~r}cEEi~ z-jg(MbmzLuEo3XX!=T4B115~jdxAC9>*zOq--Yq$CTLr3fY(F|r;$PmrE5*fJbz=P| z^$!5vFCSXo>QY4@t*7T0a)0;-{xlJ3<#NLosWuQQ}<9dn{$T za$V-3v|xIPBH`YRD0ab+pSa zOmcaVD9>cB7;qbAX{mau$%3Ei8C?_R@+H|%iT|-9!;U;**Gz9<-n>Ue27WQpMGIk3 z68~4Q{8&$t=hJ>nL}K$^8edB{ix45!ymYDDZ|Y~_kD*F@v*FnJGtrSFJ%q6J`jdrC zKZjT=zOQ|^0u|bFoM40n@*>c@#f2u%@LypmmPu!>VF{ba;1K{aRPN<%^de2L15Z{m zl~8cbYxeYw8_PS=-)~!-Ia#M|QaA7eu*XMxejUT-md+Vf(~tY2oVc2Bvf)?%AbjA! zte^TD7l1g?;iLCLT;tkSxO3gSzcRPC(_bMXHGctk2!@jJ-ZEtxLDFSXhc-LMLUG; zws@Y-6RYx8`->)eC^g-JDu6v2eHB+{DaWyCPn&}x`r}XXn4tS!X7)zj+T02_Oa#!e z2bdDCL>DMzdjL_V}QKaV&R;?ZT4Ugd@!T9YA*oKOFFzoK9Y> zVJs2JLCuyGZku1Jy(cc%cVm~v&Cb34__H?QPUw5XJz7EEN=U)ax9B9VtQw7wa5$B< z6^vobl}PEG&D-(OXUTmFpZDi>{_J8W=Hc6P?)Luh)k@*{F_w=~TK0pYFW2R^7lB|- zMmrJ|wilX8^LK=(Q-z4#K;DX-%wOXO01_FU_Y!b!%r)PKpZ%!0DYMxU&#vfCFxQ7m zBK0iTB+F5nXJ$(57LS1v1k=&q5^s)K%}t=(x_Sz}M*M6=oVqW_DRfxCZWX7+Sek5) z06g^5=0h(rB^Xcfk6FBxJ+~9%>X5Oh=gW=h%j-it0fT ztEcmlj!-mYuYU{`kw&CL(TG-s?hXMMR1sC z$`_6QJXCMwpShVm>8V%_ejpw)Zf5+77tZ?mnCGjqOl#9g2_u=BIucOZtg}ahh7q8h zhQ-idVe+GjYPn56F9qlcF&C5aHBs+-MZL64VFJTRk*+OS2w60vgG%E!TeX{rq4g%5 zn)-n*-9lu0b)KpQSqvQ<6m|^xnCYNfrDOd;4~$gQ1Viy+{Se^D0KP1TppaybullP> z-dS4i$0}MZVKlBG6-^(*8kGWea9_wt5u^$WvM*4Nc<59a?F^1!&?Q42Qv92KQDLsS zn)HF@e%0X)jsRzLLg$au^Y{}mcFk(coyWe5WC%%16=b%;6!o9OQiHLbj=SkH zSAZWv9f#Y!@l9|0;(BQqWhU}N+Y8w8kCu(OfiH}tRM37@UryB+?XJVYYk1!ZJv^WR%Ink*uL`moo zp*oKbJ0Q?iy8VQHBOTI|k#k*8=(d(!uKW=EaKEAYu#0@^Nk4>WPv(MpjLZM7q>(}^ zBr8)k@TXo`ZN?%=2pKZ%cqY}Z&Zs;FumW(pln6SdIbF1u11K1Q&!=pdfN1R@WKmiO zj+qFiUHPTf6t;Gx!lQNT-~h4EUV70%oY1)rCKX!mkcNeDu`9E7hu_hw?@YD_oE9H0 
zXWki&JK%SURf9u9q#NBIL*gK0Q(Zg9$8pIELK*ddekd?H9H)cPP&TA3K3=!ygvc`> zCRw+`0!b#@MLt@+dKI<$76cg$%K>IV{-)(0BP>1PtC!%1{Ay}~|N!@(W%^=ydUyRcRsnyv*>WQ$?Z(BGi z9Bia?)`2mTSJ>#x{%ihpc@wHeNb=dGd9r#Dus)c(!6_+00}+5cI&GtiJDC2y%4x;{ z(5}D6nhKb5b=;?rv=I-JJGkPjRe}ucVF?&C{qpN!7rN0@Uv0ykZ3?sBa8)oLUEt)2 zH>h0|Riu`N{sbFPqiWC}GdqFMtNnn*^M0U^)aLP->d@7;GGlsC<+ z`4b*wnz*7=ts9`?2hiOYIqGo!rXe1ioNx$Cq(-CKdB+uCAy8Cx17=$0rKR`rF}f%* zCop6VuAhqL%m)z3J7H>zzC$Ukr^)-+5G#p=cX0_`oZ-g)YD7l9An#=Mc+)n9nkRCm zJeAJjlb(K(*&&WMbDs;bjOO%=P$4pU(jQsNo#nJxQtDZCqXDOn*FUfK-vP@He~v0X zj^Y&dJTvFiUT2QrrYcm?MWe&AI|-F#yU)hcRgyAk4D8(x`?=VR-OG(nDt<5ClI`sH z$-{QX-Jkpz{+>|8U{c|8zQWT9rxl!y2!bb!3z|8D8OWSsK2;qnK>XHn)9Ngp{zb{d zWZ*}hrl;YEq3cE^n<~N>M3K{IbSan22zf8A6vPEH>Btq!WANr6kiI~8F`zwywYFwM zr_Fh2(bDs?dR1R0?S<$K^xl=5G_^MAI=i84pRrEHXP4zfQYO-QrM8>#U&u^ouZAC1 zMFrmOv40Yt%(?VnKdX)oaQ}(ld5Z}OAf=VSnwwdlU)BxH-2o(yk`8a=?3k(RuF&%QoI+f+R+z&yzxV3Kk=MB4{NGyy zj%|`R<@q(iKfY`nPlwHYih8>zVW4MVA(yWxDWPeUd6&*iq}LG>-6fApRNLYyr%}|< zFd0Lat64JSyyBEYUULgm6HUGGHy#0i0A3 z%kEpt)7A0N1Y@VAQ_P|yPOh31i@$-JU=nTNu{42R(8`AtG-|4B#%~OzUC~o)iTv=f zi(JG592#->!3~Pt9&AF+ea5w^F#oSXIje-nAC+o=y=62d*EGaqVqMfE+ zL*smc2Wqo2!BTRrJLNc~NB=AwDXd zKrkjUN_ZyY!QKMD-31f$)&Sn>56S_<<67cCk&g(|P$szk$l>9-zp390=?dPDgRFF6 zG8?{Aa|QWNz7ao-c|877CCe`~-x|ta(liaIeG-24*jsgAnd8GWLH<$ub9bT2jE(J` zBy-XQY3jUew9=}s%yy~z*-QqT!41B|rBrTXcVYI*7_e$;dB|7etQ-^65F)~}*k1a1 zZmKyw;aWtPXr!@hMNEjEw$VvGwr*0p-LseX*{&@?Glx+V6S3z-LJ@3(o0LX+7GQ{p{7o&OE_AD zmZiESRe@@%$O-b}{otXHwrJ7)NjBEZ%^A%(P=yNqS-2mO`Nt-{)`4!g{gt?PM(GVo zuv9R{mFW-#4;Q>>2ES6tc~7$rrwYrS8e1QG4Ty_#N#ftPiJOv|DnBSsd`FMZmQ{d7 zsVq@YvFdxg2JB$QMgLL_$+_O8q$?MA?1gFe9K34Bba7&}TtaJWYpZokWY=uC6SdT| z=1#JzHQ$!c(4pH{pGxhi9A->q(BSDD8cm#>s>IIlw87MMbEs3ozxSAeFoN?>ewM3R zu)!&JV--!;R*@r=mKPer1VGd>g5gu>SCNS<_@SlxjC4;Ey}u}ZpITxuXGTiTb7|`- zTMOO~9E>FZEm=1n@9#+pQdP~WQx+Pi19)`eBpq7S$z|^fc-hoZSN#S}%Rh|)VoNjg z5>ArZh~?{xEmdN37_)4Bt31DB1eza>fE=Zka_6u*pNkS1gKS4p5Iu|B?B3EkQ+j8c z?ezx_Cv$Vj!ovkGq8V0;zg%U|6I1$=B09bKX?oZz*~Gz-z}5*RiYfXzj?sfy;oYtU zb~mBew7RwxeyC`eN+Fb=A2x 
zE7-4tm~ZZT$CGI;Sst{Pm{P;k1ok} z>a;NTDRit8ljIt76MtOStJSRc=9wfXT;~XoyBZ&gKub}%Ho_$d5Lo9fl{)si2H#yt znkAc{Yl54P{w9}?_*8zWTPTa_K-OtVRoPdvgo*n~@lG|n1LjD_v#vIsi4Cu|Nbm7@cJxzk2={z6F@C#ItWk&v?X zAQ4W}?Ns^E?;Q!H!te$>K5C}1bj`HcJts=U(!F=QXCQTVeXj3i%f9@twN~xk3YzBh zNRHFt5yKp)2t6pbl^2`O5L_nf61k{pgWOfqyi18mDT6OiQ;4G_&QuG(<-vXPvN-& zlmSDf@h30XDeh0YC}lM~V1vXvfZnc`OE`0Ucd;(sE94Zv81Gg z@%Eh{pI?EnOHLE$(XFNNu>LMJ5z~dWW>(?D51jBZ#7JOi^2w~w?n*9^#=zH)OC%IZ zW$86|Qa*40**3I}NTVdP}(`MkK%>Gz5yPgbh^m-C()5ns-xG_?%^zkpVjlbiTHbRPygPAs!XwMFMn}&iT z!1C5zZO)3%)(hK;j#6C>_X_1Yj;99z!Va{W^t5bgd9xoF(Xn8nJGy5dT z?eVPmAp2CyBJb)kdJfmdP3iJUoxRj-Q+O@pbwEpT5ii^c?WCZtAwzM#bt?Zyc0`EB z?sQzDuWx_Ec44dwCDo9)ATso^>G;M$XSW4*pTl7u5TQTKFo`cc{P=uMwqPPMWWz*{ zu?8Kh_NUk~Yis3>#aGfObre-*`m$38|3KE_+g$J_ipcGSDl6mtbU{x=;!6gBGmWSB z4g2s`13XW0A%ie#Gz)_AJ_XOdi>$GuLd;ix&H7{C1Gn>zEbos@4|^3*<|arT1G!AD z7sl!AaZ$_FxL@edldEFXjpcu>NLw~zTWKmSJ@@;2GCIduSdMF)VxJ+hS`}Au_!fTC z)8wZgt@ccVbzodVi5%ULyo{TT72-Mfe$~w_B(-;s#L;pDEj;STpmgN`DXildvSdj9 zgjuivDSLi8<~bojw8)c#!j^JkWGdS)E9?ixmZa2E7Y~;z(QHim_ z<0C?#8mz(^_MBshiNyT(PzWz1ycgj)iu7M!*S{}XitX0{0+`_d=b4aP=Y$@u5XjVS zG|-kU{uVB|YR(KIH;3UtTCTamZroj=QA=wTlyEbj5gBY@{F^qLChtn5^u7o+@^x|# z5;Js$bhG#{?t+R93e2Dn#W?ly&7DD}8naPow~XgGSg=#jMhPQ*ir$E~qH&H-AMGq& zsiCB&?t~0^Q}`;Dxl7LRbHnfCHLoR-rYcm(%0@|;e9GNa0V`m2bGK7y1&dlhoY#eT_i5Pzrk>5_^Ne_Ha#iu{;%5>@Ua4P5S7A(NH<)qvQnu>o1$x4 zh_Ux`jx>JrV)eg9#~Gi>zutIYXZ#k6_ppY6)_t`kbym&dD1;~+`1TUcL1$({@Xoo8E-;Bwe)V1`?NF9fV>C*T%+IThaI92g(|@Y-OBTX+q+!ZEHrcS7rbe4;>R{bhnP>W^>82=5MZ^9uS%fhs6D{2<_SN>!l z_3YyUrKTBb;B0FBAa^VPpCWMi8;dh?F!-+;N!{M4YnBRGJFXgov?R=4+I)nuDUO=* z9jCa0GdLad_G=aZ%J;KL|8y0L^7k;kWVsAHO=mVzR1b?siT)|ZWV(otxSKshBm;A4 z4c!O$ISmY2jIMNxr8T3Rq8D6}YYd2HYYcZMT2~3D;yWAasz{UhKZZIKsvqH|_T>9e zSgo=*o6jaFA?8&tauib0j(!GjsY_;yutt*VbbdmVqf57UKj`zB6{$0a(^X^x5AKy? 
z-R{y%$aPa#$9Wt89erIBgw9sZnlNPl9OL-vwdSp>n8Jut^CWy_b_(^nP;;yOU9jtKUTxOFWfg6ilzF7t9Z3x#~mbq$))`nDe(M zb*;7OuOl5PWok;8UWAmski(XKop5!{f5KxE=jsftNFeA8aIviF7i>eAzwdj1X2L?M z!2D{-1?X-2T^#F+I&>{Ca{LpR5~;tYY-yiz0=n<=fjaXW=!Z5M3G_po?_vmcgroZn zMvX%mTI~P_XM0I4Wff36=IZ44yu8Gj=zwEyn|v93O1_%h46AEg!9qwC8X9%5;dc7Q zXiFy2(3?e&Vpdq%qspb_?08~mP zJmo`RewGnbzS51U@5Abwm}1<_92u#+*h{svKM9;p5>ZCQpFraVj&O+jB=GwKtb~q# zqARNl+g`m;=Q0L-gVDmmk=hikh{DBmF_)Z-50y2+a^L(CpabH?C5!QpZ6hc5imLs!0#@L(g#wFtqB>ISi- zxIo}t&fxWB(riLSx*WDTvpOG~2S7I`xH@Aqo5N9CHTR$dz(OUdgPqvNH}e!alqp9H zxTl;uanqAhV~#b`DPid)9jgjV{H9f9&K(eDK0-!ti`qkq>)m5G)GIwhxq zzOnZ;BKpidePpp{DSqM$6WnUIR>0#szmt?`_w!Lehhr=%Np(O5j~dHfFbina)fU;H zznyAJdfnKiWh%rMq$_%SpqsdQ zOwqFZO1RJydVYS^O%7KLG)AO<1hRt!Ps)R7(BmoYKiGNcJQpzTl#dyH;)jYhjPwfquGl`^W= zF3s2*`Wds}OwA~9-p6wOhOy?il|>mC!l#Nc@2S+YCWUtlvr|F=FHDuK4*TQMM{wa$ zPjWfJ2bA}<8p8L2pLY z3|wk2ce;$cu=N@9ylMARhM2Tnf^53NF|bn;Dwgi z%74E<z@pGA1q&gV!55du!!=jXy9K9-KO|4*+RN|I7QHK(@5|}_oATrRnGF4 zjsRb<2_#v2z4vAbelVI9W%3|6-myK8|&gK3w+Tl#EHW3X5gMui!bi zDl1v7?D)J^>?@KKfTP|-`z>ugd&M+q#qRbHS?x7T0JPk#te3RxtXJBi9nWFqw+3XV zo?w?&>*p?kJs}jxsdz~X!L%Y~VWa2$?G0QbRzyT;RI7P(u0|6d;_!*NYttwXpRNP^ zmGC;t`r-^Jr4Xc}ZJF%IQ5O0%>$bM8M%G$Y3Wz2aMQgKCMKe1Ky^8#*%2WTv9}5J+ zSgfO^|A)P|3XW?@wm@xx1r{?iOBORTGh>UHnI(&vnOU-!C5xGv$zo=(7+;?=aA)Se zIX~~~McfE|w089F-d$O_vMMWcE#VyEhunHbchGv99;%e<-8J&slz`R`O5%otoO`T#ibACy>te4m_ zS){O3*76ys#siyjy=isO#P`)=A?h2R8qC0TrQT0|$^(gq0vNQ8C;!&i@#Z*e=c|Xo zGo^~1V>clAi-Zxw)h!6nN58PknP=5|;0 z8%@h#{TOMHzLgNA68@K_bb}s!r^MXqi1o7 zHHye(?6F)Gn9YLs*KQ)aXMI%%Mxurm-aKtJA$({Jnzml=16;p_YFTIcZHmA#6m>Hq z>?ahLsQ#UwwMPhr9~mR>Sb#x~;UQpi5&PYnk5`Q`TQrq#JV(pNx0W)@gw~os7B9R| zkV>X0RUOPPgmXSpPc(nD%mJA|e&lUA?=imuRD32px3|J=AX7RY24aINGYY(pVOhTH zXkX=fOcLfzJo`^)Zdb;-Z91c<%F_&HS-OR}B)v)fR5$A_EkKV=2Lz+sc~1OMCfD;$ zL@f!J@~-=%NA{v&{Etq{x^HCiuztZ*ZW0smf>@m{XSx(!P6|t-AvTfzU9M8??mOOD z%cP{?r+1LW!F=-+{Z+tTzj3!$s0McDFNdok`6BXP!KP_8_g>!3wy)>ty0-gE>wnTR z$8K!#NSx}x2v{lxO`@Uf7SMHx0eajem%#M%Dz6WJ^jz62qO7^#_g^c4r=qZY!WCy$ 
zVl|(rLf?GOT!d}&-)@HQL$;6UlR}tEbh3G3Zl{#HZmD5lf(_7&Sfr+CU(x&4uF(c| zw)RA!*^Jr)MtLl%8r!E6cSMtQGR1S{j923|xNe+lG8Wx% zA}8NBdlmz&W3YjxJMs;vq*vqf1hdZfQ1%e>xD%jB+4n}6)r?vM+lyH=!<|rG8lWlP zw~iPDk_D;PEYU06o&aUrMkJtsRy-w}&>`|_j^tYuAmQc6FO0M`Xp zu~m>yB%*R8q=uom~%)j`XKCTmaFaHU?7r@`X9c9Np4{H#u+L$ec!yhoDn%YD(P| zKQ@Hsi|$$mP*vOlgBYjSWd3`hVu#u|QZ5+9yAP`&$m<46I*tn!gyF z8s3Hk88hjhXX?i3Ar_`@MTo333{Se&-m)zW%t|E|V5YxB_{xFgZ`}d=3KMnJiKS5H zUvVL&$$f%#!@2p6(BSFzjWw@}G%PL6Sr}=2Iq|>J=%V^xl>oXtxPU7Mo<(QMF!1Wu z>D_ke++Z-QI^fFTZUVpAU|<{er`N!2JB61!#wp^o9Ws2xzD#QQA^g+hNXn=z*JYd4-^@%wQICvh2?mPca(p7exP6u!2T8QnIKHoyw07v%C{n{C8~LiFExf znWX};k8KPdk3^64b*`7ifh{`A=&vS2db{Lwda)=c6IOx;2PFNK@_?}tBY*NG*F?6# zR5oJ70GnJ)4M##oRoS64=9wlytUdQgxyq0`7SgV`ycQ9(#VN1wcxB!m2mf7jINPMv zytBc^&kNX9#4gzW@9p-|ruW4AY3fG^q5kMB^)@iD1|%zAF%v@P4~O!p*F9Q3o-nSJ z&`YyP(aL(kyaiN|hzy&5)OgBjg)}Vtl`RE59j=Fld5DhLPEiX@=uAZGGim|X7eZHz z6{uoLM#~gY11`*-h3{sp9wak%K7^=KVh^=VEg&mgsst9%gTfipA^NyvEgf-l$wcpy zc8Ny5L3@EoSk%;TI$T+d(~p=#Mt_{V-M(UIL>5lFq@5lT*-WTue(6h%r!^{(5mlbm zXOY<(cXqQk0OL{fs=(uLir~DyVhlD(gb^XF>T3`AqIsfVX9QtDKDp5N_d2;5#bTwk zMNJNw-Ee+`TBQfm?~RvI{%tKkMIeGvrJOtkw-(=TkKx;opF0SzUvai&`;OSU06EUWTJBUB*)x|K!{A z__F(SR~j0fYp-Jeg-eY)V&&MvkWzyFkDUE50;MyHb1W?DkuNSZwTl+1bpMj_RlakK z%Xn7QMAt~LXH;squ!qkP>&=ElKFbsh@-<`ug0ws@;3tHF1^B(){S;%0CS4P#zCmD-#D zR{aeaiJ5<8@MZwG^DMJ$VtRHYT!gF3>X&P|F*E9o2|tU%lK{^U^TglsVy7H>0OF$1 z3n}7x&o>nuB63y0PFQJ;`%c=6B^yXCmmg2 z>EMb~ZGpJC$vHTk+e5d(^ZA+tO4Sj-J?$b%NcDFgURnfYUlFvj!Bic`vfeSit+a;? 
zhbWanA*nTK9Uv`YDe)?^*P8~Iw#r;g$}o?f$Jst)G$9?_L1Oj2Lw~c?#G8 z-Cdzvjv;Ou`Bo-1{%5bN-xoQ4gmxSc6P?~tx+#KfYy0yGvSq1LD-p<3t1OQP5)U5% zTSjp$bw?B)*MOeWV~`*r?t);UU(eE}zer!-d9&|{pS{0@kZ5y|!q~qZLv+=t#C0=g zfsu{bGvR2hqh<|kx;ydgw|F^sRJK9`&_@!!4WEpl&Gg}W?s8GE)cQdCqj!iqom}eV ziu(o!;_(^vjO~W5D*~A;Yk+VYP0NHYy@@066MK2Ir&*rOX?&XkK**|P?(#Rl;A>=GsN~j2+4Y(&89i8}j{p=E&1}jm@n0FtG=(B9HxHbQruC(HOP>RL_uB7fxe> zz2d8|nE>+Bev+rM+tuuI9&=k*I&XIV9@d7MjLE^q#6DFR^mB}dyKJe2m1GAlnS^H0 zj*IkiGy^q)TJ6dgCjM{2WynDXf3yy`Np84aaW~Mp#VuPwT9;xt&BF~%{Sc%%26m-} ze;zZX&(FWiHDBW{EW*C<9!u9B43pC_DS;@qUDTUa-hC4g05LM`aXb4+CGzJpR>4#= zG_uhs2_0mp@7<>mTVYUf$tg}Zom0J@m=MYW`BljCRZU|>PJ4(_+2Bt#dI<1 zfqVm_ZqWE{IT*atVQz(cwJ(%=O4Xhr`a8s~ggamO_RiO$mUvM|n=pL^-S|en`=U!@ z@?ou47v$jSo)`1Ri|`LL{fQmBlQZhcoos)c7xXD>MtT@7AJt-T9C>Iz9&=e0U-_0) zrsry#;DXUb_-+wMH#JmTfk5Ip0$!NJu8o)}bV%yQ+~;;NcXiN|$4R3A$&vH=F#g<`U) zfI86Akv5sQm*MSNYp`q#{4hA5efEYBK~FsSRl4T6&}VP{c0+OqMs{-;^}A zwaZI4rFiQNx;%yNZvsJw{eyaNf~C~bWqq@N1ML^O_@*U*JwwNT-j&wOI*;voG^c*I z^4t7SLK~1kDZc8(s(x#qH`yu-N?9tG0TQ4{j2pz3SjAFIAFSx*WRh)cic6hCigmb5>*1J~r)-T5XEYAahT6 zNrC=R(3qge6%mUm&@AB?( zlB_uB?Bb&9`7$GZwD?2h6pS<1dc~3@52PgHk~Vs7Z21)-`-@-Ax}RX4P^^{nM$ME< zl!kQD+{{bnL>vVzG;>&(bH-nu=cmT;eZ42P5vF0hGhHY!O`#j;4oaO@z-}m=c~^7PyV9^dW{{?Le8Gzk;dc&EXxWy%lltrPxV) zPj_o@`;903keppJd+_@HOX3t`ka~BxXH}aO4{+X#=y36K_cGZ9IkuKZ!L0f>=kT4? 
z@<({WBqw6GO?{x9`HpZ4VWnZ*C-x3+mILFX0SF^L5e{N8tXcm`esI zucaY~v^u|d5J((7DDfQnBQ4drYx||!VnV+|GA7n^N^_xpsMzi_3_!dy98lFYrOcmjd7rGnmD> zWen6n>R5~9h#YF=bx<@lHN*_kNKHvChRz#gG$WV`G%RYwbw+KssvI+=5I%5}VvxSw zVa(|sldG(9iB!MAzSy0+oMA5+kvzn{)|Gfgjrqzh5^40x=sBtOx zv1UQ-QTw%5(jRPB^6*eV3E8g;1&6yEX-mZUxE(EqrfqIS=zU6~Wp8*Z*V?8q@b#Sy z70ZNi=Ja9GtJF|8K}kxS@y+OAmPTtaR&8}0$nC;Y%rgn6EJ)X6Ux_wKD_4$Mq@cTw ztP1Rc$INk=_B|&w6ybFYd&Y$H)~%Ak>DWMxfuwqveuG4A64uPYY*tqKFiAe#39eso zxtx^YuHT!^%>e09au0Z79PYks)veMSn!fx}e7a-lTBehz+aG9A=JFQ6aXe<)#X}O z3(X$``Fkjj&CKl+1&hN`T!%4+>vbaHD|rtmagvb7Y-qm^NcEK2j2-6Ws}kBF@dNgN zMO$Uf`b`1SM+0axTKx6Zas}=sW|YIfxAtr*WHe%!XNVs7a^2kGJ9HOjpg{N|mdQP3s*9Kd=)BCJbN8H$z_N)G5{b%L?g$v7(d+d9IdK zv{nMZIcMHc%(EEbLf)?VK)SWF*zXFOtzBA=0yBhs5lqz&14$z_7IQz8>9 zMJ-05J$Nnk zsTNLVS0fncNC1Ge`&F2%z1Dm4Me2+b>=)YZqM^<3v=i z+FTler422sv<&Ny8he!}v|V;a)q!)svPVlTW?;?mM9q)m)MY-1xk`O_T!)I$p0Qf$ ziUfX=?<8~2k^O(<=+O`@EJS{XW&UzpI;&OWJmx>#Hz z!pN!Fpb~t3fW9=P^YF{FPKD$|L#bzfECj=K%)a`)L$JvA*}FJ(QzU5guY%jh4`$X) za*iWj^hmlIdTn@U?lK+CJ1kyT5df0JXZ^u(_P~n*gaov_`S*_8d94phH^nAkZkMUe z;0*~{_eZ;etqhGj3(v#2vQs@|1-*>}dT`_%FQd?fG2m1{_ zn23i_U&6;7!V&Wfuia|lhcPr z&R;9Y+B>lUf*%RIgn5y=51=je*$u-zVWbO-E<5+s_NZEl^73*9snq^kETb4e&GN=t z!0j8EGK)iI@3;HsI*(F^ndC0B5fa}(5U^&;4)7G5^~_k)Vr_{olzkix@D-Kr z4cyKY)aVD{j0$TN<#UBI!i`hMv(} zGqrTIVg$2mzFGIXp3GxVTxu3Ir<&i4t1L%A+W{yMZ$XB)Q2S#tS{Z)q4csH6D^gv; z*}l7sQ){vs8w@Fn{e+wR*AonZ7Fhx&Z$DQ~D?vp7Xirx(@@Wm&+xt)U!C z&N(#LXt<=jhDtDWy4u4vT;V2P^Z*&*+GVGfsygJf?cCQQQnM9Sm|LHj%LbMFiW)F< zDr5cpB42vVX|1)36{@%#Z;+?1*#}>}Pu?2hIp;IAj_A{#YHHTh)Knx2F{?(!__6Qj zIy4#WWs})`;LHV?8)oaQ7#o1>N(WdTrOI4)N_G7MlFXK;~4JDmU_VtNsk`E<`*^6>ExHtrN!Yo?nLq_L0 z5Q@HYJjYV-*JY9oxtED2ry4R$kBom~ARGoe^2}ZKN_!WLUA(&y)T4Nd+B$Ki@;22e zyxcjMO{q!2wU2ycW%p*EN}GmGjDDiG(Rv+8hch_WwV6J!394JJYi~FC0#}cUWj6wJXr@*NJ9BhG zETu+oF9Ch|w0fn;V}_%|0NdiNNgZ&FtFQDPxs1#A9kzkf2fjK2!IrauzGLN5*k;} zxZ931OL%^R6mB}69Td$!%gUfRI7(`acP+G+npmK8eU3ItaXXwhe$}BZy zx2cCYCf$m@quvA*a0|$^PnU$jf77`L(rpbQVz>uLu?suylPaqMikpiww(oA~u3z(w z>T&`W1^ilL!&?U0Su#$9JWnif 
zBTro^2{BGBy;}g2P(N|9pj#gU%JgCK}>9>n4GaG0{NawXb4MN6K& zjVxL8kgZe@vnqEE^&8qsj0~;5lpb;$`?nBQ8mhsz%XV-& zI<>cHYzSQ&aO59dU{p0s$9VlE3hdby&AiJ$xljWH%VdV3V89$6toUEk=_rkeYE+8+ zDT5mjCZ*(iV7c7%_0NGR!@Xg|$-3Wva60=yfUIb&)wrBv0adxly@H7EgTP*$&-fc= zZ_hXLlC?>Y&Gf)%GirWkSq~?n<>lSA3Wh|ADCcZ_S%j>j1=?>3%dY_6c3@;j%FJE| zOcd_YzLF(p?H@eXTj@l98qekrW;{OoZ`e>kwks?NF-4{(o3-ea6w(5*DLa(uoh@rk z=7IgpO#}ghg-?$JN0R3qb&}#|t9FKvtet7$WHM#w6Ik#nHc-fB>R^9Fm&ugr)zi-u zw3GtBKi$k*t(WPu_*?$tTmOSTS6K=;lop5!5kM-?g$SE1No{H%^RIdAr937Jv5rKAJM{JXYHYg^Wq?=Q)8f<^?}y0`z}0@#x`9eKq18&naNfVzS5%CkX7ZEPQMBtX9eFamtfaw$f-J@V1+g%*!=GLP_rJM8-yk|g zM#on|%dMhSzaUCUkX244g){zQG(mVx_uTzBUyjH_&&cHnGvZLA-EOExU;Xt9`3>TB z=v*iyU+lp~QuS|rCI9ff`nXE<^AcW^Iyd594+LNsX-k0|;N z=L^Ri$NQF;yoIKfC?WF z?}i0&a%&No@nm#4-w1GrpX%Ymed2O3hOhP*v3i8h9AwDsB{%hgJ=9tLOx)!Q>*3vo z-1&UMeE;T(il_SqqS4GSDX^rtlkgr4sZu9 z=fTD$K)@L7d1+j~(tipS{ENy5fQUaNCB>sp1D&%ybL9#e>Zj~?IJ%`cMS}mXpYaKb zBWd$_E&egg#q6LIDs|QIl6K%>7ypu;`|J}Urf^P zFCLI2Rv9<|q^Q-Fy~kG$u%zH|{8qpV119}Qe|6UuDez71rEHnbd3|GPu+_2OPr-oS zic1Op7m78p{EAT1+eympRZt~;V=Rd%!XUN43#b{0QvX#<1Sm4{Jq2=ld%tMIZZQas z&Xl!X5Tpt?fySH*QN;hdQVl3f30-};y){KGQsC1NIQL}KXC6~=ly@Y z=9h$=uF60fkE#lUs}?0L{Dc&m^9g42>fI6?QdH(&zV*DmT4y6B&f{vQMWum2VX0tcV6=huW_r@~1k{=(ATUxAjs6Qt{3^nOm->%WwQ z{B1Up^lYt2ob6aqu^{3EK0r(FnhwPO>%0E*^%gkL(jIx}2B-gsf>nrHy+>G*&_sVR z_Fe{S~gIQjoqHmxs68M$sPFqI?EbET0ZZu=$bI4?JDQP0< zd~e8nKDxAODxdEPZ&d*I3y(8=u!DE!)zaZ!(4>Eb4#JSk;B7WU=M(dFz)h9Kl)_~s zN$kkig6$)Zo2yn)F~@;5d(rtNInGV#A+`2y_2~Z|!3Ze97+0z?Pzdl)B|}ogxPmQM zWI)n>(qol=_4T|mx8i-a(Bw204>`57P2oyCq5pQPLHX~K>R*F4KiR5thGF&0_5zM) zmbEOlpR^Xn$JNa5RL5Kx-OQV!s5FIZ4bz7HhANTA?I*Tk6?j$Ie|On`4Vu%hkVfe` z%};moxdT9eU&=OZn-e5RKpPC%@P)A~V=W0vP%0{L2-)(BxO8b9|NWoX`>&t1#px#v zw8F+C43c<=+81Xul?gA9v>N(FA7><``-y>j-?JN%E1%7y0C%3^{JXS6H$^@wk-vDh z21p}+on~`HTXjodq{Cp_5CMTD+PVv>eIDBdlF1S;YkB4hoJ%dp@MyUXAQx=SI z02URM9~`I_xUmTt5y_^)yyPBEMpI-JI?bR%(@5(thlYE(HV%o=DeyyZYQ*hdGtY*S z6Ed>MbY^GXl^4wSe|^bv&?0Yi*u1vDDTjTa(KIVigNi!_=a5e}c_m;jA`|kA8EFk1$cEoc%UKBiP6aAnuB$#1)kS}9cBH##ySumkG 
z8TH14SHKDQrz-IjQv^ zm|PJE2R$Y1#aP-!7Djg+5gE_F{NKWB!l-pc?5l%0XuKd)^A2T-!kP8DGi4~S<19H- z!r?nX_w&cRipp%%e;-)f<{1j>&p!6eJKpE*9CW7M*$6*mLhUd6xK!jD;G z`)&psRCAhNZ+oBF#%9*LfQ3F)#vOR1r;U`ywgAd|T1-hXAI0ajkVbHChPQ7jkMdUU zmXxi`J9)A5)oEq#_IPctYXp;;I#g&w+76|~CWK@KLxNxK5I9WbO%+uB4kF_4pXt_| z=Y;w*8fUlnCw%f$WfH%uq5osjo08lq0847S2V6_UU9+|;^;KMR>ab3IXTRCizxwHr ze3?f_RhV0B4`<-PKDb)_Fe0=`Zx)Lq1Li~YYzHbS*@joMY|HFdDv0=4GWkz9)(!>n zG3P7=4O_QmwzEjKc>Q*{?B*Ip(8Sb!U^&Z#^Hh0z?)&*G96&&i%htzgceV-U%Hl+- zY2ri@xecb{QXH*ZT_pp5A8hXSjcvdev*}PRgQ+4>NL!y~lUw!S+zX;Zm=l^Z$-n#3 zG2xad;1#Od<7KJ)lTnYNKE$7xGTCT^Lg6(;BLs_W~x8yGAui6<@_Fw z-4py7F}74Jn{$FhV`i=KvX8CqT#K^TFl2!N6+cN!^t@x!6!=`Ktb-?IHgGAu)t6lA z&0te7hifr!jN+*q_UTwfYMro$;mYh?yKpdIEDAKRk`wL`76W(;!zz1V^SyQZk?R%2`mV6 zu#iEsyN8pBIZR}$U^r-%o=IT3MZFqhnrzzme&5}y55l+!MOy!5&FE4^J~N3C1orQ!9J>wHZn89Cmnor?16N&`SEPqjgM78x z%kI3X4U95nIx{fbOoe^UscVK7@hlJ9Eto;6w~{OlRxB#J%R@~G!^b}u{C%{eF2~FI z{o~NVZNwMxmwot(Ux{CWicXVg3k+IUGpan4rZ{+q_*>m-07#I?(-qa=QcVV;*(TPa zAiJEmxuM*4aknYOuoENO`-jw?rq5looeuAkT#sjN_HG?W$I&+Nqpn%JNwz0T26@D9E@n1^1|_zBozRf{4>*Mx^I6*^}&v} zSdAW-kA}TE2s`f=>bZGn&)XEraO!kbnMe3DCrs3WZJY2V#VL-@L;Ol$ldQ&Yeo7(o zPUf9;V)=!AFA;@(mL-<{VndCUuyr8|_{Q+mVe-4wf?Hdf_HGmMIDnL?#187;JV~?k zh`7gB;kVZHd=|Z3$UFu-v}0be{*yd*Qi5o(^<$0{!~W0> z1?HrfWp7)VC0nZr`s14#tmBVM{O<2LsChB8xdK&7IIDh*)YP^5r(|bp`ST8~cS8?0 zIH5h8*nyU`bymqp0EwMN_gflDG*E9z=-qx`sLwk(YBJ4#gRVoV0Pmuv8Qg&YRMRp zWwHXnPa7TD-{l0pdMF37zqOtnUQ}sxsVw#FaIg)cSjyabNl^B{3yh-@dX{>uGp z*{Po6NqZQn95u9?7CfdTT+_Xi${g60OVMjD&UTNvYg2>NSPEeRxKXqNjBp=Q+O-(k zQ+{iganM+vF(s+LpB39_@(KZ5XFeP>48uexP9F?^&c7I+VP$f%f86mP3*YCPUo^>_4 zD3C@6jy+8uP7=Y?R95r0KN?}WEQE3!n!km>^N6nd_2_^~N1v|*hFgd1rQ0j4ddA(p zgaSU3M%A`y%wmkbo7h&;2n5xUVS4jePZ~MJ#_JAS^L&97dB`nbDzY>Ib)M&H$GTqY zo~;xMJau@gl&_VM01~=I-r4tCHyiBL09Ku1(HP+H3&u%zUlcn7a-ISrqmW}S^`APE zFIz5AfB7-o;LcDUxC(E%js?Ts@N{k`Z9PcyM`hY{&~JqcV0g{(!`QvzA}Xv*aKtNT zEBKIOb#1c_yAG^Ya=o5L_0vts_6bC-&A|!2Zl{el1SgbPvE8bU~Sf zedf6+B%Vt6$5P&i<}=-jGZUHtG)hSyHF&HBI8-2{o3XxC_>2zW;BmxFxWO+d%EQ~L 
zCs5D(XBqbDe1zRaD&GpN%Pud*CLWzoSyOddvwP39pEoKf4|N#I-YV+6r3j8qBBjI> ztuM>q>W-KE5x<8Os}Bm+f3Txu=;FtqU*d&xko6Tl9b6DgT}QH)ucpe4k%twwY1}J( zeYAO1E8$TH2DX0HDBB}F(TQ`GXT?uak@UHmQ4fCT#UmZ-BjfQhc&WA&X2M`8_n8*m zckCZ=SorKk3L{u$e}TbdVRDdlWWElQp|~h>XiMsb7{Q78C(o@A(F*B4+Mt%Bu(V5tJEa@lTUUA{yVH*)#w06JaqQzW|-E@ zagqe%R8ot+O(DCx&A{^==}}dWd6t^DPYVX$yIygH+TvzlN6o>6!!1-LqRp+}xvPb5 z#fo;IX)wwThb4C6)Szv$=^k6eKDqo);fW2I$KxMJXV5wJoUxlkQ)9M>r^|~DHvO0O zB`5CL|JX4T7(%%XRAtnDQYvJ}1yF)aScrEMY$XLE4JWv>7+Jd&-D>XB4^250_M!rob@>+zRBI;%9I>Hr5Uuxp{1{>aJ5L_3q6}xqsUUc4QJ!?4!v}xvV2wo@1@86t*|9 zORU*oO;_p+UT&ERI;-Y%(c21hansTz8-GRKzug?IBtt|^DBqhl4fbs2zN2mv8@Pq7 z@_{FF$Wc+uw=^MgF6BJI0jTQW@SXZ#V+0{*m#?VTxg&pZb#Y)|H`l`_77dh{9J(C5 zic&LA$`DSgsOnXAN(?!b-_)6SHrrW3`Fm>XlQ zwz?7|_t5WPZ*Rr!;`ns}D&r~GIN*U#xw%x&vbiJ%pb*)Z+I6#|&=z1ttN4(EQ7ah4 z%0tH))cnbb_s_&QGZYA*_LLPWRl*7kc*GniHAA9Dx)l^udlFHAaibZh(nxma{+q@4 zk9<=2ni1QDSyV1(<<280m)HeYahuxYx8-)wJ{7}6(d>~>vIm?Mis0c=G$E4KJiSre zpT$g9d4&yWzt5EWR7`O>%?d?CDS`&Eo5hinvb2E5m!NdssQSf_+;U# zb&;)#@p7{@R`XzqD-OxDIa1KF3uT{d8WmpEVL)+3r%>b&g@}Jr`8{k;a}X9Cl+I)P zbEjz_4mz=bw=a07J%CG^GP^g03r+teXQ)q|7|Feb<^DUxq~kVk``0U7Y*OzZqAfe* zO$G*hEU0=F_N2!QTv*3)EWJjNDIM*opszb;3hJ#~uZJW^PSwKxUy55gd|o*5a;#*vtwlqFfNacrl5qCI`^t0ky#0 z|C)!|tMCs%fzw%vHC>qv5@jpB`)h75gxxow5J*)V)ZaV1nC zC^PR+=Trv^lZy@^>}QoBHyi-e8>?Mr`3i>n3qb(%JrA|HJMjrvOg~|(E>3>a?-|+U zLy3mjd&~RFqJSwhgJI%(2$0sp{oNa1kbCPSl$!KL{iZWlHpTVJ$wrQyhI>-Kk^|SQ zER95_zCFM1EqM?iAp2IoN3ZW~khU9F@wfLi@%t@e3VSBvlv7$SVUIe0m+1<0oeo)! 
zxlWzNz~yuXpfB78Z#RuT9Qy3PFeL*VbPXJ7wGrX}d03VX`I&CnIRkREkTn3fdD|(n zL!w8%>h1lxQzPt%<272Lj{L%0I+lg8&cQcOS%>X<7OV)7g!9pT5d=;x_4dMwIk=oI zRIPUdPlB5(5?P5C#a-Ta+PJyl-)kH`)3xSirKMpco1{kiB@lb!3mD5pq6qA{n5mF1 zt4TFQ${P{LB#rJJ-Z-XKD6|M^7k`F+-z0~!{X-mSs{0UuQ=Ck`7t7F=Wm<&nilA%) zZTZXcKzx2}S3Ft?8r{1KPIz)!l)0*n?D;WM6Ux^?I1%z)D6^blk2)}shzhtqA_lBz zwaDNF^~ZI;Cgn(P67M}^>(ExBi9HTJ=ZKTkrgJ@Uu+| z7(~Z66?6%43mytDD=qlY2Z@IHwGR6Hu`IpL0&n*vnb9b=@BvV50BG?#Llk`l-^(a33-Xn5K>rf+K`f} zI2-m{#}2}Ylp22bSR3EqngU`f9G5FO;uW*gH=PNyiA|IA(+m0TK1^cY@N=}-M>-d= z;K5!+@F305TcvO&rlQdy^6y;3wJ(!nNSUCovm*K%cguTfTa%AwSJcPWPeF%jz1J_SsAF9IV!`E-JDW30=~OcOa8K;IG5)9@L?x{< zTLYE5OFIv*cnn&O;!-7!D4;pVUxu4eUWiY>^l-G`)#AZ&HvuKS$`{5*p`3w`^_<38 zsR#XdgYTolanqS@^IdjB;6y*yu~klFW{f8qK}kA5ClX!G7=W~_4Vwbc$(g#sYV4k6 zMrnyjKKHC7-v%IdSkzi(SRK%Qk=xz>-8kqPd@CY5orl71sl6Pa;0-NfzEbha>3pQc zyH?od4n6`37l`*Mq_e|T2(rRx%yNZO2(;h=w453UhFkZ}!L>`ToMh&{&lTBkm zLJty~@b{s(y8sEHh4uL0QG*&*81t&zxHuaLm?sM?4(H8maVf9&XI|JGvw*Q`c=qGo z5(Av0R5mvnoHCTd9%;v@_zfFd_70uBc)3BrDZ;V>fvSCp71HGS;y5iUv9KdN@3Z4U zzqwbmp~1+!RHK!YX&mpol;*vs%6q5WbdDMCQHzQX2{uR1##4Qg*&#*tCMIgQkQ7?z z-n~dAs>YJHQ@XgK2${Gd&fS`Xgw^ClYMkRTWvYBLvg0>kdU{mg@DeZpm(q2pre_X=o)6jQ@!OnLixjSN& z;K6zpg_n;r+p^yIfm_09u|K-cR5bZ&w?bSk)$e$o;x~0uU#QgOcT~l02U2%7leM@h zelZ@0@_W9*P}K8$X7)35Q%L$=Lv4uW%CPve!J1bb<3U5&XN{9Cn>#-rt5aeqXG|#R zE{(v6AH2<}K5dwWo*BZtYxzhlr^)8J0^bgUCpnUEV*Yv1o(}zIh=3k5Y66A`quC-W zq#6YAhhFuU1&LeF{Y@IpqRZW-YnI54E3{wsJu*I|+3$8d$=)?sCD*=DsurFfAN;3G zQ|1kskKq59+7tN8i;2#0p6BLOR(EE5X_24?p|eHa*c`^6!+!%AI$czilnd{LW1NDW z*8y~dEi9g+q?H*QAS3O%0a;8p{S-|k1}I-gBwyEUD3~0PkvIHQj@*?>iCQzbSvpp4 z=beu7ZEZO`q7~Qj&jJX8&IZX|l*R}(g0HUzOvh@a&gMifAHBXY~NJx(3udslznW5F*r+T3tn0J>S^;> zq{^Vr`bsT<^Vh!cA=bQNW4-k|LUeGXZx#;2!tk5J$y#9U+{+nJTB<3v~P z%Cr2)m_sosXe-SBg&#htFo(Z-f59YHjis_SSl;)HXv-l~?EOyWIl*iZ)%ESYH{Hkf z$`*7&dw~yTt6C%bgF;A-WuPnplu%y#ceM!~JICp(t7WE@xRY(mcdVD3#LEnK&t6WA z!y0Nwk_B!S!_Nwxj#0z+3To;2TMU^|YXd779xDf5&&)H0jFG)#79co7QaJMEN)_hN z?${!1Xwhlnj=I~o;7LRh?cI#ls4BdhS&!K7w*CLeM5!GO&9~iTxkMauRrWYeD$?=& 
zv&MuO7Ua0;lod97#1pXrE$2OcV^zXcjkWIzk2#dJ=Jwq?X%C?d#!CaU@=T2O(9RQjIueD*&62L zAc`|XpN<$@167m}5;)R(3dS=UJ1gA~dZ5K4W9h4r%CM=mmSgjIHrvoqJ?^}L|B3%d zJCY@;nkN{w=DSqeq@$)xd=I4GA8vHVbEdxenewn7h1<;KSXRUFw>B)9P{i@=S;bM% zQIh^wwk9_A05+#z}7+%Sw=?qSQD0S4&SN$`3GA zs2P18RnS($tXnPTwfY7}R>*3w<;poVb<(N^IzvkbT>4%N?rR0@d2Q6bA zk8f9gF8N$aiy&Uv9c7AJRpG>b^rZuv8?vx3{G_ME@d~oEuMJ4_Q_ubt% z%UJ%kmFY4*0iOxxSa?)>=tDzd#ehnquzv9=wF|sRs{oy@Tq`*7h!U4?WmA!zcvXMl zOcY|(y2-oZEU9;J&OdKs{`nIXU?2Pf?a?laPz=)jjGA_1CjuaMEW6)FZDS_NGP&0S z-v2m9wvOwzJS%{}d)F4EUQ%3f7LY_9e|)9{wN_R$h#DfK6tuMtcTkFd zCH@PQm-7p_UpE70&sQ1maO>YB)gNJfvTZ|E< zQG|bh?EQN~-o%x6Z&4Ch3iN7$2U?^X};9!79{rhARqTo z3X{^Bcby(on9aC!NVFkEs~T*Fg&yP<99iMeJ|xO8jv)FNY&?stwOp=yBoPv`5#rU* zR-89Wo_L`nM;;UJ$3FrByK(q7k4qJ;d`YWd53kH@x$OITC(H4!F-c#%b56>A8_mOW zenP3200q`uQ}x?S=B_XxYk7tM9>ap&)I;haNcM^d#e`Y_`os{vg8|JDN8bWz&hIE!&kjEu@?6|t zFW)_QQL1)c5@EbTjz|9V9 zeB^$-+;ODzWPj`~Sw#)ind4nL7 ziqK`9_k+-7Bf+DD$cCIGWbafripFhCQHGSj8=yUl18wR7g+rFB1R%}!(-aggL)1Y8 zg@7d2UY?z@hffaCHO92wItscn5uCEQQJclPZaNM%+_m+S-XRY)2EleDO#Jf~HFeX7 z4Y&P9cJQ+okzQ!HKx7UA&0rA=iCG>4+19Tax-d!0#8JdMnG5rk2w;lp7PL*JV^N(zhF z1yxXL6;rtK<%DvXR~wEW*lx$#c&kwt?1NCn6JPT7L!XTTZS&(N^B?;Cgv>r}1BU+D zYeVm?7Y()jU5tj`j7=)*N_ z{C?xP4mm(MAspIBJk#1lpX$m zp$1wS&hPr(oK2NIA1C1qemxVCTXqfs7?#btl{=PW@EkgkEOHK#Eh&GcoaAHNPwk6z zri2-Tn#*34p{7GpFGc+S-Pxf3XKylRYnrlF4AHXos;M_u{IfOLIVg6nEFf4xUl|aI zi4k_sUrw#d23*W16+#WhD=nZE+Fo1lv`l;H#eqvsAo(BCu6Gy*FSl-@w2YsQggsAt z6|`GmxFj`2h5jTW7T9>8ehnoF;d)}+FRucYzO3U5e!~W1Go!SMPBZMZ1Nyx-deQ&b9SZ z4QOZ-eUnRF+N+_ixD%Z{(wGaP{bQDt6(FGk@Z4RPn>+w+CMOOWHJC3YgVs`^QBX^; z8MZFQW-MVTXuG*M680GCMVHOTFyUJ#aD8Oo+Gxl8xcXW~1>aqs(d=W$y6d-W?9(e_HE+ur^TIpj&JY~j7|AAV1sze5}GEl|bB7d`=2qT=e$qi2oJ686a)W2>zXHFK-K z8;h(s2ix9}I2#Tv>u@8m%8E9n8@Gn00c@;5b2{U&d*A}#jUL>3RJq1ES6rZo01uIwO$hNqWOewlgY z2(DJBXl7(jpAtkPv-L*V#LPny53A}{ze9|}4c#{tmjbSv9}wNYo|S#J?s@M1`$1T{ zrRXKDgH8p6D^xptF*5wUI<>+y-FyID9Bl9{|9eoc*}8Bu)>c`V`G){Ft4}^V2W#Mx 
zq9Ab7`v*>BfC+W>`{?7$3nKFEo7GYSjF|yrq^re=mej#*F+Oyf29kPI?0x-;TIWuwGeQEwOG?*XoS#bxZFbEw#7=9=vN>vqo>(|m|MZdlW@$2UVgdm*MLQ+<&>yk zDGd6v>(muB-T;P?rC0~nntbVCn!iJKO5Fse)rZKwTGWcm5#~aC6our9G(Ot>kd5|* zP0wufKeYzp&$2A63go_eHQ}IX8a!4kHsSxIhw(@7nwxdZX=_B|4n4 zi@0UzZ(G_K9&YYW#PF==V|+K4t>+f4vkR-$CqB0KX2hINPxUi@l6YGv~f>J{(}I*<$wXH^u-pQcX7A z=2|(8#j3;4a?e#rLsLUS%AhfBtpX#ZZ@IMY;2BHbDkbOOEi6%vY50bc_u*KDytSyb zw+gV?pe}a}T2(Kv8K>hxGnq0g5f{-Wq~n??tG@m>thkZ>z=|+_qLP->aPJtw`WNU= zGSj-BuX4RnJEpN=)IDo*)R0iZ6Kvf*dh0FM>eNkisU8=72uW{zI&p(XC4t|BlBBCz zm^5}}uQi`&yWz9zzURE7B#F%utrmI(a_;mq_hCf@E16&=COwv z7e4Y;AA4-({WzU2TCx6I)lj&OUYU9r;nb@%^tBwdRSbdPK$n0I$Q2B*lgR@hz>Hn$fw%^fu+LzGS+Z?6VinQ8PLXmhJeUkQ2pP?<) ziT3D*2nYBVyqc>xJ|zlxOIc^Eh|)5iv+))Vi}jt(J?>Bi;6zTy_K6wzp%MF3<7eg7 z>RE=9VCBNtIIV4dkn1xS7n&Cp7k=KLH|*{4r$q3l+GHB7b}9?OR6gwo89<0GP5V8< zZR?_`6-u}jh`|#nH$BNvwf1ejdd$8?^K#)vXy?5<4iJAd7s67bnVMwdgagNv_FOjdxFWMlTGm5-gpNh0)nr8Ki9I$R zyJrtYe*gzc7X#BOSa|FVEBW2!WWj`SSW;f93%B1KS&H0qO-`bQ=z)u8A6=1&d;L5Q z=TjJbdu&k~q=ab)=Z{Xl=0z;(IF)*_S2v4kUkCb{v8R5{pvw&hS2y&{OB29Vmr6uRzdomP)AD|bSV`kki%{OSc68^l?gcFK9#D#wFXYa$E_5U#x%C};7 zn}TX{bQyaj<6u|v(d*W4Cej2)gM@cm`lDU00==D`^|x*mcnUnlXBQ;Sp-VJaTaTUWpV`f-@93ipibHF z#t_H4DFWzzA==Lv9lo%;Kv(l6s1vZ8+V zn_jP2w-K#21ln2r6j&SgV2NUYfx7jL5uVZ7Nh{;yuH;$DU;yDGvQ5|9?i3z8G3U)f3c6`M9YWZq}Zyi3yp9E4l_Gy{xFRNlFeH6s$kg(nK;n?*Mm?swR~Ubn2r zg@yzspR zfkBz**Lc3u#~Uex5ekfJ_q096c}Km*I|m1$E`6S9+M;t8+=8v94&(}^MuE3Qx;YmW z{8D=|$$iwwEJ1b3YIYa?cTZJ&53nad_j#}^Fi#ML28tQ(PC|Il-=4Q)#8$9HJO+Nsr z`EEK3Z1^-qHDcnRGug%U`zPJ%2d#(X#N(8z2C5yJlk`m7@NUkNcnPZW*>qm+UpeBg z^8Jii>!!qV=sp}O^v7gLJzSo3T z4}2kIUtV_oh1Z0MlbNOL#s)dcQNhgv$Hj~rM}%#1U*y8antlgY&Aug+(Bm*~iFch|csUJehlEmKU3Pk+xnSL?)2$6NyJi+l2$Hx2q7GF>B`l zj+8R&5~BBV8G%j*JhHHa9a62B^P$nH!Y0Y>BSd=^9WTfi|m8 z2n#ehz>kX0F&(z@HUoaYaw2)~)p(!c`IJ!9Z8sL(Fg&fzzqlh)CNu>_ps}u#kJNPw z{i<>71gLj*bcxCX6`GT}SXUmk+WY?_C=Iz(VPFw0rq(Y8)JW6ZQ;XR#R&_TEk<6*u zLNv6*lM}Mt$|PjZ{+7386~C2kFlZ=tly?`}J(1-z9;AOB^u8G%@?|mLYi_#|P9%|g 
zk&7o`9BScgT*m{VqTEK$Vh^zJ`CGDnY9ZWM=G_Q&fCUtO=E~t_1r?V@16Dq!fX6VegFEG`m|6>bf zDMM$EJ=P@uKq4q7MYkrfPX}2_Fd0UB=|88t@7e_?8fu+Q$UJn${ccFbIO#{LI22R^ zBUMxb1^4znSAweW>Ju z@4M;g^Ub^nFxf)4mF>LT;DPHM#iTLH5R!P4C%2d%Ur&k`G3ja|1}>nKQDo$rJ$Or> zYGxuMyQ~tY>Y=r}vev@8oa#M)n{Gj{J&iOgJ+E6v>Ooqdr#NLV^X7t+4vLC4XR+{s(~DyxR?8%A$DYi5PgO4scvhXFuCCeVQV1EhCCSpF zoeC~m-5GCP)8E58uQyoSk`X95m+gCNnO8c&7StVft|UufYR2{An{-t;?f6}v*LDp&S$@K*5jj~D|>_6GrVw&2I2H0n2zfXBQt$M0N8LW(G)3f^}qS>(0 z?+mSLbl+=+7|t8>gjwz~SLv7M&8DVn+_cJCoim)0bCUcpo-bk#DyH_U)l9OF^yzMS zt8cC@oDy(RZ?sk&uI6%zj2dou4e4+s4tHKG+uVb` z+#;gOU-Yp;!XZWD8e&ym0gTPkM7eu$qmJbT3(tXaoSR44=;%mZ*7`7OWwg|83k{P_ zX($lE9m}|OkA|;g->+BpSAO^5PjfS_)>^8o|7V0Jhbh{Qc!j|VY%^2wHKzWGa z^@xd)Y2xaK3875PeEhczSrr&9d=-$1t@FbNQd=d(hHQou)o<$$Cefs%FZ9w(qv@_7 zUDkvs=cIjIpBF7m7RJuU^NpViCT{cC9~5_iaKN}78Igj-q^MD(cR)z@cjdMIB@w># zod(X=W~+&jm}s?Dv!=o<``;ZFo~&WyX2PxgfLm0-Q;^MwF1w9#$|Mm@aczc=bGvZR zCv7{KCP51>WXXOaVv#~oj_ZnYPa-+mS4*6Aq2?ktC9tfGVlK{msdnZmJsJ$E;OKOa zQ+0u>l|_5pHN6l&aE$B0_m{+@KmwYjv!h*@?+&BY@tLVnH7(3<;6=XKJBQL9?1(`c zRwGkCu8Lx)*Pmv2_pn{dgTtMD2Ly>%)Cp@7^XFAldo4yYeS~ME!d%Bxi-7XbFlS40 z?ChJH1>N~d7s>nwLmhHF7M9KvL-!r|BFD%)eSHBVGWn`K)=jT2)A)V|#MNM|-wF{R z{K8{8{vGKFFKbBGf#^|Ze?Udo0-_v;U%B7N$N+O!++G|XK&xR!M*P#qbEYYTjSJ^o z3@n`_-VUqjCX#U$M_1cpy7B}CnkeBe9fTac-#^0{T$IfzKbNYT-O>`-NWkOiD{79WA^3-AGcHUi zX*yetp}Dz-F}qn)xlg2F(aMjp=SaE@OD1w~N<5$5&~`(!_7$OsNx))V+{#j`hfu6h zu^Jh$`nstWqGqS+k}sI<}tDhg#;P+I0yR`=TX0Gsdy(oZ;Q(x(+Wdu#!_ZrysqHhcSQ^ zYoM9_fds)gnrQ=KpU%r3dK(!z;A-tA*D8Q6K=@R3b>3BwmPDi?(TTxjF^G32W-}mt zp6hmj;*w}{S39!0zG3i#V&Ba8aSbm8f)YVlpmhU|BIMoK67vXv2r4_`wACW20nO!P zv-y3u>R32E^K(|VeFy8NjnbQ3kgbF8_tnRd;>5?})7Jq~%>q&r5H)}9sp}4EQw+`Z z`Dz2?_RXb(KpJ`Z%&Mb-W7$=$&N?NCyJFw(Cy4w_K94A|K4)hEF{5jlbaocT&79~tcN|BTF+Dj7Pplts5%Ix0zBndW&i)sf z~<^PYB8dTY(6f3Cpx_j~dGSyBprR}^cs zE%wx@sj}HQ`c*Dg$d}uiXR(~qq>$1f( zS+r|)K~DSaO>@2%8s!x!?#^%>FGoY)l)PHn?cb&H{~H4Txb_5po;_$L>r3Ffgr}m! z-kM7!Z#?ac%WgLq9^LvpeVqoA#+RLD2w6`v{qU);%Fqdy(V3L~=RdY4{KBvX>Kz@! 
z&jZ&X5kI_VO?~s6oedOlz3VmX>)|AM^>f&M^R)ZiVR)BM+3R1Q<^M-0>wtBVV=$YR zH&KcUrzW??7SafxSF(XH+MWnz)a^$5R|pb>USjC`dPwdo=KtNA^4~CMRr*=#ZgBiv zt~}V)Ge42v#%B9vYb(d2?U=mnQ(pyWKl_goIN+F{NXC?-%vYU$Vm9_P>Nb)raLG zbGV5JgKsO`vY0wX24z$1STvYdBMsZ;Q=DA%An8{`wsxTb`1mRH7llw2qs*YPL>0MH zs=xm4=e{>ncl&0nV`q(t>e2rhJ6k_cd_QPjp-3cSg@X>mV`f4Ajp@QabfI7bv+Ip` z_Hto&IOsUbZ7}^kA0w@%bDOjb101>iA5UI`-@a|q!u@3E)Uh2!Q+Fu(z{!MP4nZ`@ z8*SUi0}ZyoCtmPpFGRK15Vx#)W6($Pnbk?HM8D2HoJ~_k_&yy!NuJ}p0`NUC6J{e( z{(KJw(3R2+D$qS&_0bksS2-w3k{xYBj%Fi@L?0+)4?f7Ptbl~$i@w&amFkRt@teuT zX|+b~w66qE3w6YmT=hcO&4_sZJ?TFX7$-?FdC)9_u9#V4e_M;d&%med{nX~N9Qn^m zkJ+&CBNzR5MFdyT`-F9^No+^VX~7{4voAH8pJ9j?eyspq$Qt1L)f21Hv4|y1&DF1J zztfx&a&zNPNfr691wyRa{$ad_(KZTsh}h2UYswz;^`Oa$>MS)y#dARgRG*y$HK#Lk zn4c0Gqe(rOh@7zt3f&7%BgP)%LG|xRZ0O`Aw=8mE1-26-fE5}g2|&O9{6^l=#Duvo zZ_jzbLw28`McSk!#dmy2lBgH95}9ELT!xtcZ57wfXN^AH;VG!7uiD2i)uED0 z+okZit0FMCvuY5Vp+1QS1m}CsG@1OqN{>dwQ^-E&;}bDXFb1AnP?j<11p95Qwl+Rl zf<~U)-<1zvb7|-$8n2rJ!5)z72@`!wKaTa>kw&}Q;7>wh5I>adT#uFu-n*Y_7;9SW zepJY{m^i6vFxcFU*zGvNbB}hypiba#8?>=Uw&g+ zBE#((u5TZn>^tE**Y_7p?044pyP2!!xRsih7vD`+qYLv9&aVR?bm9{F@{~Ma# zpZ#SuaklFz#YJkkvWH?lfr`YF|3|pc0UR1X^xXT-PegiYChoHQ>yZid=$DczzW?MV zb?4E^;JzQq;BgaW@CrF_eh#rpVIU8}kWSF=IWPa`Y0VFXPh`?t@Qqi(8T&6kYZB&v zEQtKDY8V!-PZS#7noV$iGz`Y6jwHmVL!8*e;xvmqu?C&cEZIyXVH(O%$jAN%(Cpt@ z%DNlydwOcThTHKS67Emayy2M+s)8R_=Pvhz=8SyaA}WzVV#l7Ma*ZAlcrPbQECSO_@uhN2=51Hp|AK?9n16hxnk=vAYi*iH-A@Mx&+2^rW z&FQ$+7K5<WAYmu%kdXFIu^NNNg2E25>GV#52w*iwVJI~I`jR) zMiaGuFiu!o6~2_xRCc=r->rWK`GUVe@uMpmy6x(h4${)|A&13mY_`@sdH}&4fgkKm z$s^?stqRcI!o`msJs|YE^A^}a|8xCdhLjt*U+AXfrSnRRO2EBXYsa0idipi84(n%k zQg+E`X12Jtp`4C=^yA)&4zey8LOz`xaEPKodt`;d3l#xHH>so|!v_)#A&>Qk?&+cs z4&g!t<@Tn?ebPtZUPMXT@p>SJDEdCFpHrtq<8>TL?P1kr&@ncNrw@D7%e!ic(x~0> zO8x~p>u)oH#2>4p{g1RRN%s0Ov@XP%>niQgkii1CqxK}8S3fQae^|mfjd2O5%UDJI z#wR@w@euckp`6+Fq(`c|NV8lqO;W|d#sen}UpqzlN^cV3`>KmVP?aM7_2Oa2Q_Q%Q z(IB`9uZ`UX19;$@a``4By$V$b)TJYQx>lApUb1riiG1ggkI2RH%255)PQPIJLY9~X z@ErhzZ-iFz$ac<4qqIGT$hGq5exWuMF#lq8U-YFx4cdSrK+Z_IsXo1(*6-R*jXWd9 
z>-4cj%JJiru++@gCAqtekH8EmpI5V=nRF1UP0_3)i$tpsXi@m@j>no!U&;nh`9w`mQ#11@mUm$tp5t6@g#}JL zA?81s84p{J%WjukNGTCgC-UaC>$W4B${+iflWDdBm4%3N=~p6F>>^c?Xg3>xo+Blt ziM)Ev6r~BnK8Ds~gH~6=AG|+U^ZX{(5)Lj_g<4Itv^E`TW@UVf+hi$&|#+ zajqM>OI*qc+>;Y9jy3Te-gm^6B`1m0kNDzXG;jK@)c{Pk1A|;2`v>(B!cdwbCIXj*jZ>c5%Ss_Rt=gcz$Jw1C0wF@; ze5$j))U_F6&{*uF1G5TG$K4U;y)WeT$n{{?k1j@r zeius@u?tnm?=|ve(CrBHhk{j7KpE)y5gvV{y~rzFAmPr0-d%|F<@L=vt$-<|D2bY*D z*7-o|!FRTA4hf5JpiW}Xk2-2Ho#|W&44%jy>^t*vhnEcG*8Sz1j*(Cq97&tb#Bu`eXQWv|+R;66FJraoDB-GG_OBdZM=XzBu~E!-xIg zgjX6Z97TKUe+*2k`Li1tgxrL;r!@GK0mWBOPZTKLB!U|Z@5TF5V^rj5r z;5$nQ5-kFU#Gw&c#L65=csh>bxN1<9QN0#Z>s$*7{qVF~I^;O!oc>+hBh%~&zjxha z#=tlS8wl`tPUhSC(m(;dCA!L_D{v00SGhEtW47FIoIF7JrijDM(U76xH@+y14?qGe zj+o*1tm91RG5%wHHw!5Jf#D2C%=)681BKu+;-!Izp>IkP`JFR?bgw(Ll(b9N%8mcKW&OWO=w>zuaF*WEc(FW*F_)_%wGdxMBM?L<=>Y z`bwH+wh7A^+syES^&XdQyZA1~FNzgIsXKg4#0*m>F?TfZ$5N;xclzT?8#>|7vXL$$ z>MYlk*=C10oV*5=dTdwff}tY~)s>IUQgO(}j%InO`5Z#uC5i}?G| zy$IXzK-Yk`%)EQMv1Rmu#pbLsmBvv=!!uz(LoCj>kRSQBDYRXKCcf&Zm*&g3be#n| zegQ1V_WO~VI+sNY?dTZn(c)#7iS%d3Y8m}c?@8on!-0$r-S5oy zL|xksX#*Vt#;?pk;ILb2hifw_ss_`{lC5eb4S}}xK5c>94+lsq;OQHtcO2`Qt`Cs; zqPsqy>&=|}B2*!7?~QTX-rct)4Err@PY>0{So3l2>_2Ua;ZDqg#8<4z(JSjn#ge;b zz__4JYWvQ$$5ly91Jcolf;>T-9qR{+i9Vg_qGVMf$1+)(->qJ`;}wt0D*N)aasIr9 zQVs9IeKaM(xqfCOPs0R#RGYht?4Ra0`boZ7e@^nz=!-BSSjhb}%-UGLcq71QbNU=et6T}9yA`>LqYA0xq9G=+p;^MN+!sGe%c zH!!bpI1Z^TqM*;@G-zN{Ii?2jx=n z?d;~)IR*|(qDLW(*LsH%upNPuE~a<2TzN_JU&^=?de+T~w69WF66+Q@y}WfwAg^ceIOWHb_f`tHasE$?!+^nmDis!2Rn01s3zk$1@K z^InyjXb&|qsrR~wV1J~US^js$yD$KuO*jCP_?$S-#MX+UH(7MQps1na^_rMdSA1|f zM>ox2$@{SCGkichsoFOujT3a2<<9Tu4lU%k1^(Uazv{0>JP$IYFQnJ-YL1Kx^N-&$ zkr{*ltCQvg@i98*R1(_-9x^VVxnKN+wN$^d5|Kv5DBpE)RExSR^>FT0s@X+v8HSj@ zRAj!7#QuwHsN&6E4#8r@ciopxY3+ii2rpD@=xLgdkCa`F5D8m4>Lss{BJOBZnRkyCeuoWMdPwX%LS%_lhoAkQRBlm)fQ;-;FAw%?Y zv^hRSvMoGgArvEwhlDvc_(S2@8t23#lnOJzf5dO%*yGimfg&g_G{rZd2h_WsK&u$q z*Xu>(&Ubr^eh-|`*-jqbBs1R0#1U+L9>o1o#dG6XDlO)>^n^PFpi-E3Y;6`ZMC9#^ z(KM@TgBAM+LZ9e?MmuQ7HD{vCV#6zV4`6ppXbK}1=VXkRc3u7pT%i|MssSAKWle3W 
z9s6KqQoTqg&zrRTA2XH2kx-YK?xB(PvOc51#0?D~uqP28*dx4bt{b)D!krizbcp-F@GtH#A^U3~qZ2z9LYey7To5 zXUt8yjf9vZl$kh=?lbOTM%axqh)GWqg>_x4tqu53bC7U4>gHP9YYd5I`}MrYbb~lP zB~1F9o+RgYB4I~l{~Gr!<8YEaWVi~ZF2F&sdHBG2B-$#NrH&}W4TCDDR{bpg)j=-Z zBKP%>&jlsnS6cvsE(WaAB)9kPcJ`u9FOy0FGC3NF|%aQI*IB6 z5J&6IBuAXgOXVf^*luV1>aF*3=*XKY7To>vr!cY+Z z_H+~obzJObzs4Ae8u)Dc&>{O|v;F>>%<1Y05UUQxj&It4(%58OVUmUH-Ab5GqhDv>NuvfS zZ3wLd_@HI`%42Wt<#8m)PT^ZgU5>a2xjySATTHu>!K`}0zL$pU6c@71VRJS|V4_VXIv6w z&rXFtmHQPIa34^q)Lt!;KXdvyIQgH;za|a$SeD|+Xoj2m7d8hD^0@sKjxZL{9OoFv zdbfFrVZYlzBH zIM5P0)x(8v(LS!8qw*Ks0Q_GSSB>jynP= z(PGHJ_v)uyMUEra(z z!Wd1IZ!%nGlC}Pmk*45-M*<-C*~d3<^T1yyQ`z%x*Sh9#nsqHctL4;8`DMk2COY;6 zeSfYgNX|~nn?pf~U@8RE~&B6GYP&NY7F%AFI!G{1~+Q zA(xj0dZdyJ@r$M^d^~YyNiIET$k*p4)m&^yqSXFEp=_>bHX zq+V{(|7ZC{`j4Y`*e_Wl9YE_uUj{F~*k4~?Av1)h*RrC32=(wG_b7<#_ECznG2)w+ z03lAoxg-H3C|zg+%+RPf;~!4e-Iq;q{34+E8!Fi3C5wT`EBt*JK9*H%TMtXHx`b2> zr}|h=S$jLk+A^AeWyIrFl##Jy*v)1zSdsBoGBOl7L2Fgi`%TcKRvJ}Sb@ixZh>Kpf z9rWCBRPb`{RFR@CZ#sswg?)olx^(bmbOP-63I(RLM=qg{ zUbyJq_^io7=SWS||CI5BeYNjFBJrVbifbJx;Xc=i*Iq=zu!Y)p;tY&tzSKv!P)%vv zJ=ynJtmJTVl#=k+QZB}>#@B5+2%_U&Jm8RP=$3<6N1xZ=<{yU8+k7Ek5_&Z|m|?O1 z?!ZSctXB9(#oUo<4&7!(2$gBbL4E^AW0K)}bG(HNAHIcp0EXRS!^ek^W0_jjHQ8no zL$p1YzRPzn6-k-bn+TdMTbVakU6)%MlAof=D7bY30KU1nE2|HNpN^&ZsatXU+N#U} zr*Ampg}I~edQ8{>Wz5hHf_VFADdKoF==vZ(^|93tmNznL}o z?SU1XpbCppn=h76r+N=vHH_>Ef<1KHSK}rE=&KeoEDwXlGKDI3vV$2f>1UE+d7Ap0 zk*=2c@!DEmPRs_2Yn8_ew?1Bk`PTl)abUMO(h>bW&NK3e!qn4Ls+D=X z`aIs(6PZ^l@@VNd_(E-VOfP7>ZO&TiUlA*uTkr}7iknn~j5Vi2f9xue=L99MokMW4 zuWpROTr8b`3DTgF!D|)A@)!Qs75Rw_CGVG=E_>DDCQfMEs&wtX>}jt%fT~Vib~bet z-YQK)yppkauIq6Ndw`v7MO3jqTy%Z!zE#cE@rPmf72UDKd}NiQa-^bWan-o0xaiO) zyNr;E4G4P}ZRYO@?cn(_o4)_UCjOt$P}@A|Iz4Q>d#bsjb5zCId3usEZ8|3Y5Ok^g z@i)iwQ>3h~FDR*UKEFisRrx?|uZ%@u$ZV~RL-`Ud zJ_by3Y#DHyOi+6IQOTi;n6U}YRXvF>iK@eBUD^(giL-Wi(S?c5 zWrs%6ol2w6N$s(6v^WQ~SYH1%&nM%hKqWLRe=t37O=C;{W8hc$&j`S`t;BvWDx`C8 z?7c1ww5SbyLM^d>91W~4Q%Ngu!rbDhM)!ku$jE{LQ4{FEJvdioEARYb-3*I1Qn-^) z@P?3W(v+N-SInV(X{qeo@c35syYwxi`EV1qU73tObzuE! 
zf-he$(b|I)y~!69i#W#&V>T)HBs(PXkql5_8SR4phRnP6P^NKd1`w5!<;aO3tyvdUM`L_yz$Sc4wjZK`N>7S`?Fv4D}60FB@m^Z1J6D zx>EDA$?d$H^iiE0--2kPXL7RB5^HMJazLyWqn`c3K5_9zei|x<4;D1$OWQa>=k;x0 zoT4j{{G{SI*2Cs^$*@Q-E%T7pC3bL!%n!X~a=ykFl`XDTjU6^V&BCr0IX>K<6kT^} zAF1B{1nm^`SI6y#LnKA&_V`e_k6gpW?nL!Nr*=Mm&6mY?_%_SW>^LOmKrkFPx=+8k zP=`)gx0~kSbS3tL^d?eqykxhO-&Ecp8#Ugac6IgD*#oa=N&xM#_<+sj71EKnYfAf- zULz`fP26p3>&wbEhf%!oTa@lWwpqoVyiUbzhvUV7-kzk9(AiKYUFW^8^V1$8VdKlz zA$qoCmXGd{MjAH5Ap62)ib83N^y4{EjhCtx@jzH2u6Cm-H8Qlm>*o_O>lGn0@9KXs zw|vh>p%8O*Tvg?h5@f?Hv^QdZaHemt^b*`|3!a||HLI;1I0$_MOaZ4;eIIXOm8W`K z@?WfU^dF)z4Y)L7x$8r=1zJ%LaP`{Zu8V*x#%Btv;9qQ$@<%cpk;!Q`;7zhB>1zW@QJ7!o+pR7=3zJ zY{P|AQjIn6cp+f3cAc=no#LQ+Jo`WECx_-9hdCtu3JYa(*tZ&it zWqa|Y?ls&jv~G8y|YFszRx{;ET#V%Y1PQ4Tg)xP z+Pqdl+?-n#>fs~ehmWF}B9~7%3qbu&D`2N0Jw{B~n%tWf0Gux~@VH{F`Mi7h%TC#f zIDHz;1MN6Sk~jQ^$55a4JtX*t!0+uX&=G2#pCx;3u?uS2iqY33zGTeO0yNsUtt2X7 zlS#oOlY=^gU(!5>22f3b0WOxI&^5rxtZV$DfzwLS!zy9bDrU7>pqA( zAPv3EbhR5rxj}|&6onOY*IW}TGbw3p3+Kxhwzi7iOZ+cvaD(kdsfkI9wH530E-XY zOYgQDSjEb>!C1KkGo|w1XnY0v)-(y*NJ-|!$t>6{_2Ar65mN28(y&8q%V)0-|CL9j z26hut^Y)Kph!Ltq!W!e@VtltAcHvlyZaU%7jAW)M9_NwqOGisIv=9F@FRVxnDkN?F zSyCc|7jqz8m#Q%%28qq%Wp^w)2Q*_|`&bjxV7_IQ;}y2QR{_+vHhLk?wU%c6yw2(F zVXv{Pty0nIFnoc&8`m1K_v!<4wi?Ti(dHFBY1d_*unO)Tg!5E3usAZi?xQvz)ZhJ* zwI+UJvFiV@_mx3$ZdtntfnW&)cSwN7CAho0duW2YJBv@*#wLyDUvhLl~u;%rt!8FtHb zTgUyVQ|Pra$QNrhSkbs|7}rHxlTb!wXb{`$wCnPKC?=qp)7*&GhD5aV(_f6+FXCS= zn(-rOJS96zk$8d&;G$o^_r1SWrMNMkXED_3tTj?T(x1&s;T3Ct@666Ft#XiYM>}jM zAXLG}w^}iq{*8UVFh&u@rmKq**s};3X^TIEmD-&v#}Qw3;#&}<6QcwPM=N5B7O6+sL!eUR*1y)e00A7vvfubvzx&6}VFpIkI`jOeV{KWHXKub)TB{ z9~*@@&7@?*r9>@P-Mcy!VyPbMQ@?z-+@;RvloYZLB7MS&RU{0b^^CPd>M;$GwY(jg z+aB`TG_;-cdgA(w-)0>Vn9yScCt|BT}h&F8x*}*TBS66>aOw3CWfOFD;`LOr&x`;_~ga$NyhvT zy;DCdq^c$VT_s?+%a{PZ=44!?<|GCk%|O;=XK=!lB@@kedipgMmg`kLhvC*5!|0$B zHM1?a6qg13iP>S7BqQEycnLfZSQctr%g``{{)O>-;)?)Jo)vGioRAL_H#v1=kT{Oh zsRyjNeRW<@s)DT`);Y&lI`S35#sbvbO#&P8R7tuGayz 
z5`1%y*TTG4HAMO*fYyx#)rg0Ah6rSOP!VIddHy_Pz4Y7FQi+{YigPcqMSZbMUFY6_0~S8$1e|X^4np%G(x&H1k?2qf^Ze}Y-{}%Nx zT!|0mN^vrk`cWvNX+LGLy`+R9+8PSO!7n1Npu-1VOzdK<19DcIc}%H_kO*`QEkz^Q zdy470-0n1EvyH%hWv$UVinUS5DN!Ol zn~8nfsproXBgZ1*qBF1{j{1d_T~SkNU&p^g(>8qQkf0M5Z9))#a?x`mWj3ze?w?Op zSW~lQOeDUJ$QC3?5R0FqR7*wrDZp23;0Z|DW)b?vLHJ>|>~JC7RdI-q4^hIwQL%D6 z8L;a(RN%!~(ua6g$-1eZ5m789HEo<0dfZF1a}hUjJltKRB*R*?L|VsLsy29^g@5th zS{~gZzscv;V0bAQu2G>E67*wHmH6I`{Fev`nr;T=Ujj_8f7>K4RK*ye>r5Jzg%>IW zwMh(an}!`q!x5TlhhF^pD7NcN8_JI>HJ&*}L+Ov$c&ItAi9RWj zv|EXNe<@>)XM-+YBHQ>mb+n(xot<$p`I^+1SGknf8D)Yn^90x%Q5`lRx`x$|{GL%@ z4JM@O7TRn-1&e3)XUhcBt*M%&ROg&JJYO#p9kCB%B8^gwa1Mkb&iG?1FjNDViXA@8MDwb|=`DM{2bFGIFs&INhR9ne^g+EZ!E}LxjHN@{op^60R~VZ=@ML1G)eK zCor)8d>PWed|qRLNt?Y2?xu3P>sF{^ehPaoqw5_;iX<{s!SCO2kWh^3?z3R@ko}f4iZm z8Y-vf!I@^pg3#4^6GArs=3fJpf1c>@4fycGUDxM(ieHkzgDCAQVW{c~5)PXDIq<)n zdmw)U-Lm)H)0nLf>y+Af?_daVUu{UT`~Ryh{yDSv`oGnZq@rML8&zO@=*zpcAGq%% zYJP8r|LG6v*IYC|*|EVf`rL-9zYc(kJ~EhJ?|i8X`QO0nz3*oXT!&Lv@mCy9-duBUuiOuy)UxEux~c~Y z{Q%ahOH5F<&Hj5Yfd3h4zx;kY7gh1^1fc0R+fSs2BpE*UzmUL7bOd%^nZ_tq`~if@ zk|R?jpxUB99m8knZ@~>`TcP|fthR>u`&Hrp0A1)e{v!`&L22Mmic&m9`i~SP2qn!w z*&}NW>p!yxl+XU;TER}*|ID>es`)1x^wapyGzcZ;e_|-O%Kywzzd7?yK1TxnXFmUJ zVf;z8n5{7Xk!pVnB!uE26b%0>kiS`@|J9KH1?0Nk{r>=2Su}`1M(;cP zm3hdAcHRfJFnMBB1pU9+<$hA{i?yB87T2sUtsS{+_-~p&v&WYIC69{!-j_!Di=PIT za|DgP zZVwwPIIiI?-V!@nj4^PHHCsdyo&F<~B<$4iNs4Oxpm&8OOlH*b-+B(lqR`Rf`O@te zP`M?re8yIQ;r1QEZUx@1THb7FmV zrQ&uclp3tT#7$y)BUx#{BKf`=LGcvbC-VVKtR?CLRGei^$0woFFXy!XjeA!F3xzhC zmmbt5(1awC*|0E7K0tBaZ+7fSpTR331hg7IV>&DW4XxBef0X%A-~M*%ezqjh-|X$b z(F548FQDYenecZXG3y+zQs}i5Sr_el+278kZx88bM7~glB^^S3ymJo&=NAqCZX?k6 zzJRNg{sIi6p>|7v!LR))XpX4byjCCJr(y2Z?+F#Cn!n)qC~x;yQJiCX6wKBV@s2#K zzsk9j;c{c@=tppLHq~02bEiL7YX!52VrG@Z8z}V7dqeb;M#u}EHt{O_mKM6 zgQA&KD3Y$fm-z$Kq+wpRfv!jp2YL-j)%agWK&}`A6`C6P(ZsA+XX)kPw>Vg7*{lh; z%8q0|jjJtgKtnp-Z}&Hg=3P}l#vilET!Dt$X)a|J{_69-v-*RE^#0-J|HCd&G>4_j z4)#lG%KPwtGa>C^gvu{Y0I2GtCqgARd4(OU5X{^~tKad)it{h>6%0hnT2s?6QYa=x 
z0u}~DCrf1+!P4lR-Y4kWU0r{gs4~hhw15h^L>Ribxa(NZo(R`tB^&q)|1J>W3;4rU z`?s?B;+GV&?j**i>BrdkF zpOfr_&IkG5&8PSGeC*2<@+qM65sr43$_jvmirJ$Vz11S=rvt@r)%GnB$}*c}>i(K{ z9m*G_MaYh_6C0$KYdV4S7iIE&kMSG+Pn7>|Q~$$a^_3=r?t9Eu*>z~}F5g*9UG^{W zjIV-!X!rb|+RfnB_E+&laxnehiAVQTTNHZanD}p!`MvG`CK;j5(vmBs1(ClD>oW-4!~0!}%bkSARjD#>*;t;Ntd+rbf5(oX9u8&zksP|PYhsV4rb40`_9SM7y= z*ZCr+&Vy&QjJ1S`jn_*v_vm?3*1jgw7P!Rp57qC2!)vN;P7B>!K(A-ud&ahBBCs*4 zD!LgJCrj4)`P|&DmNhNBo_6$x{}iPf!qnh`-^B$_=o<@x#LQg9GZXoEL10 z9a>(H_RqS%K+_b8UxU*QNwYkhS*^4=ymg|D^taEu2g))6ykCt9cAuj9-G+yxGovm( zJHN9;RTGIW)qbC!f0s({(mo^;(?N+*Sve|Eu3pcyJA|EJDoJfIDiH7Qw<$Ox?yOX5 za-|&q?hBoSLyY5Po?uaAP-Ka>5AusMgVZWhFi7)pPpIz51{_tX;IxAS#KLG8**?=Hej=nlGV)t(~2Ij@m`#zHa1Uj@_t zQ0shs^DCrus$+k+W$|oKx+Vs<*qryN^PAQC=Z(DNFN1G<3k&(G-5pR1JUMQ>xU4kkLPz6&3bYNWEth6RIXFVUvKp;MpQt~U6pN$}h zN=a(6j6&8LxOdmXt6AyCi5@U;oMc1PeyY@huwL?aV0+wHMZ`A%I{+7_`+0o0&ONN}$?EBY()Icu-d&v78+kXA5OnZ_ zYMV}porL4#WN!WxD*F@{{g5|lFSR;jMuqmP@RDXOdi};|)0sL???znnK9w2$ApT=B zGb+t3--cQtKK?8p(OaD<@szEu)QmFw@n6tbRzZ%L21|R8Wx5@b>&8e-n~DHh9PFXbyYPM z#rN%%bj)Rq22dI*(Ok(dkA%|WAQK5vTrSnzNl6w+Hlb_NoITD^Kqt z`jVEEHplU0+;30W?k)pwJfD6hRA4@6D*Yxm%)N=}+{OaMaBcd86iu=(K1MKyZIVezLmx(zSe^fw=)O`zHmBot=+}G-j&$C1 zjL)?;w*L0%*7|gg7dLYaC*&55CH~<&EpX+$Mh`Zq$L};gL^Ui}NmHnbTgM;(T|?Fq zJ|MmhUAvOTLj*RYD_OELPL&328)}DVnO%g%@9Fgdc`P3`iITwq#aJF1paL@gzNP{aHtsf&bJw8?!a- zm<*LN8urfUH)9Ov6Dg>jD)X$-6Rf`S(KL3+LUUZ{d~D-Q{5bUVjt{gF=`UUbt`^O> zKmJkJPJ5&S&1X&g>{qKM3f-oUa8uamRl3YLVS*%h8igvddphalvC;JDjY zHR@qe7#o=DTg0@KH8@d{i{*Oti5;YlXpg$dLPMjnRRy(`8{&$!;x#CVZ=+`ZgLI*G z_Vjk|%h!WQ$y7EDI5V!tn-Q3t)$oj-To?-8jVy=b>o~H#e5Otgd9(w1{>PRD!IP)& zMY9I^x~A~f6B_m_dX~FO^G|O>XE(wIbbIBd-1?d-|QCQ0k0vk%cSH9&AuoL&SsnOTXDNCiF9x z7~)OXo@LsS$anBvxe-4Cfr_1A&SuTMbnwZkaI5Pol5hImFuA_A7la8RRm1 zuY?#3kbWaT+`H19xze0-g4^M z91LUCXvr2m`dxQOk_r5k1>Rw`S9h_EBAzFdo@4DiXeUc9QV}zCO2>^985Dio)1s%) z&q8edR%6!aMM3&9{PIDgm>2EPQ9tPxwu!#ZD5cx_MW7~$JR!HDp9W53@!`7j;!w+% z+LNxfaz@eAl*sIc8R~cklpUYId_8{6*0;y!x|zY5Ak^0!EkEcuddd}ZE>O07!ml8! 
zy>*p*Ij8$?f3OhUhxiQ7`YtsFWPUv4#Cp|ZJ0jnxiKVYGiqG4$S3Kad?~=xZKbAKC zipXM+JfaSnviC#`a@N@x_wlGIq-2>*N$tp-g15CbAIF>sRUtX7^QzoAujX_pNTRCz z=*aE+5x`7Og(6rrRe`v-;*iZv@_@E`EHglVggfxj<|EfRUV(L*mZwh74oe(-7HAo} z2yG)YPK;CYTjdQ$ z0D;R`Wo5Bh8C8~f9)yyE=y%4pk9P^vj496HDGhIH4#~Dqt`s&p9_VWy#xPkV2zlHIdjzhsu=ww+K8SF=j!PLi zShP-p*p9v|ZfTxcR^8{PX14`nrEb3_E3;t7T1_Kau;U(6N!{hzK zWlv*hw%$n4%+(u%wX6_V9MrprV|XXHkKNE$T5#K_M0CLeq9T|bW`~HFYQ>)Eb;Rgp zbPdOK?3ZS48{Fv6m&zs6ZA!H`;!9Mo@=?6F1EGAbJ*1tj+TJ_oIb<~lISvw(FISHx zZFwWkK32U4F}fVzj^gGL)JX{i*XFD)A~|dWb>4$0)G8ToXirWO62v=JgT|&~Ky|ZK z)N;do-;pz)6%5)VLH}BFzkaL z7i)FIh0-&V0~uY0fFNaC`uWNVwbW*_JuaXHT$(b;IK+c(k0f%urnrxX2YIn^n@pla zWi!h38Cv|zTyV7Fjhh^-MnJA0vFC^%;MvsZb5@Q&w>6!G&G6NjFI}cBC8@0AE_kc` zx_<7x9>AHz#?2(13Uu29@u5Brby!__Og3&g#F_g}u}}X%6`BhLw#bcEbG23LBE>%*ut$1;E4Dxf!kqe5EWyi zCnK;z(?PQ3D(QVSIeurL71g8-eH>r@!gU(;yoKhnMKrR*l!Mi0b8j#HWaIV`^Sau* zV-~E`UwGb9+v6qv`~>wk|hE!Ey*SB8l8onC`pf5`*cmogDvM$?=W9mdCfi?Vy}5)f>xsI zD7%ggOUKiJ6(fOZX-E?!L-vRR#Cjy~vxsQ#U=M%X(zN!ju<9!Ng#% zp;{6=RnY{;82SxzvrhhLnsb4fAyBgeqO{bYf+Jjao70Bzg4anXxCme=4N(H|pME9$OGc#?@J`n(qGGH$`|> z=?L(fts@R+%8D{hiofY5cCt0NAqf_vblGT7yZL+z2)9?Wri~^GlzlqtRC#Vf9@~{+ z42)fxv8S-Hq&6s{4S!;~*|k_|wkePaI10^gI}xhcMqaOnR$`af%~z@1FMKYoWW$Th zPk%k{ekna=zmCo8ScPcA(`|BaTcNqP&2wWhOAte9IVRy1y7q9Aj=GjnNw3dkF~r>R zM-*c!k3&)2562Fn7INMD@blZnZB0F{TjK3g50Llgm4>Fl)C}gu3J9Jr6^~?C01i~W z+qt?qzKs@*`hvc)yK9GXbAzRhpFHO%#D=U&%E9%!DOzGg*e3>aoj!y&$<*8R@m|QQ zP4iDtmeU@@G&&Q#i2&k;ybyXHmwD2_EgRja)#qdWvYGw`#q|*nxgR-r9J%G!z4UYU zq|Ga=GRjU}hj-moc86Z#I)v9wkMxNnME+Y;7=clB*{Fbq&A+*sF_0U^%C6n0^%%vT z{jP5<_Y*7!tv5uIoSt5X)3 z>%kFIMDJx6I$UM5f^zvZ%-i9)e;4$%h<|>G{)4n=yi^TieE*|ne&ZsRjFnRHQ4&45 z*44M!!;5iPip7l4nwte}%M&)aQ+xnFJ^(fNiv8N9_CQr4j=_@ZDg0VzELq(v%sM(@ zhf{rYF7&Boc+6Zx+h8JrVO%+JJdKwOr&h%AKwbWdzqoPE*)UHRa1h>m_a2qjpgPT6J%-xB{V{w9$6WBaB${sEubx5AbV9PQ_^@3jEsCr3fjcO(23 z+bSn}v+P3qY`r%cd~NohCLsAnNUNwSK^rkIo7_JCI=8DH@lN%fHslEu;6X*IuChht z)n8f++Bl+*(|_KtRVMDrO}{sYxyNl^#WooKN$jS&ETC+pH`ZI>#TRZa((@>`h+4KI 
zBJ#s7K86MH2s~_&KLq{M*E_y1+!jjRiMU)}CDXPU6JdKAi#hYr*t$hUGo6jH z4Id@;Yt|+$uk$iee{LSQTceuzWGGkSX*wyv+Bf9Bn)PfN^GIoEB-8Xfzslz2sq0}a zH|#?ukTYg7;0@|-ChTv8NBJz=<5hw`GxV6_C^IV%k9yR9$Tc5jCZDRyR+i>~%`lT} z_SnIC)03uPyyPX{-sl4OrF4-p=XPYid;h8Z>`pa_HG=SB#G-3w`>E+XmSh~{zV(A` zx2t&odc~l~d({rD_6tJ)$S1N664Kk|MRTti&V?HSyi^V_pQmqgaMA{U${k<9*l^^o zWr=kE2|>9HnDCL0SKMN6f+23(Xcot8v$cuv0#2`g!4(>I8YxjcCNXz(U|POUFz-87s7d;bCu0x1o?Mr%w!n?7x;Q1ZTk?B)lvHm+1@Cm44L?Rb3^d~s zE!iR13J;^)tSgVhi95C@c8Z zwjp=<7SRr_B{BCP3vx+lM&XXK@7@#SY~T-|+>253=nL`evQ0m&J>N55?k}_m7C9YS z_CrgGoDlt9usc!D#cr)yo0P3F3!g7HYu>PJFn9}e`<##-Wrudy=*!uMN#-_`_5p#Wko|6B#m^^(q z*!CPJd%N_(Pa}vWLG;}qqJ$Uo-Nl^y?$SW|&pY>cw9~9dFW`22i3I^QVri1 zi?@}GMKPGqIly^SFo?F*K&is)%z&24pA^Zjb!Q|Ky)5XgYHDjGyb z@hTgf?X1Ec2{?TYh|x4J>Uv^=9~m%P`sQ5{S{WCt)-=^8=v}6T66e%qbP8xE@U_q{ zMjLQv#xeaG!8g^V2aq`}X6qSd7aKizOUeanhEN4%2&>TjXVAk$SR^mq$fL3};wmbc zOvL@qkOZe4fs^hZkl|H?=+<^@kupeC!dmL~?V#WVu^F;*pyqihvD@cda)(v_w&PlL ztr^geZMDZ4?l)D@DSVoo#OB9KpEm@BH;p6@L(5-2Bt`Z%DDpQJ)gLpGeZ1F7Q0l3Q zBn=Yd&C^%I^|--*ChN18#5CuTz%+NiK}$-Ail!B->(KOOE>3z%XB+D|n_cEdgS&pz zT3l6Zr;Rh7S9r|8WdgSJ1FauiHPLuG3`?_Dw ze6v+ISVc}tXzsXqZ03j$=GubSyLHB_R9FmO2X#^MqR4j-!7pwaG#2f~D3eHBG6|$) zl09s+M@zF_+Jy#r>y}z7=1!h^G*BFCci?Ogk>%bk6m_@u*N01-HY*(0lm6I0QWbMs z7FhCD9A9WLZQu~CjD7ZYxF0%?B-IE#|2srTj+`M*bgK76l6_7GJ@0( zu<07X6kk7#b8D;|M6|}_ud|ap$ezC%jARtO@rbOC&3_BZ>{+wU-ZKJF>{(xBb39nY2{MC$M@|D@HoX>5h$QY>;H-8J@=yvr&Uu^I#M}h11)& zWSC4PX%S&kgZT|o@12C;uNN55Y33!72pLEKys%U)4NsX~0t}b}+}v-c$nx!J%7Yu% z)D+_byC(N@+Jn>8wP@rj@6Y*I5I}pQ>~YoY-GFO>(?}Sk>Wfo(!iVbDu?LjU z+lREbOI(A;^2b_fyeLxE)ZWWlsF!~5(!C#?nf^vfx?&d%06T2AIB>3be zw=Hmn>%aaCdO|JQn?{vC#=OQLg*9R^oqytcx?;-xfrNGpp1EYEAK6e$^h;v4)Ai7c z02(2w^GUqv^%V;j!B95bLOfjM&iDa*w9|{wF(?QW6`=ZAY<|SV7tkO9` zI@(2MIm)D%k_A&dfNm3Gd|FTog8Q35a{}GAs%~NgxaTmLYrQaa8RkdxJo;m8H*0;S zsNo2~Boh5}tYt9D=au2IQ;f4*qgam$HmReO(>=O%&&t*Dg-^s(koU|_53i09LMhHf z-zV3T^#9s%4ZO~eMNBR}8rNE0C=I30PHIUv_rfQ2R+KTk`{AioVbyg=Hh_@tWo*Fn z21!o!AkO$YAe#brfP8C$M<+|KKS=!8^UA4SbZ~^7e=yeBaa4?_5@K4!uqkJlqwsmp 
zxzveqs0|ot$kplALt$#tPnlY%5~8d`ju+S7Pb7UC zhuGsq+QY_6?WZ+|gU^%fwwO4BUM`=eNV_T4YLVg1?ZCen?Na$oVaFEJmjOWjbI;1E zV^?hMYFGA3N{P0S`hq#@&bXxWpUNBX-nl@!hGnSLd zJYrG84nC!c82QKpXq>-OijbD1qaIo#3;ZMLWkYegY&SUF}Cz5AACS>ytzAt-2 z{O*yl_Y)Es`gGkz$0B>^0E_ve3Q%<1e%J+;wNQOhKUopyK~ z90W;e5Rq-q_Z?b5M4_^zfcl%dcr%9SCM?>>juoG@0&W3w!YiiLqaC-C+(jHI)k%qh zj){ERypr~8pagk|eV^}XH_^2w-K;@mOiHA`tq%9>xTaZc`lXOU>*?4s%QZF!@L zH=IH9lE6fSpV*tYqUO8Y#cz)v90izvnHUQ1HmX@<{yJa#avtN6uyqr6;jO3M!Lxb5 z;?hF2YHu!7?CpwYy+b1KM~cN(>F;$7zf&wtq;QO9^CT^dU8k?*&&azH1j0gMI!G34 zp8er!P-pK$U|&@|lvG!Q^kV~NsP^6tdjp6vK;hTY>^1P%@ zS#vtvlKWADCjgk2(s|lX}|k^++jN(D)wW3?VTOc*^G~E&z4s3Vc|+!6D(WN2JsrK-~`iO$>`)vhJ7QYInNqq3*7ejDVEt=TM*&@;K7 z`$dwFHJj3uCEe#E!lnB_>W?1t#Tmq)1LQr>YRbl^fR6k(=@Rzfkl!(^CuH`Xi|avYshA8}gL{$K08Yt;1+7 zH%o}B>QCq3f(_fl+^!k?48{R`!NO zY3uRdehB*H-bfuJ7HXoM&JVLGe6YX#j5_F3vBikmDU&)S0%p>D9JNhfeO2u=Tl~caj)P^Fmi~0*>(pXd0&1sjg*l zTHxfoJ7Ag?a=xQ_^z&%3){~|p<1%H+i7V0iF8mOtsLjDue_r~k$5YO!X+>J?7#FR$`6cnv;!zi(Vj^I|y<*n3Le>ZJWGvhmOI6ho0sWG9IFUItxJ4dXWr zFx;7XsFo^YcJUE$@J#Lxs4+03*(Xf5ou3@>0Lfp6Vi8BQqtfn;pdH2@>UfpPDh@FC zj|(?-XVc@jqPocVMN=bc(HCZ?mAxa4sXKnn05+F_*8^NkyL@-5Ay|_xhNr}XGzxAc zaa7EY2_2iD2g}mKHgZUeu480~#l2XiVPxovNt=zUYBY_w&%!*&U|21g_msu!;iaSS zum|&U0=aV1$E=(4QA+9MzVSgAqMTftYvM>a+^@Kljlz-Vzr@ObkE zt;Hz0ccX_v%`)EGqT!HpW@Hfn)=Rfny_jA1i_PxvMN!@bv27ZnjNf)LGp8U3Rl^J;NQDRL0P8 z6R!uH^@Zw=Or=}5#)}ze6dJS2?E>X*xT@<5_hE&9MHAl5yd>1xfq|Jsi3}X zC%dz!UE#oGca=7NEu#oo9C!my7-P0C<-%Hj!)cED>lWiTb^{ZMrUMTZi#_;xN;u&! zwm87B;U}`|rSyO@QCzRR*=q+m9q;+RN< za4(O*bGy;78)!dJfwz434KeUu!Z^8e??y{>IY`Tm^kio{%7|iuSNLoDZQIhBApTD} z3=diU=E=uokDcgXo09_T|o_UbRIE|#HF-WcfMBW?BGfcDCH$g{2&-GU~AXtZB{eJm#Q^Ot7FCb&nb3VhL) zac_906#X&<7od=Qq#KedFbfNy{vftr1zdC#5f0>DxA9L~Xa~UrJt@eL?F6}^bxP`? 
zoZh-D-9P13Y37B6J;efYraEZ1eK%g8HvlVF2jb~qhUxMGGO4h9cUa_3!>Y!e`Bcv) zp&cjn%*O2+Dr3*AY&9^`=LA#_H74usX&;$2Fk?rx6>zsX<$9=em%w}v_zXIYiZaVm z3-=*2>Yq=Z?shx0ZjwT>H1?E09w4!@qulWNd$w}?B`V=*N$Wd7shi@De0dWgYk=(o zG++~#!H&nWd~Mk=b?xb4vdgNH>8uYILgO)WW$-hN(19@hf)c~rL;kaq3sO|j=Mo=y zVYp=%rPqBN^%((2H4K_#1wS!6>;J(2V1H_3U_8X%n|vFJ^StFvKI>#qFnHp@uHs^ z1Y%RMCV^TemH~7!O;Tm%>;~<;QmZ4~6>T+R;L0jV-5AQJ*})8k-gqhn!jpcxx?0LiRsz65 z9(Z5B>x)SYLD#`Hj%yi9XiZ*r(#Op@uBdGc;|{4CH&C z~`6E^8je~G);?R!+WFUx;ZUFB?>crXlLu+B@(v0snzBUn^}aVlhMXxqVpU?i7df* zQFcolZE%cjZ#$CUKZ_rr{s?$eo!i8%KfdtBDdq*<0N)wv`MVKjE<(=7Ot)4a0H;c z_xVdS#?DFN)pRA%p*M!}^$_wL=EWx1w5N1u(LG#uRvl;ineP-nb=9gQIOX6KXm{v6 z{a}8Iy&K||H_OH>ck(b zdL)lH!rNC9jT=3pDQNc13Hz1$D3`|oP0F%P%r3dy>Ae=CT*x_t=@<0Yg@r3DLft?c57%BG)z*q{&bYLg|0S}If27Q)I0nPq}l9)H0*hZEM!A;IX?~Lr%nP6 z2nw^j()(&!_4v$l=>TpX7*6J~5%+~!ccN+Y4*o>z+YRI6-n7wl)AoC-LQ|Uvw*VO3%KmY_ zr4v?SY0dq-E3Uy>m4{MN@h(OHVTM`6vD_hl;goo)IU^l5al;y5mLH7(B~S)|p&t{nYhSMV{WZtEsB?c%!Bdrwrdm(~hm|J(IUQADWuIO1__jrSDHg zNBN}NVk$o#hT-)ezw4BFBpCz0YY{yy2}y#2+HMU_t13xdCQ=8X%*+!U(Ns^u_0aM; zDFZQ=%qS+yhx~AXnZLmrMgPVphcSyV?XbP{#+B~ok<_#43jet{XTj@J2k%ZNp+W4{ z!$Gc6x=p^QYp#i4{`eIX4f1){8cLZC$R{;cv9S-c`kznqvLqVi%?cpO8F?fhAVpN2 zv{&=yNymwDieqD5ab}Q1iy1Imc=<+djUof3}EvH3KXA zR9J)d;7Do~1hf!%(=U@Tv*Ovqohz4Ph9Ru3rA~99TdRWcD$Sf!AaD`d>GVN?%5vMx zWiL*0oFbF?8KB_Pk!=!Hmvp$vMxFoK!?iM;ST_Yv2Gv3%m!z*u2Lt|6IcIA!jx#!! 
zJ|9OKu>BUh(=w1n<#zH@av333SX#&^PvUdkK@H>FLTKbT%_si@!F$!287r-*T1f`Z zNBj{+7ep7nW`Qe2iIYL&+JF&srU{Wy!I_!}m(C0cXo#S8zF}|U;@3|8=#_GzhtwiZ zszg*rDi+e!M9*66jaa#NZQak+ee}>|LNxn#^2$~|wMjK^7+rBk%}C}T9Oh3`Lvkc5 z%8A^nGI-5;DI@j~$ocJh()IeCv>Da2)&=)v%fVO1ZyV`jAGsp8ZMTU#gpSYFL$Iw| zXA@cL7PFA7Ee>6aqq-Q~pZ3;V4pyjRrYr%m3*To4PC}*7jSIX>2Ka%QV16J%xb@@N zbr7g3Pe*S!30%xL$XBG|WtAcTLzc0^Ai%h>h|FRdi3PX!iIl@Cbw}to!i|&OyXC#9 zhD7mW&#h`zO1bhxKM}p+(?$m(CR0LL$uopl#b$lp5$;lWZH%WSsRrD-XvX=Tw0s}kTb9%lEEmKWv{S-Lx}crh-^VLVH1-wUQwCfZ}v2kH|)&R zDNNyqW9Y|5XZP2ey5K9Zk6(?6Z-~*>Ek{%fC9w*>1r%(G;T{mEEQJ?wIFW9bok2!~&{yj49Do(x)t7YH|_a+Y}#ehEE zd09{V^JT3Ci~}>fbbyAN&6HN%nUoJY_X2sCG%oO3D%A)FRsKC4GrOjHWIQV zgB0yq6edhwRWO}Z{8r>LrSmHoX4WG6!)kMnk;>3;mZl1Yt(bLkv z&tf~pv)LyK#%g)Gj0Iy_#Yej@DE&7c41U!^9X0U_$M5YweTnea%VyI@?+F))Y4cmb z6S3wsbz4r(G<2WL4e}CM_p={>!}pFYcI4`V_c6I zFiGpTsB>Dg_>Q6Bn?GjgskV{0n^Xzkb7H;=Wl_FMWn7ye32fq0a488k93A-=t?Ohk zELen91hkIWwA@1gu#SEP8Yp41{Pw?=IdY;UhOk>sb`52W{g94dhvMibh8l5_t&Nbt7C$O9VMrYTx!mAoitu+>Xa zKMkuCaH|(e4~J@9+pP0)@Ob|bZU>LdPg6P_X_v}chqHfYRy_KgtERWo&@hu0ET&wG zd7L|cm(I~ii!;qed_M7gNu-OWA$=N#ZqHVGL$I5RWhA@da~)+KgihjJA+|{MLHgQ# z9H&qPk!)WD4M@U|XCu7;VBEadgv17_fPE$SHW@we8`);bJTBiXREyg2Jr7#DO{COD z)V>Yc#gx#gPw5JN#r>p3x|9aq2xRmio@-}j=r=b^5k=brBQCU~`xaKx=13s|CvZrC z)+9ZNQ38z$psEH&abmH?PWc1bO$$pZttnO;Y?o@L=#>Qr>!Ojfi;&$Gi_p$!XjEsT z9jd84tkd#e8#1?!=M=Mn(oMpnI%4uD|&uBt?1a zq(iKCM)5iWEm)IL1`egkD!vImS#|MZ2A=7@WVEK_o9Mxe?U$gy)HASm@6md`KxYM8 zR-WH1^&%Rzo7N2G7ID{#^(c|aoRH1j{kj{)g}%Iwg;zl@Wh6JTD}!$mVHM@=Kmcyc zQ6Sx~2uidTQ6gZX^lUwfpqg;*vJ@6qKtE{aN6T06n2$3ar+6b5vm0J6QT?md)98@1 zm~W6|$Ed2Wr{!2#c4{vYmE=HXhzqXF4t>^_c(72xLTm>Q>{DIA^S0`a2XAgC_pTS3 zmfb(SFdk3zo#G6Hqb}4vR;WIzcHD)e{Gax|GAycf4Ot>fkw$t*=^S#%VZNpN>~r?^eCJ%>_y6$Yy;xk&de;;8dq2-w zym8p@nIR#<%F3WJ@lydyv_s#Jx@?!1Xv%>iHY|at#|D_BXeU@OIC2oX+YyoV#&pgg z$$)lQppH5xBRQ};z{avp5aP?5*S!%tBkw@urG;5hq$0PuATD#L=R(PiF&=zdXrxi` zXpU=B21_0mVS2^~kIwQtbGj0}L;Xg#73YJ|!pv=u5a=;MKqqALDsID_^1Rgw%$HGX zXw^Gw!_hJ6gGb3znCe*ZUhuc{3|wYD+gS*vWg!ScaUOan=}1G&Nt-GlyHfv>v=B3N 
z$GxXfPmV^C5#o!l3GwapSYN~pvu_W=?6$MiaF)y4ZqU$>V$EE z&1#6zZaLG$bBeSSRt9vkh~?;^3x5KdnrcimfQQ_9}4a2(STju{rFrAzU#etX7ms@tiaLvR~g){APRi|h%(}km$xqEj8ETJ!_yx!^ZrDgZmcu1-! zbwt=?CYL*xdLuM#FX2~IBstnPrySCtBW~rr&Yv+|4^$dq<^bM*zMUwxF)=g20?jf8b{PTD*t*uXO}X^P>awo@aXZDo9>T+pGBCVT3WOC`su3 z#cBtOS8HS1j8f=(-v>s%WSU@F-xt!NIL{V(5^H7SnqLhjxc647vHp;pN%bH9RD*;P z!nqU9(nmL*X>v$nUIpsO@0*-#dD-0%ch!+Ip_zI2t}F7ZJQUE5)mQg&AL-|yX+rOz?d7Sqp(4F~=cJIEkw12C zSQ)*(V=!7DyTU=_CrHqGUnMHEVrLDYmeq}rHNq9mU{_I}NPTyaWA;a?&X1*NXOOm|kUBO)mKWF8S24FMPV2Gg|`%pB~Fz1Dtmp8(eZG4d6uyg+)}8`-oQ z(3C*(-M8#Zne9S9atj$+A^royglR1=%C;n*tx@CYmgpC*zUq6H;jE;1Zaqn(C#@lO z3sisutk-c}tz)7NKiy0W`-lw-_0glQhU#f@Yj-7^erIjo@)l0bCE1Ems(!L^Uw@z* zm|qnjdoOL`J#IaIE3wiBmxpl2J)81Az4C)tdzFEpPw5QihcW2|^Yh%c=jwunVvXz~ zE^(UbVYZZv=b>W8TO3)-UuWN@i+53~L?u`RB+|at77@+51$t=%ZBOgT=xk9Mpy+z7 zo;+_RZinp8;|aOP^EJ8G>IjW6JR*@^5$yQCZZ@frWFuJiS912x}I0lvR=de8edb9=?z<7qr zO&zht8ZK-s({GqAbL-h#2i8r;t$xXTScPKxj?RA+)ZrgfF1pAuq@lIOvt=kB6D^ExpwWN98kp7Ls+Q{W>e&Vnq^Hz%W>DTyY z1MM9Y#wa_#RDSU*oBL+4pMk3#OK~KyOS70OjLKQW>h*i z1RQcwbw+b&dp~5q`YR|#d1)hHPIBaqhFHh8L@SmN<6)#Ta4|TB_nF? 
ziojr91u<_=Zls6zTy~_w&ET22qq3IEesDOdqD~E!ocpz|lXs105F-|lzwQlC^ zAT@*Ut$H)jY}5n}L1aaeMMI*JZ~_wRVk2kGL?7@;$o?dmxV`C>qREdHBFz$Gxz!Q; zpFUaeXRLW`pEX`^#>;k-HJ&X_G;Qg-7=ldiQ!s0mITwGPcwuHWo_&@bcPyxXB^>xp zYtiMiC9%(BEm-m{4t3P71_f#ozkj_6m{2Sp(H?SB%=1 z@5`0OS|j5QfyF`jjdIZ0eV+ilPpIaz@!~fc&*prFDk7M1U0c^t_B^k=o^2})_;b}B zq0dYE3kE&tf-r~YIb3*hcj45YZ}H3NUStUBE}rR>H!Hu`ik6MRFc7U=W)3&t#95kk zrS#jq!vlx8KGBf-P+WKK5p_{4GnS2@NI(ZcA|8pwjJ;+DDqep+dhSx#;m4}YN?j3- z{l*#R<;1WT@nkJ|xPq+ufvgMF*6n`&lIp-1IkiP|KTggUjj?^!JW!D(Gr|f5b`_y4fhUqnhYFbdMoT& zqG(O-5KWU(bseF>3767euGp~CofxJP7>=rLBOAkUVW#Bz}G$6B@+;8e8sv@{86ZMfsTCc$p^7qUz2w6M~PV z6`J~_;j>XR+EHo4%(zdv?Vg)jah&9|U1butg6^t*=8ahj!9+w8Of4Z#@b8+_Jqgqf zPl(vuY7khUPd>hJSFB1RVD8EWYT?}wQ@}@ZUk{>^1H)vFRDLS z!_NPEEB^?sc=PyhFurE7ogvYROeDn0#Q%YEmCcA8%Cuq1yabZ2|PyRN9vMMbf- z3^{8$`pVasKql5eWhBtPg9JBMHCKQZ3)AxW{S#^Hto(W}#}&Ooq{ZzyE;U^sW13f-4HbpTIi+i8wx!xiuyJ{2-*jb6qxs*cM6s=+PV^ z+z~?z!pVERe{Ea#t^0POS&A*K)nf3k`bcNMOJVP93%OUm)jC$IZP-h*TCYHRN(-#Q zq-+j5tRL|Z^U|s=>n&S^n-%KlKKE5j)|x@MeU+iFkby)9#4v}^-9oRFZzKd=zpn9( z*SDuzMx%>Xw5Q&G80`~-{F;Bc!>N?PKH8`*z)ObEv1`%o+RW}@o{1CeO4RtZ>DS`V zZA*S_e))5Ag_0kP@Y_Tmn@}d2UxFpkz{cQyN_B268Wt)fy5%coNh5P4p_JAvu?OWz zWfHp2qc}8m=>oG#r0Xs{);VhPGx2PvY*s;{);Pt?ZG~Z^3(V_oJ829=uQK%{;$f|r z1+)rob`nc6h5xv9MajB&nkH|Zu!`e)7bZi8>0#6Pty)8}jyOD~Z5x5p61VT6)s zxG&TA!>pP>pZzQ*$^Hbk@(fE&Q-BP+9q>SXVp${7W}-JkgwmK|Pcma=zgG=qeOI^b z8m#d}1`}?)Ne?z&wMKPILff0PmOJ!n8hqE9&KAv7-aNZjjAUC_aVD~F8oO;Th>yfv zB;p&fI}REb=Rm=5>+cmHl=j zo;)Ai<+(B(=!{uS=+!8;WA3(d{LJ5~0(IS4AG#6Qd`JjR8@(;8hhQc);gkkPV?U4x z40k>Y>e_Rs-3c3E4uY--ZVFsI$TJA4KDxCx8U$O|QPM~;POLA`sHVuz#@vv^ak#J4 z$}}JmSST9Im4w(^*^)Fy6+HE>>F?TTXb6e1;VQdN@e&l#m5~|%+NSzQZ$ox@df4b; zT$cp8rDjjOv4&kYYla(tdogE9)D3*y z$6E=KG%gZc#Ulolvwo!cdkIif1}WcUGV%J8rMFx$)^Z^(!#m(+P1uTuU{ zjKpy%xG2g6bzkT6SFjSWKAC7rpg0gIInYTmnQbk%HvCy)|32oGKKCe7bJX)YJqki{ zk5LPhq;WPCO zLppZ@ty_0e#uJ(IM*_^cFKiCnc?XYMW3)zzy{yJV2qNg(lg%c{FY?)U9&ot$Zusbo zk8o5$3{}Y|6tO3zi!Klhw=a9Yaa=-FS@vm+o07WFDD|ck@jRr$r{mqBoM6lAmv4Hj=?PO3f6b6|~m(mQ6Z1CdsRMjCIw%961~Qs0Z|| 
zsXXr4Z7jwA%mx+U0aysq@yD+x?2g^NIZK->I#!3|avxqV z9+*OXo#2Viw91sNSfcfct;eO4j0oqz&-7jMyX&e~BSROvRWALW+@Tt+uJ6Q)be}Wv zKdOpx^x7^~d-LSLdZO`?P@aqZc9>xSIkNl++`DMWX_C$p;=!OTjQz%Jt}6WDNjdnl zXXOdXh{}z$4IfcaUDs#k z+D7I*`{V2P7AM2gNopjrBrQz-^((ODZ5qa`OBvmQEw9r*wulP<=N3`G6cP&;FG*_dczT;CelJT6?Xv=;x z$E4TdunBzj?F0w&WIyHBPBa7RLdGIQb~Zjo-{||TY;m)%H(a$4%vRUaSiwZTeNNf3 z-nZjf4}CbVg=4T0wEFHPaXD%!IW=QuvPbm!$Kx3=;-D0hnz4bW?UD3{L!FBT~cFaW2w{c$jMWS>T%B-y$}tDq$e5- z*LX4nbJRLM;5!%`siu6}d-=E_%(ibzbnQYGNq=14#1L6ruVxK=EDQB|H6!`OJL#kW zTxNH~lqf7?Fo09({{49)PcW}??5YgJw|IEgXsEt&?PAsRgQQNBICUtwc)L7>czb^w zj&C|ohSH^Lu3d1;(k<}LsI`swmS19e#jNMcO`$$-?Ujahfw!r4I8mJl8#2{c6^oqq zO=d~3WwZe}xsjm{YXnq#N^zeJOO_@yI!8ssdse1x{`1E%xTZj2!WEPtspYw-?Y|I32GK&aap#3R-`^ zU?vJjyh)fPfHCABr;jTafx&&R8;uY!<@DOi?^@faBLT~# z3Su*B_Uf<}^CXb(s>b6dkw%lWcMn93%-y+B1(|5?RRTshvTKc6XTlSIEFA&v$Lt-T zoMJOJ>m1|LsbcjGryuX7mhDS>q4bX~4NSI=SI_2!PYKXXRFH{4QD}4QX(S3bI;_yZ z{`#kqTWhO=?EL-ibi6!n4QWEdG?80f)aPX_XT78@%}}<0df* zdZZS~1DmAVe%^VL*AOq;yabCfF5j4`Y&FH$+pl<`nnA0|Jd0n>X;*D}COz>5#_G)d zP+~^Q-nRu;_TYIRd511bX%H0-7F;dPbzD=6g0m*ynrP3Rx4A1-f}#W=5*A|e=C+$J z7Ch4sP`TsvGDrb!`?jm9Pf7cseMZ{IImhBLE4t~fUEaumauzyv+yt_H+W4r;o8{Qq zFiflvgS33uY^`gQ*J#jXcIr!wxV~G-hK&ag&7$Db@o(W^B6>tWpCQGJ13E)E-TA*3o$zt>vCVZ4PRcQG}Fid1ciF*}wz;Eh!NbS_SbJj-dv8+v?fajb8_tMlNpf)K6 z)0GBazNjo{K`bTB2I6TSQzUDT6MZ5P!dk(;yOX|*4t!gj96u_Dw zI(eG}E|>(jWU8PH$h|TIrB3S;esQwGk;4*}&a7iqW)TjvjR|~@OCzK$lzaR8ETg;n z8tc2*KpbU*qQQmHY78*<9@9U=C(bUETp$$RY_)ZxI!pvr<-=J>W}8^Ao!=_cKB>^- zntiTxR91azcl^+)xjBAzbydA_AkvX&bkoPQ`p1?AmC+>Qce`d|!9dID92tH}vTRD% z;D)P13_C>&Ra-sQHI-i)<0&gy5x}gi6uS=}gQ%{rp%aRbcLI+Ny9mBr+=ork(zs4k zfsAk4#E-T+&!o5nGcMLwj8BkOjMgeGj$fRUR!l*}Uttb2^2ttd$ycCTFB)9&bO2q>47& z|AH2+>iX;&4Q`?ZsC~*Oq|_{>w+SXZM8ySypfi)}twnDas!jRm&kOht-_UAkQfgoF z(8YFMFcJOsEu^c5uu(|MKJsvtt>$1kuq$`H?!|zn#ro-POA`$@c6)i?1_6)COpA6bW zoO3ZtRM>ZsB0Slw)`r$d*UFBq8_DLpyrT^=T*0dKdXY-z18Zv+VAa!uaBtJ`_HaJ2 z(+{Si5v+;#40RBa+ro~kK~4h$?s?R}I?qxu^e{lrw8)(y|E=R&(D=_;MbcSj~vwV@-dZuD9HE*xuIOho@M)GwN5e5?CV^MdyFQEk~Kf0FoNiUVh 
zE~4ML*3;I_#)7NOMmWu7x6zIdS#!R`qJT6=dqo|_v&M3FvoOu|At*;werjxac1*`n zI0Mlu=Zd~{L@qw-28~b~79l{gELQdFNLg@yGQu79^4+(FIIkNtyH}fKbOYpbjk2kL z>4AX&>&sCU3(4GLg8mf@-b%t=&4-^B4hKIKWZ7zLXgN4z}7+! zf1LBt7#BNe!3tbT`ydHDbv%7)Ny}3M;%s-fj%}2aWSYOPA0r+h>a)o9jD#Q_>saRG zgZ*X;C;HLBp!LFXaI5S!*s5>xi&v<`Y4G5Emaeu!x6V?Ry`UleRjZ1c2-KReOpt?^ zRg|#6_MBqHXalFnV#rkcCZ`1f4`ec=4LWG0NP56bTAB_O-jt~8m<{VoCh;YubK_Yk zVi=sW6OSl2INsj)Fx=iVq_gNUN0H_;XJr+a8!ItD>h!8r<ct%tkc>b6}h zdsfWR%#9%5NUv!|!v<7L!56>5hRT&0QRb{kS?#X=;W`D~&589!MV)7ieSO2kLyPe` zpKUa_#+hAINKD6O$aN0i=&sYyxpDJ~7qR+`%hWXM1rb6kH%ha$4Xf0O-rS#Ik=uqQ z1oMYc7xzp;X!7(2onIDGPum@qTjn)Z9JTH78h&cHEEwO8#)EG)SI*QJp~yu;$}$eu zN@RxWxRMTMqe+Epb!%o$j0elIUB(x%fn%VU`$)g_1n(8v-mxl}u9-X*nP?n2V(E@?M}kn5KPU7gW8KeH#$eX z%Gx=t$TYC3GWD~VhPhLt^LGs?N6xuMaK`#b>hGgLi!~V6LvG$C%vzcBOIOT#=IRjr z0LAP*$BeA5AL3xn1cQi5_{Guf0)8ymM^kpPt7LxCxFCy5uw|uACJtz*>P~#aK!ZWkMElKX1It5< zN{U&>IYG~eZ;49^wYS5Ub@#8utfR`~OPI<^!EeJr6dJoU5_zj{#Zy&JGa1zcvB|T~ zltd{EP_PT6J>wA**fwN;HogE z13ntI^~Iuc{c_6+0|3IPEpn+ve;$82$VR8O4s)N@Lo9OQ*R% z*NENg>}X%hmP@$w{-ioX>6^4%?{Lw^RLxfzG@jeAwCPX;B{ROvk2Okz9pjebJX(p% zU1|P0vP#yo^x9pq*we1PV=8rS8wus%ayvZHZ`k8L1^zijQz7zmNS~xm_A(jU_Gs60 ziPJ2n@Q5J{5+N=VB>B8Gnuk6w3wdIbM0=SVFY#lnGM+W0qjU5x4v4Kz6)(#U%#sc} zk)~6pVP??nA2pj6a$&TZj|_8DFFZM(yA-JhDNm!5dworE#VW^n83xJB2W1FgF9={l ztP|2`sbzL*zj+*kYp{9U9!f%~D}Eo|GVN{@`X!!vlhI=Xf4B-XQUNW6{3t9e*svw5 zwH^yrJcUP6xR9cJnii`J_;7ron13zAD8NCYR8I%_f}6muugU#u@YQUys3F(+H?nTj zFY*Aj&D;uWzs$iV; zQQ%h6t4OKie00|Bv@D6fF+nxUle^`@g*oVuH=lG_DS%-tabK6o)^^9G#T^L%(uT`rU8I1s@*~ zd)$l5h}TV)YP|;Yl*wMA`6jSC4B}u14*c%ym{&ee)!s+-UxuQlmFtbdcny98dLzsn zY~l|$C7zzK5Ul1OOQh79W{Ev#d{dla@$U zLs6g7W)s)Y8DBeKFw;4ikLB2BYB8(>)DNz?DweuLD)yM`?MG&OwL>PVPng*bEO-#W zobK#=uS)VxUf*4j#o}qWHSiCEehs6a4W9Jd77jdZOwaw@{kb?|qFoZ8SaCH19-O|b zD)kWyC-630x|7<7<6Zq>Tz|RI%O&#l>4hkMU$~NvZ>j9gRB7&g?cR?;EkDC=jRlLD zj?U-*t7-IarIY~7#mhSr0gb3;4<8X*dP`w}s_f)#Y{Jj{e(fntLuxyN&E=6MzoHY5 zB6W*Jf!TN%52F0JQ8#o_6s#pu~cOo)ea~ z{uZUoTZA6()zims-hl5Ln=oMS-(_=|cdegn4{PCQzHZ8nO~_XJ_SZ-lYv1I;g83t3 
zpa0X(|G?1&khGMu!*2jh+Gn#QJQA_p`eXmkrnp!zzRB!~+t?2@=64fL0J|<8n~T6* z`$E4VZcG^eNqm2HCk>2}n-pRLqeLFdH@b}RmYAgG<0C11e!?mN(BV1wje`iGhu-=1 zfqziupLY`CDFXp#)AQ|N-rXOM9VS%dc)HuNnRvLXjF3{5z2cWe_VDL>^hs2p(I@PiTBB@Rdr((yyh25Lcbd-(6GKcR)x`xKEFy8 z{x`G(koW*Z;(_=0G73?4ThrHO$+Eo;a8xoNP}HFoR6~_sSZVlnTkc=>|N4#I z0U8gP$(QmNP)g2QcWgu8ahL{;QiGo)Cz`e~82J;Jt?Ln*YrJ7Qg`VVi?X{ zkJE2Tukk*S0XRBJGQy9S3DVDL2MYBQweA?e_w-k>c%qp^@w-{M6qW)Hnt0~WTWeTV*oFVrvC}VQi~p)DA9)`L zB)>LVqK|8(nFe{S%zqm&SN!&Y;rbvP#SAoVSqWy0PgVVNlcLHgezV}uOMrHvw#2<5CR!|s0d>zi0j0FnQqj(_&} zH_iU%{{OlEFHrg)`u~5)XSIn}9p8KS0Z#@BcdjQ_>hEm$mudoo18pgM))2A3pVWNZ z{6_f@D1&>1ly6GYSG3&Q%nsH zFAs-8EhprVCA-szauzID^;#JxmM5Mq@HtIiust{HfDPKzngA+0{Z>YeWk>QbP{_;eIAb~OUCPoRYq1??c*k#5^S{#C1>xFANp`B! zh?3LmNN4iSL4v`kZ$JroBYE`qNB&0TKvKO2<;eL@RQv~V1^@}-9x;F6liwxc*Y-{#`98`ul)X)JMHLf1!rIrHx;jNIe7s)LN_c;J-^L0#Xc5Dk%Tk1F!hK z0!k!*$DHT?T`CHYlJ++j_;+Rg>YG+PAZIQr=l^#p1|So^$L_QKFMa^r;!gxXr!!vb rKf>+T?b5{m1K1yM`hNwO9|m_fkugx@?K`EC2rkE|$V` diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index aac1e4eade932..08e28582e7469 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -614,23 +614,34 @@ the expected correct result:: assert_frame_equal(pivoted, expected) -How to use ``parametrize`` -~~~~~~~~~~~~~~~~~~~~~~~~~~ +Transitioning to ``pytest`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -`pytest `__ has a nice feature `parametrize `__ to allow -testing of many cases in a concise way that enables an easy-to-read syntax. +*pandas* existing test structure is *mostly* classed based, meaning that you will typically find tests wrapped in a class, inheriting from ``tm.TestCase``. -.. note:: +.. code-block:: python + + class TestReallyCoolFeature(tm.TestCase): + .... - *pandas* existing test structure is *mostly* classed based, meaning that you will typically find tests wrapped in a class, inheriting from ``tm.TestCase``. 
+Going forward, we are moving to a more *functional* style using the `pytest `__ framework, which offers a richer testing +framework that will facilitate testing and developing. Thus, instead of writing test classes, we will write test functions like this: - .. code-block:: python +.. code-block:: python + + def test_really_cool_feature(): + .... - class TestReallyCoolFeature(tm.TestCase): - .... +Sometimes, it does make sense to bundle test functions together into a single class, either because the test file is testing multiple functions from a single module, and +using test classes allows for better organization. However, instead of inheriting from ``tm.TestCase``, we should just inherit from ``object``: + +.. code-block:: python - Going forward we are moving to a more *functional* style, please see below. + class TestReallyCoolFeature(object): + .... +Using ``pytest`` +~~~~~~~~~~~~~~~~ Here is an example of a self-contained set of tests that illustrate multiple features that we like to use. @@ -641,7 +652,7 @@ Here is an example of a self-contained set of tests that illustrate multiple fea - ``tm.assert_series_equal`` (and its counter part ``tm.assert_frame_equal``), for pandas object comparisons. - the typical pattern of constructing an ``expected`` and comparing versus the ``result`` -We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``pandas/tests/`` sturcture. +We would name this file ``test_cool_feature.py`` and put in an appropriate place in the ``pandas/tests/`` structure. .. code-block:: python From ebc0c0986fc01dfc937d493786d6fdbcd1c7eddd Mon Sep 17 00:00:00 2001 From: gfyoung Date: Fri, 14 Apr 2017 09:06:45 -0400 Subject: [PATCH 19/56] DEPR: Deprecate generic timestamp dtypes (#15987) * DEPR: Deprecate generic timestamp dtypes We only use the nanosecond frequency, and numpy doesn't even handle generic timestamp dtypes well. xref gh-15524 (comment). 
* TST: Use pytest idioms in series/test_dtypes.py --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/tests/series/test_constructors.py | 27 +++ pandas/tests/series/test_dtypes.py | 238 ++++++++++++++--------- pandas/types/cast.py | 25 ++- 4 files changed, 195 insertions(+), 96 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index a105a6801fb61..cb3e20e50380b 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1204,6 +1204,7 @@ Deprecations - ``SparseArray.to_dense()`` has deprecated the ``fill`` parameter, as that parameter was not being respected (:issue:`14647`) - ``SparseSeries.to_dense()`` has deprecated the ``sparse_only`` parameter (:issue:`14647`) - ``Series.repeat()`` has deprecated the ``reps`` parameter in favor of ``repeats`` (:issue:`12662`) +- The ``Series`` constructor and ``.astype`` method have deprecated accepting timestamp dtypes without a frequency (e.g. ``np.datetime64``) for the ``dtype`` parameter (:issue:`15524`) - ``Index.repeat()`` and ``MultiIndex.repeat()`` have deprecated the ``n`` parameter in favor of ``repeats`` (:issue:`12662`) - ``Categorical.searchsorted()`` and ``Series.searchsorted()`` have deprecated the ``v`` parameter in favor of ``value`` (:issue:`12662`) - ``TimedeltaIndex.searchsorted()``, ``DatetimeIndex.searchsorted()``, and ``PeriodIndex.searchsorted()`` have deprecated the ``key`` parameter in favor of ``value`` (:issue:`12662`) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index dbe2db67359f3..8ad07afcacfcc 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -839,3 +839,30 @@ def test_constructor_cast_object(self): s = Series(date_range('1/1/2000', periods=10), dtype=object) exp = Series(date_range('1/1/2000', periods=10)) tm.assert_series_equal(s, exp) + + def test_constructor_generic_timestamp_deprecated(self): + # see gh-15524 + + with 
tm.assert_produces_warning(FutureWarning): + dtype = np.timedelta64 + s = Series([], dtype=dtype) + + assert s.empty + assert s.dtype == 'm8[ns]' + + with tm.assert_produces_warning(FutureWarning): + dtype = np.datetime64 + s = Series([], dtype=dtype) + + assert s.empty + assert s.dtype == 'M8[ns]' + + # These timestamps have the wrong frequencies, + # so an Exception should be raised now. + msg = "cannot convert timedeltalike" + with tm.assertRaisesRegexp(TypeError, msg): + Series([], dtype='m8[ps]') + + msg = "cannot convert datetimelike" + with tm.assertRaisesRegexp(TypeError, msg): + Series([], dtype='M8[ps]') diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index a2aaff25516ae..6bbf00d6cab22 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -1,9 +1,13 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 -import sys +import pytest + from datetime import datetime + +import sys import string +import warnings from numpy import nan import numpy as np @@ -12,152 +16,199 @@ from pandas.compat import lrange, range, u from pandas import compat -from pandas.util.testing import assert_series_equal import pandas.util.testing as tm from .common import TestData -class TestSeriesDtypes(TestData, tm.TestCase): +class TestSeriesDtypes(TestData): - def test_astype(self): + @pytest.mark.parametrize("dtype", ["float32", "float64", + "int64", "int32"]) + def test_astype(self, dtype): s = Series(np.random.randn(5), name='foo') + as_typed = s.astype(dtype) - for dtype in ['float32', 'float64', 'int64', 'int32']: - astyped = s.astype(dtype) - self.assertEqual(astyped.dtype, dtype) - self.assertEqual(astyped.name, s.name) + assert as_typed.dtype == dtype + assert as_typed.name == s.name def test_dtype(self): - self.assertEqual(self.ts.dtype, np.dtype('float64')) - self.assertEqual(self.ts.dtypes, np.dtype('float64')) - self.assertEqual(self.ts.ftype, 'float64:dense') - self.assertEqual(self.ts.ftypes, 
'float64:dense') - assert_series_equal(self.ts.get_dtype_counts(), Series(1, ['float64'])) - assert_series_equal(self.ts.get_ftype_counts(), Series( - 1, ['float64:dense'])) - - def test_astype_cast_nan_inf_int(self): - # GH14265, check nan and inf raise error when converting to int - types = [np.int32, np.int64] - values = [np.nan, np.inf] + assert self.ts.dtype == np.dtype('float64') + assert self.ts.dtypes == np.dtype('float64') + assert self.ts.ftype == 'float64:dense' + assert self.ts.ftypes == 'float64:dense' + tm.assert_series_equal(self.ts.get_dtype_counts(), + Series(1, ['float64'])) + tm.assert_series_equal(self.ts.get_ftype_counts(), + Series(1, ['float64:dense'])) + + @pytest.mark.parametrize("value", [np.nan, np.inf]) + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + def test_astype_cast_nan_inf_int(self, dtype, value): + # gh-14265: check NaN and inf raise error when converting to int msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' + s = Series([value]) - for this_type in types: - for this_val in values: - s = Series([this_val]) - with self.assertRaisesRegexp(ValueError, msg): - s.astype(this_type) + with tm.assertRaisesRegexp(ValueError, msg): + s.astype(dtype) - def test_astype_cast_object_int(self): + @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) + def test_astype_cast_object_int_fail(self, dtype): arr = Series(["car", "house", "tree", "1"]) + with pytest.raises(ValueError): + arr.astype(dtype) - self.assertRaises(ValueError, arr.astype, int) - self.assertRaises(ValueError, arr.astype, np.int64) - self.assertRaises(ValueError, arr.astype, np.int8) - + def test_astype_cast_object_int(self): arr = Series(['1', '2', '3', '4'], dtype=object) result = arr.astype(int) - self.assert_series_equal(result, Series(np.arange(1, 5))) + + tm.assert_series_equal(result, Series(np.arange(1, 5))) def test_astype_datetimes(self): import pandas._libs.tslib as tslib - s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5)) + 
s = s.astype('O') - self.assertEqual(s.dtype, np.object_) + assert s.dtype == np.object_ s = Series([datetime(2001, 1, 2, 0, 0)]) + s = s.astype('O') - self.assertEqual(s.dtype, np.object_) + assert s.dtype == np.object_ s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)]) + s[1] = np.nan - self.assertEqual(s.dtype, 'M8[ns]') - s = s.astype('O') - self.assertEqual(s.dtype, np.object_) + assert s.dtype == 'M8[ns]' - def test_astype_str(self): - # GH4405 - digits = string.digits - s1 = Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]) - s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0]) - types = (compat.text_type, np.str_) - for typ in types: - for s in (s1, s2): - res = s.astype(typ) - expec = s.map(compat.text_type) - assert_series_equal(res, expec) - - # GH9757 - # Test str and unicode on python 2.x and just str on python 3.x - for tt in set([str, compat.text_type]): - ts = Series([Timestamp('2010-01-04 00:00:00')]) - s = ts.astype(tt) - expected = Series([tt('2010-01-04')]) - assert_series_equal(s, expected) - - ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')]) - s = ts.astype(tt) - expected = Series([tt('2010-01-04 00:00:00-05:00')]) - assert_series_equal(s, expected) - - td = Series([Timedelta(1, unit='d')]) - s = td.astype(tt) - expected = Series([tt('1 days 00:00:00.000000000')]) - assert_series_equal(s, expected) + s = s.astype('O') + assert s.dtype == np.object_ + + @pytest.mark.parametrize("dtype", [compat.text_type, np.str_]) + @pytest.mark.parametrize("series", [Series([string.digits * 10, + tm.rands(63), + tm.rands(64), + tm.rands(1000)]), + Series([string.digits * 10, + tm.rands(63), + tm.rands(64), nan, 1.0])]) + def test_astype_str_map(self, dtype, series): + # see gh-4405 + result = series.astype(dtype) + expected = series.map(compat.text_type) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [str, compat.text_type]) + def test_astype_str_cast(self, dtype): + # see 
gh-9757: test str and unicode on python 2.x + # and just str on python 3.x + ts = Series([Timestamp('2010-01-04 00:00:00')]) + s = ts.astype(dtype) + + expected = Series([dtype('2010-01-04')]) + tm.assert_series_equal(s, expected) + + ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')]) + s = ts.astype(dtype) + + expected = Series([dtype('2010-01-04 00:00:00-05:00')]) + tm.assert_series_equal(s, expected) + + td = Series([Timedelta(1, unit='d')]) + s = td.astype(dtype) + + expected = Series([dtype('1 days 00:00:00.000000000')]) + tm.assert_series_equal(s, expected) def test_astype_unicode(self): - - # GH7758 - # a bit of magic is required to set default encoding encoding to utf-8 + # see gh-7758: A bit of magic is required to set + # default encoding to utf-8 digits = string.digits test_series = [ Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]), Series([u('データーサイエンス、お前はもう死んでいる')]), - ] former_encoding = None + if not compat.PY3: - # in python we can force the default encoding for this test + # In Python, we can force the default encoding for this test former_encoding = sys.getdefaultencoding() reload(sys) # noqa + sys.setdefaultencoding("utf-8") if sys.getdefaultencoding() == "utf-8": test_series.append(Series([u('野菜食べないとやばい') .encode("utf-8")])) + for s in test_series: res = s.astype("unicode") expec = s.map(compat.text_type) - assert_series_equal(res, expec) - # restore the former encoding + tm.assert_series_equal(res, expec) + + # Restore the former encoding if former_encoding is not None and former_encoding != "utf-8": reload(sys) # noqa sys.setdefaultencoding(former_encoding) def test_astype_dict(self): - # GH7271 + # see gh-7271 s = Series(range(0, 10, 2), name='abc') result = s.astype({'abc': str}) expected = Series(['0', '2', '4', '6', '8'], name='abc') - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) result = s.astype({'abc': 'float64'}) expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], 
dtype='float64', name='abc') - assert_series_equal(result, expected) - - self.assertRaises(KeyError, s.astype, {'abc': str, 'def': str}) - self.assertRaises(KeyError, s.astype, {0: str}) - - def test_complexx(self): - # GH4819 - # complex access for ndarray compat + tm.assert_series_equal(result, expected) + + with pytest.raises(KeyError): + s.astype({'abc': str, 'def': str}) + + with pytest.raises(KeyError): + s.astype({0: str}) + + def test_astype_generic_timestamp_deprecated(self): + # see gh-15524 + data = [1] + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + s = Series(data) + dtype = np.datetime64 + result = s.astype(dtype) + expected = Series(data, dtype=dtype) + tm.assert_series_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + s = Series(data) + dtype = np.timedelta64 + result = s.astype(dtype) + expected = Series(data, dtype=dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", np.typecodes['All']) + def test_astype_empty_constructor_equality(self, dtype): + # see gh-15524 + + if dtype not in ('S', 'V'): # poor support (if any) currently + with warnings.catch_warnings(record=True): + # Generic timestamp dtypes ('M' and 'm') are deprecated, + # but we test that already in series/test_constructors.py + + init_empty = Series([], dtype=dtype) + as_type_empty = Series([]).astype(dtype) + tm.assert_series_equal(init_empty, as_type_empty) + + def test_complex(self): + # see gh-4819: complex access for ndarray compat a = np.arange(5, dtype=np.float64) b = Series(a + 4j * a) + tm.assert_numpy_array_equal(a, b.real) tm.assert_numpy_array_equal(4 * a, b.imag) @@ -166,23 +217,22 @@ def test_complexx(self): tm.assert_numpy_array_equal(4 * a, b.imag) def test_arg_for_errors_in_astype(self): - # issue #14878 - - sr = Series([1, 2, 3]) + # see gh-14878 + s = Series([1, 2, 3]) - with self.assertRaises(ValueError): - sr.astype(np.float64, errors=False) + 
with pytest.raises(ValueError): + s.astype(np.float64, errors=False) with tm.assert_produces_warning(FutureWarning): - sr.astype(np.int8, raise_on_error=True) + s.astype(np.int8, raise_on_error=True) - sr.astype(np.int8, errors='raise') + s.astype(np.int8, errors='raise') def test_intercept_astype_object(self): series = Series(date_range('1/1/2000', periods=10)) - # this test no longer makes sense as series is by default already - # M8[ns] + # This test no longer makes sense, as + # Series is by default already M8[ns]. expected = series.astype('object') df = DataFrame({'a': series, @@ -192,9 +242,9 @@ def test_intercept_astype_object(self): tm.assert_series_equal(df.dtypes, exp_dtypes) result = df.values.squeeze() - self.assertTrue((result[:, 0] == expected.values).all()) + assert (result[:, 0] == expected.values).all() df = DataFrame({'a': series, 'b': ['foo'] * len(series)}) result = df.values.squeeze() - self.assertTrue((result[:, 0] == expected.values).all()) + assert (result[:, 0] == expected.values).all() diff --git a/pandas/types/cast.py b/pandas/types/cast.py index 85053dba0c18b..3954fb5c93da8 100644 --- a/pandas/types/cast.py +++ b/pandas/types/cast.py @@ -1,7 +1,10 @@ """ routings for casting """ from datetime import datetime, timedelta + import numpy as np +import warnings + from pandas._libs import tslib, lib from pandas._libs.tslib import iNaT from pandas.compat import string_types, text_type, PY3 @@ -620,6 +623,14 @@ def astype_nansafe(arr, dtype, copy=True): # work around NumPy brokenness, #1987 return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape) + if dtype.name in ("datetime64", "timedelta64"): + msg = ("Passing in '{dtype}' dtype with no frequency is " + "deprecated and will raise in a future version. 
" + "Please pass in '{dtype}[ns]' instead.") + warnings.warn(msg.format(dtype=dtype.name), + FutureWarning, stacklevel=5) + dtype = np.dtype(dtype.name + "[ns]") + if copy: return arr.astype(dtype) return arr.view(dtype) @@ -871,8 +882,15 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'): if is_datetime64 or is_datetime64tz or is_timedelta64: # force the dtype if needed + msg = ("Passing in '{dtype}' dtype with no frequency is " + "deprecated and will raise in a future version. " + "Please pass in '{dtype}[ns]' instead.") + if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE): - if dtype.name == 'datetime64[ns]': + if dtype.name in ('datetime64', 'datetime64[ns]'): + if dtype.name == 'datetime64': + warnings.warn(msg.format(dtype=dtype.name), + FutureWarning, stacklevel=5) dtype = _NS_DTYPE else: raise TypeError("cannot convert datetimelike to " @@ -886,7 +904,10 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'): value = [value] elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE): - if dtype.name == 'timedelta64[ns]': + if dtype.name in ('timedelta64', 'timedelta64[ns]'): + if dtype.name == 'timedelta64': + warnings.warn(msg.format(dtype=dtype.name), + FutureWarning, stacklevel=5) dtype = _TD_DTYPE else: raise TypeError("cannot convert timedeltalike to " From 3fde134617822773b23cf484310820298d9f88ac Mon Sep 17 00:00:00 2001 From: Sarma Tangirala Date: Fri, 14 Apr 2017 09:28:03 -0400 Subject: [PATCH 20/56] ENH: add option to sort class labels in parallel_coordinates (#15908) closes #15908 Author: Sarma Tangirala Closes #15935 from stangirala/master and squashes the following commits: 1467f9f [Sarma Tangirala] Add minor code change, what's new doc fix 3ede37a [Sarma Tangirala] Move feature test to new method, add to whatsnew 756e8d8 [Sarma Tangirala] ENH: Minor change to parallel_coordinates (#15908) --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/tests/plotting/test_misc.py | 20 ++++++++++++++++++++ pandas/tools/plotting.py | 11 
++++++++++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index cb3e20e50380b..a18ddd9da8816 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -373,6 +373,7 @@ Other Enhancements - :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. (:issue:`15694`) - ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`) - ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`) +- ``parallel_coordinates()`` has gained a ``sort_labels`` keyword arg that sorts class labels and the colours assigned to them (:issue:`15908`) .. _ISO 8601 duration: https://en.wikipedia.org/wiki/ISO_8601#Durations diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 812f039f1a2c7..504c55bcfcfd0 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -241,6 +241,26 @@ def test_parallel_coordinates(self): with tm.assert_produces_warning(FutureWarning): parallel_coordinates(df, 'Name', colors=colors) + def test_parallel_coordinates_with_sorted_labels(self): + """ For #15908 """ + from pandas.tools.plotting import parallel_coordinates + + df = DataFrame({"feat": [i for i in range(30)], + "class": [2 for _ in range(10)] + + [3 for _ in range(10)] + + [1 for _ in range(10)]}) + ax = parallel_coordinates(df, 'class', sort_labels=True) + polylines, labels = ax.get_legend_handles_labels() + color_label_tuples = \ + zip([polyline.get_color() for polyline in polylines], labels) + ordered_color_label_tuples = sorted(color_label_tuples, + key=lambda x: x[1]) + prev_next_tupels = zip([i for i in ordered_color_label_tuples[0:-1]], + [i for i in ordered_color_label_tuples[1:]]) + for prev, nxt in prev_next_tupels: + # lables and colors are 
ordered strictly increasing + assert prev[1] < nxt[1] and prev[0] < nxt[0] + @slow def test_radviz(self): from pandas.tools.plotting import radviz diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 99e56ca80cf97..141e3c74b91c4 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -705,7 +705,8 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): @deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3) def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, use_columns=False, xticks=None, colormap=None, - axvlines=True, axvlines_kwds=None, **kwds): + axvlines=True, axvlines_kwds=None, sort_labels=False, + **kwds): """Parallel coordinates plotting. Parameters @@ -729,6 +730,11 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, If true, vertical lines will be added at each xtick axvlines_kwds: keywords, optional Options to be passed to axvline method for vertical lines + sort_labels: bool, False + Sort class_column labels, useful when assigning colours + + .. 
versionadded:: 0.20.0 + kwds: keywords Options to pass to matplotlib plotting method @@ -785,6 +791,9 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, colormap=colormap, color_type='random', color=color) + if sort_labels: + classes = sorted(classes) + color_values = sorted(color_values) colors = dict(zip(classes, color_values)) for i in range(n): From 9991579c812e5a7c977e69f03b390adf7974445f Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Fri, 14 Apr 2017 09:31:29 -0400 Subject: [PATCH 21/56] ENH: Intervalindex closes #7640 closes #8625 reprise of #8707 Author: Jeff Reback Author: Stephan Hoyer Closes #15309 from jreback/intervalindex and squashes the following commits: 11ab1e1 [Jeff Reback] merge conflicts 834df76 [Jeff Reback] more docs fbc1cf8 [Jeff Reback] doc example and bug 7577335 [Jeff Reback] fixup on merge of changes in algorithms.py 3a3e02e [Jeff Reback] sorting example 4333937 [Jeff Reback] api-types test fixing f0e3ad2 [Jeff Reback] pep b2d26eb [Jeff Reback] more docs e5f8082 [Jeff Reback] allow pd.cut to take an IntervalIndex for bins 4a5ebea [Jeff Reback] more tests & fixes for non-unique / overlaps rename _is_contained_in -> contains add sorting test 340c98b [Jeff Reback] CLN/COMPAT: IntervalIndex 74162aa [Stephan Hoyer] API/ENH: IntervalIndex --- asv_bench/benchmarks/indexing.py | 20 + doc/source/advanced.rst | 33 + doc/source/api.rst | 21 + doc/source/reshaping.rst | 10 +- doc/source/whatsnew/v0.20.0.txt | 58 ++ pandas/_libs/hashtable.pyx | 1 - pandas/_libs/interval.pyx | 215 +++++ pandas/_libs/intervaltree.pxi.in | 396 ++++++++ pandas/_libs/lib.pyx | 6 +- pandas/_libs/src/inference.pyx | 25 + pandas/_libs/tslib.pyx | 12 + pandas/core/algorithms.py | 46 +- pandas/core/api.py | 3 +- pandas/core/groupby.py | 30 +- pandas/core/indexing.py | 8 +- pandas/formats/format.py | 15 + pandas/indexes/api.py | 3 +- pandas/indexes/base.py | 73 +- pandas/indexes/category.py | 70 +- pandas/indexes/interval.py | 1062 
++++++++++++++++++++++ pandas/indexes/multi.py | 4 +- pandas/tests/api/test_api.py | 4 +- pandas/tests/api/test_types.py | 3 +- pandas/tests/frame/test_alter_axes.py | 66 +- pandas/tests/frame/test_sorting.py | 258 +++--- pandas/tests/groupby/test_categorical.py | 5 +- pandas/tests/groupby/test_groupby.py | 6 +- pandas/tests/indexes/common.py | 25 +- pandas/tests/indexes/test_base.py | 4 +- pandas/tests/indexes/test_category.py | 19 +- pandas/tests/indexes/test_interval.py | 798 ++++++++++++++++ pandas/tests/indexing/test_interval.py | 245 +++++ pandas/tests/scalar/test_interval.py | 129 +++ pandas/tests/series/test_constructors.py | 14 +- pandas/tests/series/test_missing.py | 11 +- pandas/tests/series/test_sorting.py | 19 +- pandas/tests/test_algos.py | 35 +- pandas/tests/test_base.py | 28 +- pandas/tests/test_categorical.py | 31 +- pandas/tests/tools/test_tile.py | 300 +++--- pandas/tests/types/test_dtypes.py | 118 ++- pandas/tests/types/test_missing.py | 8 + pandas/tools/tile.py | 240 ++--- pandas/tseries/base.py | 10 +- pandas/tseries/interval.py | 35 - pandas/tseries/period.py | 3 + pandas/types/api.py | 4 + pandas/types/common.py | 23 + pandas/types/dtypes.py | 109 +++ pandas/types/generic.py | 4 +- pandas/types/inference.py | 2 + pandas/types/missing.py | 5 +- pandas/util/testing.py | 22 +- setup.py | 5 + 54 files changed, 4195 insertions(+), 504 deletions(-) create mode 100644 pandas/_libs/interval.pyx create mode 100644 pandas/_libs/intervaltree.pxi.in create mode 100644 pandas/indexes/interval.py create mode 100644 pandas/tests/indexes/test_interval.py create mode 100644 pandas/tests/indexing/test_interval.py create mode 100644 pandas/tests/scalar/test_interval.py delete mode 100644 pandas/tseries/interval.py diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index d938cc6a6dc4d..a32c9f25a0f09 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -226,6 +226,26 @@ def time_is_monotonic(self): 
self.miint.is_monotonic +class IntervalIndexing(object): + goal_time = 0.2 + + def setup(self): + self.monotonic = Series(np.arange(1000000), + index=IntervalIndex.from_breaks(np.arange(1000001))) + + def time_getitem_scalar(self): + self.monotonic[80000] + + def time_loc_scalar(self): + self.monotonic.loc[80000] + + def time_getitem_list(self): + self.monotonic[80000:] + + def time_loc_list(self): + self.monotonic.loc[80000:] + + class PanelIndexing(object): goal_time = 0.2 diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 43373fc86c4d1..ea00588ba156f 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -850,6 +850,39 @@ Of course if you need integer based selection, then use ``iloc`` dfir.iloc[0:5] +.. _indexing.intervallindex: + +IntervalIndex +~~~~~~~~~~~~~ + +.. versionadded:: 0.20.0 + +.. warning:: + + These indexing behaviors are provisional and may change in a future version of pandas. + +.. ipython:: python + + df = pd.DataFrame({'A': [1, 2, 3, 4]}, + index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4])) + df + +Label based indexing via ``.loc`` along the edges of an interval works as you would expect, +selecting that particular interval. + +.. ipython:: python + + df.loc[2] + df.loc[[2, 3]] + +If you select a lable *contained* within an interval, this will also select the interval. + +.. ipython:: python + + df.loc[2.5] + df.loc[[2.5, 3.5]] + + Miscellaneous indexing FAQ -------------------------- diff --git a/doc/source/api.rst b/doc/source/api.rst index bf9d521e2a12a..6ba8c2b8ead67 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1405,6 +1405,27 @@ Categorical Components CategoricalIndex.as_ordered CategoricalIndex.as_unordered +.. _api.intervalindex: + +IntervalIndex +------------- + +.. autosummary:: + :toctree: generated/ + + IntervalIndex + +IntervalIndex Components +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autosummary:: + :toctree: generated/ + + IntervalIndex.from_arrays + IntervalIndex.from_tuples + IntervalIndex.from_breaks + IntervalIndex.from_intervals + .. _api.multiindex: MultiIndex diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 2c5aae133d4d9..b93749922c8ea 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -517,7 +517,15 @@ Alternatively we can specify custom bin-edges: .. ipython:: python - pd.cut(ages, bins=[0, 18, 35, 70]) + c = pd.cut(ages, bins=[0, 18, 35, 70]) + c + +.. versionadded:: 0.20.0 + +If the ``bins`` keyword is an ``IntervalIndex``, then these will be +used to bin the passed data. + + pd.cut([25, 20, 50], bins=c.categories) .. _reshaping.dummies: diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index a18ddd9da8816..04aed6c2c5466 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -13,6 +13,7 @@ Highlights include: - ``Panel`` has been deprecated, see :ref:`here ` - Improved user API when accessing levels in ``.groupby()``, see :ref:`here ` - Improved support for UInt64 dtypes, see :ref:`here ` +- Addition of an ``IntervalIndex`` and ``Interval`` scalar type, see :ref:`here ` - A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref:`here ` - Window Binary Corr/Cov operations return a MultiIndexed ``DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated, see :ref:`here ` - Support for S3 handling now uses ``s3fs``, see :ref:`here ` @@ -314,6 +315,63 @@ To convert a ``SparseDataFrame`` back to sparse SciPy matrix in COO format, you sdf.to_coo() +.. _whatsnew_0200.enhancements.intervalindex: + +IntervalIndex +^^^^^^^^^^^^^ + +pandas has gained an ``IntervalIndex`` with its own dtype, ``interval`` as well as the ``Interval`` scalar type. These allow first-class support for interval +notation, specifically as a return type for the categories in ``pd.cut`` and ``pd.qcut``. 
The ``IntervalIndex`` allows some unique indexing, see the +:ref:`docs `. (:issue:`7640`, :issue:`8625`) + +Previous behavior: + +.. code-block:: ipython + + In [2]: pd.cut(range(3), 2) + Out[2]: + [(-0.002, 1], (-0.002, 1], (1, 2]] + Categories (2, object): [(-0.002, 1] < (1, 2]] + + # the returned categories are strings, representing Intervals + In [3]: pd.cut(range(3), 2).categories + Out[3]: Index(['(-0.002, 1]', '(1, 2]'], dtype='object') + +New behavior: + +.. ipython:: python + + c = pd.cut(range(4), bins=2) + c + c.categories + +Furthermore, this allows one to bin *other* data with these same bins. ``NaN`` represents a missing +value similar to other dtypes. + +.. ipython:: python + + pd.cut([0, 3, 1, 1], bins=c.categories) + +These can also used in ``Series`` and ``DataFrame``, and indexed. + +.. ipython:: python + + df = pd.DataFrame({'A': range(4), + 'B': pd.cut([0, 3, 1, 1], bins=c.categories)} + ).set_index('B') + +Selecting a specific interval + +.. ipython:: python + + df.loc[pd.Interval(1.5, 3.0)] + +Selecting via a scalar value that is contained in the intervals. + +.. ipython:: python + + df.loc[0] + .. 
_whatsnew_0200.enhancements.other: Other Enhancements diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index eee287b2c157b..c8aedcef77502 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -41,7 +41,6 @@ cdef extern from "Python.h": cdef size_t _INIT_VEC_CAP = 128 - include "hashtable_class_helper.pxi" include "hashtable_func_helper.pxi" diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx new file mode 100644 index 0000000000000..60a34aff16e9d --- /dev/null +++ b/pandas/_libs/interval.pyx @@ -0,0 +1,215 @@ +cimport numpy as np +import numpy as np +import pandas as pd + +cimport util +cimport cython +import cython +from numpy cimport * +from tslib import Timestamp + +from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE, + PyObject_RichCompare) + +import numbers +_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) + +cdef class IntervalMixin: + property closed_left: + def __get__(self): + return self.closed == 'left' or self.closed == 'both' + + property closed_right: + def __get__(self): + return self.closed == 'right' or self.closed == 'both' + + property open_left: + def __get__(self): + return not self.closed_left + + property open_right: + def __get__(self): + return not self.closed_right + + property mid: + def __get__(self): + try: + return 0.5 * (self.left + self.right) + except TypeError: + # datetime safe version + return self.left + 0.5 * (self.right - self.left) + + +cdef _interval_like(other): + return (hasattr(other, 'left') + and hasattr(other, 'right') + and hasattr(other, 'closed')) + + +cdef class Interval(IntervalMixin): + """ + Immutable object implementing an Interval, a bounded slice-like interval. + + .. versionadded:: 0.20.0 + + Properties + ---------- + left, right : values + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'} + Whether the interval is closed on the left-side, right-side, both or + neither. 
Defaults to 'right'. + """ + + cdef readonly object left, right + cdef readonly str closed + + def __init__(self, left, right, str closed='right'): + # note: it is faster to just do these checks than to use a special + # constructor (__cinit__/__new__) to avoid them + if closed not in _VALID_CLOSED: + raise ValueError("invalid option for 'closed': %s" % closed) + if not left <= right: + raise ValueError('left side of interval must be <= right side') + self.left = left + self.right = right + self.closed = closed + + def __hash__(self): + return hash((self.left, self.right, self.closed)) + + def __contains__(self, key): + if _interval_like(key): + raise TypeError('__contains__ not defined for two intervals') + return ((self.left < key if self.open_left else self.left <= key) and + (key < self.right if self.open_right else key <= self.right)) + + def __richcmp__(self, other, int op): + if hasattr(other, 'ndim'): + # let numpy (or IntervalIndex) handle vectorization + return NotImplemented + + if _interval_like(other): + self_tuple = (self.left, self.right, self.closed) + other_tuple = (other.left, other.right, other.closed) + return PyObject_RichCompare(self_tuple, other_tuple, op) + + # nb. could just return NotImplemented now, but handling this + # explicitly allows us to opt into the Python 3 behavior, even on + # Python 2. 
+ if op == Py_EQ or op == Py_NE: + return NotImplemented + else: + op_str = {Py_LT: '<', Py_LE: '<=', Py_GT: '>', Py_GE: '>='}[op] + raise TypeError( + 'unorderable types: %s() %s %s()' % + (type(self).__name__, op_str, type(other).__name__)) + + def __reduce__(self): + args = (self.left, self.right, self.closed) + return (type(self), args) + + def _repr_base(self): + left = self.left + right = self.right + + # TODO: need more general formatting methodology here + if isinstance(left, Timestamp) and isinstance(right, Timestamp): + left = left._short_repr + right = right._short_repr + + return left, right + + def __repr__(self): + + left, right = self._repr_base() + return ('%s(%r, %r, closed=%r)' % + (type(self).__name__, left, right, self.closed)) + + def __str__(self): + + left, right = self._repr_base() + start_symbol = '[' if self.closed_left else '(' + end_symbol = ']' if self.closed_right else ')' + return '%s%s, %s%s' % (start_symbol, left, right, end_symbol) + + def __add__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left + y, self.right + y) + elif isinstance(y, Interval) and isinstance(self, numbers.Number): + return Interval(y.left + self, y.right + self) + return NotImplemented + + def __sub__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left - y, self.right - y) + return NotImplemented + + def __mul__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left * y, self.right * y) + elif isinstance(y, Interval) and isinstance(self, numbers.Number): + return Interval(y.left * self, y.right * self) + return NotImplemented + + def __div__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left / y, self.right / y) + return NotImplemented + + def __truediv__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left / y, self.right / y) + return NotImplemented + + def __floordiv__(self, y): + if isinstance(y, numbers.Number): + return Interval(self.left // y, 
self.right // y) + return NotImplemented + + +@cython.wraparound(False) +@cython.boundscheck(False) +cpdef intervals_to_interval_bounds(ndarray intervals): + """ + Parameters + ---------- + intervals: ndarray object array of Intervals / nulls + + Returns + ------- + tuples (left: ndarray object array, + right: ndarray object array, + closed: str) + + """ + + cdef: + object closed = None, interval + int64_t n = len(intervals) + ndarray left, right + + left = np.empty(n, dtype=object) + right = np.empty(n, dtype=object) + + for i in range(len(intervals)): + interval = intervals[i] + if util._checknull(interval): + left[i] = np.nan + right[i] = np.nan + continue + + if not isinstance(interval, Interval): + raise TypeError("type {} with value {} is not an interval".format( + type(interval), interval)) + + left[i] = interval.left + right[i] = interval.right + if closed is None: + closed = interval.closed + elif closed != interval.closed: + raise ValueError('intervals must all be closed on the same side') + + return left, right, closed + +include "intervaltree.pxi" diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in new file mode 100644 index 0000000000000..4fa0d6d156fa2 --- /dev/null +++ b/pandas/_libs/intervaltree.pxi.in @@ -0,0 +1,396 @@ +""" +Template for intervaltree + +WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in +""" + +from numpy cimport int64_t, float64_t +from numpy cimport ndarray, PyArray_ArgSort, NPY_QUICKSORT, PyArray_Take +import numpy as np + +cimport cython +cimport numpy as cnp +cnp.import_array() + +from hashtable cimport Int64Vector, Int64VectorData + + +ctypedef fused scalar_t: + float64_t + float32_t + int64_t + int32_t + + +#---------------------------------------------------------------------- +# IntervalTree +#---------------------------------------------------------------------- + +cdef class IntervalTree(IntervalMixin): + """A centered interval tree + + Based off the algorithm described on 
Wikipedia: + http://en.wikipedia.org/wiki/Interval_tree + + we are emulating the IndexEngine interface + """ + cdef: + readonly object left, right, root, dtype + readonly str closed + object _left_sorter, _right_sorter + + def __init__(self, left, right, closed='right', leaf_size=100): + """ + Parameters + ---------- + left, right : np.ndarray[ndim=1] + Left and right bounds for each interval. Assumed to contain no + NaNs. + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. Defaults to 'right'. + leaf_size : int, optional + Parameter that controls when the tree switches from creating nodes + to brute-force search. Tune this parameter to optimize query + performance. + """ + if closed not in ['left', 'right', 'both', 'neither']: + raise ValueError("invalid option for 'closed': %s" % closed) + + left = np.asarray(left) + right = np.asarray(right) + self.dtype = np.result_type(left, right) + self.left = np.asarray(left, dtype=self.dtype) + self.right = np.asarray(right, dtype=self.dtype) + + indices = np.arange(len(left), dtype='int64') + + self.closed = closed + + node_cls = NODE_CLASSES[str(self.dtype), closed] + self.root = node_cls(self.left, self.right, indices, leaf_size) + + @property + def left_sorter(self): + """How to sort the left labels; this is used for binary search + """ + if self._left_sorter is None: + self._left_sorter = np.argsort(self.left) + return self._left_sorter + + @property + def right_sorter(self): + """How to sort the right labels + """ + if self._right_sorter is None: + self._right_sorter = np.argsort(self.right) + return self._right_sorter + + def get_loc(self, scalar_t key): + """Return all positions corresponding to intervals that overlap with + the given scalar key + """ + result = Int64Vector() + self.root.query(result, key) + if not result.data.n: + raise KeyError(key) + return result.to_array() + + def _get_partial_overlap(self, key_left, 
key_right, side): + """Return all positions corresponding to intervals with the given side + falling between the left and right bounds of an interval query + """ + if side == 'left': + values = self.left + sorter = self.left_sorter + else: + values = self.right + sorter = self.right_sorter + key = [key_left, key_right] + i, j = values.searchsorted(key, sorter=sorter) + return sorter[i:j] + + def get_loc_interval(self, key_left, key_right): + """Lookup the intervals enclosed in the given interval bounds + + The given interval is presumed to have closed bounds. + """ + import pandas as pd + left_overlap = self._get_partial_overlap(key_left, key_right, 'left') + right_overlap = self._get_partial_overlap(key_left, key_right, 'right') + enclosing = self.get_loc(0.5 * (key_left + key_right)) + combined = np.concatenate([left_overlap, right_overlap, enclosing]) + uniques = pd.unique(combined) + return uniques + + def get_indexer(self, scalar_t[:] target): + """Return the positions corresponding to unique intervals that overlap + with the given array of scalar targets. + """ + + # TODO: write get_indexer_intervals + cdef: + size_t old_len + Py_ssize_t i + Int64Vector result + + result = Int64Vector() + old_len = 0 + for i in range(len(target)): + self.root.query(result, target[i]) + if result.data.n == old_len: + result.append(-1) + elif result.data.n > old_len + 1: + raise KeyError( + 'indexer does not intersect a unique set of intervals') + old_len = result.data.n + return result.to_array() + + def get_indexer_non_unique(self, scalar_t[:] target): + """Return the positions corresponding to intervals that overlap with + the given array of scalar targets. Non-unique positions are repeated. 
+ """ + cdef: + size_t old_len + Py_ssize_t i + Int64Vector result, missing + + result = Int64Vector() + missing = Int64Vector() + old_len = 0 + for i in range(len(target)): + self.root.query(result, target[i]) + if result.data.n == old_len: + result.append(-1) + missing.append(i) + old_len = result.data.n + return result.to_array(), missing.to_array() + + def __repr__(self): + return (''.format( + dtype=self.dtype, closed=self.closed, + n_elements=self.root.n_elements)) + + # compat with IndexEngine interface + def clear_mapping(self): + pass + + +cdef take(ndarray source, ndarray indices): + """Take the given positions from a 1D ndarray + """ + return PyArray_Take(source, indices, 0) + + +cdef sort_values_and_indices(all_values, all_indices, subset): + indices = take(all_indices, subset) + values = take(all_values, subset) + sorter = PyArray_ArgSort(values, 0, NPY_QUICKSORT) + sorted_values = take(values, sorter) + sorted_indices = take(indices, sorter) + return sorted_values, sorted_indices + +#---------------------------------------------------------------------- +# Nodes +#---------------------------------------------------------------------- + +# we need specialized nodes and leaves to optimize for different dtype and +# closed values + +{{py: + +nodes = [] +for dtype in ['float32', 'float64', 'int32', 'int64']: + for closed, cmp_left, cmp_right in [ + ('left', '<=', '<'), + ('right', '<', '<='), + ('both', '<=', '<='), + ('neither', '<', '<')]: + cmp_left_converse = '<' if cmp_left == '<=' else '<=' + cmp_right_converse = '<' if cmp_right == '<=' else '<=' + nodes.append((dtype, dtype.title(), + closed, closed.title(), + cmp_left, + cmp_right, + cmp_left_converse, + cmp_right_converse)) + +}} + +NODE_CLASSES = {} + +{{for dtype, dtype_title, closed, closed_title, cmp_left, cmp_right, + cmp_left_converse, cmp_right_converse in nodes}} + +cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode: + """Non-terminal node for an IntervalTree + + Categorizes 
intervals by those that fall to the left, those that fall to + the right, and those that overlap with the pivot. + """ + cdef: + {{dtype_title}}Closed{{closed_title}}IntervalNode left_node, right_node + {{dtype}}_t[:] center_left_values, center_right_values, left, right + int64_t[:] center_left_indices, center_right_indices, indices + {{dtype}}_t min_left, max_right + readonly {{dtype}}_t pivot + readonly int64_t n_elements, n_center, leaf_size + readonly bint is_leaf_node + + def __init__(self, + ndarray[{{dtype}}_t, ndim=1] left, + ndarray[{{dtype}}_t, ndim=1] right, + ndarray[int64_t, ndim=1] indices, + int64_t leaf_size): + + self.n_elements = len(left) + self.leaf_size = leaf_size + + # min_left and min_right are used to speed-up query by skipping + # query on sub-nodes. If this node has size 0, query is cheap, + # so these values don't matter. + if left.size > 0: + self.min_left = left.min() + self.max_right = right.max() + else: + self.min_left = 0 + self.max_right = 0 + + if self.n_elements <= leaf_size: + # make this a terminal (leaf) node + self.is_leaf_node = True + self.left = left + self.right = right + self.indices = indices + self.n_center = 0 + else: + # calculate a pivot so we can create child nodes + self.is_leaf_node = False + self.pivot = np.median(left + right) / 2 + left_set, right_set, center_set = self.classify_intervals( + left, right) + + self.left_node = self.new_child_node(left, right, + indices, left_set) + self.right_node = self.new_child_node(left, right, + indices, right_set) + + self.center_left_values, self.center_left_indices = \ + sort_values_and_indices(left, indices, center_set) + self.center_right_values, self.center_right_indices = \ + sort_values_and_indices(right, indices, center_set) + self.n_center = len(self.center_left_indices) + + @cython.wraparound(False) + @cython.boundscheck(False) + cdef classify_intervals(self, {{dtype}}_t[:] left, {{dtype}}_t[:] right): + """Classify the given intervals based upon whether they 
fall to the + left, right, or overlap with this node's pivot. + """ + cdef: + Int64Vector left_ind, right_ind, overlapping_ind + Py_ssize_t i + + left_ind = Int64Vector() + right_ind = Int64Vector() + overlapping_ind = Int64Vector() + + for i in range(self.n_elements): + if right[i] {{cmp_right_converse}} self.pivot: + left_ind.append(i) + elif self.pivot {{cmp_left_converse}} left[i]: + right_ind.append(i) + else: + overlapping_ind.append(i) + + return (left_ind.to_array(), + right_ind.to_array(), + overlapping_ind.to_array()) + + cdef new_child_node(self, + ndarray[{{dtype}}_t, ndim=1] left, + ndarray[{{dtype}}_t, ndim=1] right, + ndarray[int64_t, ndim=1] indices, + ndarray[int64_t, ndim=1] subset): + """Create a new child node. + """ + left = take(left, subset) + right = take(right, subset) + indices = take(indices, subset) + return {{dtype_title}}Closed{{closed_title}}IntervalNode( + left, right, indices, self.leaf_size) + + @cython.wraparound(False) + @cython.boundscheck(False) + @cython.initializedcheck(False) + cpdef query(self, Int64Vector result, scalar_t point): + """Recursively query this node and its sub-nodes for intervals that + overlap with the query point. + """ + cdef: + int64_t[:] indices + {{dtype}}_t[:] values + Py_ssize_t i + + if self.is_leaf_node: + # Once we get down to a certain size, it doesn't make sense to + # continue the binary tree structure. Instead, we use linear + # search. + for i in range(self.n_elements): + if self.left[i] {{cmp_left}} point {{cmp_right}} self.right[i]: + result.append(self.indices[i]) + else: + # There are child nodes. Based on comparing our query to the pivot, + # look at the center values, then go to the relevant child. 
+ if point < self.pivot: + values = self.center_left_values + indices = self.center_left_indices + for i in range(self.n_center): + if not values[i] {{cmp_left}} point: + break + result.append(indices[i]) + if point {{cmp_right}} self.left_node.max_right: + self.left_node.query(result, point) + elif point > self.pivot: + values = self.center_right_values + indices = self.center_right_indices + for i in range(self.n_center - 1, -1, -1): + if not point {{cmp_right}} values[i]: + break + result.append(indices[i]) + if self.right_node.min_left {{cmp_left}} point: + self.right_node.query(result, point) + else: + result.extend(self.center_left_indices) + + def __repr__(self): + if self.is_leaf_node: + return ('<{{dtype_title}}Closed{{closed_title}}IntervalNode: ' + '%s elements (terminal)>' % self.n_elements) + else: + n_left = self.left_node.n_elements + n_right = self.right_node.n_elements + n_center = self.n_elements - n_left - n_right + return ('<{{dtype_title}}Closed{{closed_title}}IntervalNode: ' + 'pivot %s, %s elements (%s left, %s right, %s ' + 'overlapping)>' % (self.pivot, self.n_elements, + n_left, n_right, n_center)) + + def counts(self): + """ + Inspect counts on this node + useful for debugging purposes + """ + if self.is_leaf_node: + return self.n_elements + else: + m = len(self.center_left_values) + l = self.left_node.counts() + r = self.right_node.counts() + return (m, (l, r)) + +NODE_CLASSES['{{dtype}}', + '{{closed}}'] = {{dtype_title}}Closed{{closed_title}}IntervalNode + +{{endfor}} diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f902422b0916d..31402c38c770d 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -61,6 +61,8 @@ from tslib cimport (convert_to_tsobject, convert_to_timedelta64, _check_all_nulls) import tslib from tslib import NaT, Timestamp, Timedelta +import interval +from interval import Interval cdef int64_t NPY_NAT = util.get_nat() @@ -245,6 +247,7 @@ cpdef bint isscalar(object val): - instances of 
datetime.timedelta - Period - instances of decimal.Decimal + - Interval """ @@ -258,7 +261,8 @@ cpdef bint isscalar(object val): or PyDelta_Check(val) or PyTime_Check(val) or util.is_period_object(val) - or is_decimal(val)) + or is_decimal(val) + or is_interval(val)) def item_from_zerodim(object val): diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 33c05f302dd94..f7dbae4ab736e 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -33,6 +33,10 @@ cpdef bint is_decimal(object obj): return isinstance(obj, Decimal) +cpdef bint is_interval(object obj): + return isinstance(obj, Interval) + + cpdef bint is_period(object val): """ Return a boolean if this is a Period object """ return util.is_period_object(val) @@ -429,6 +433,10 @@ def infer_dtype(object value): if is_period_array(values): return 'period' + elif is_interval(val): + if is_interval_array(values): + return 'interval' + for i in range(n): val = util.get_value_1d(values, i) if (util.is_integer_object(val) and @@ -880,6 +888,23 @@ cpdef bint is_period_array(ndarray[object] values): return null_count != n +cpdef bint is_interval_array(ndarray[object] values): + cdef: + Py_ssize_t i, n = len(values), null_count = 0 + object v + + if n == 0: + return False + for i in range(n): + v = values[i] + if util._checknull(v): + null_count += 1 + continue + if not is_interval(v): + return False + return null_count != n + + cdef extern from "parse_helper.h": inline int floatify(object, double *result, int *maybe_int) except -1 diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index ed0bb263ed6cf..47679966e3d5c 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1296,6 +1296,18 @@ cdef class _Timestamp(datetime): return result + property _short_repr: + def __get__(self): + # format a Timestamp with only _date_repr if possible + # otherwise _repr_base + if (self.hour == 0 and + self.minute == 0 and + self.second == 0 and + 
self.microsecond == 0 and + self.nanosecond == 0): + return self._date_repr + return self._repr_base + property asm8: def __get__(self): return np.datetime64(self.value, 'ns') diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 7fab9295bb94e..5d2db864dd48e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -19,7 +19,7 @@ is_bool_dtype, needs_i8_conversion, is_categorical, is_datetimetz, is_datetime64_any_dtype, is_datetime64tz_dtype, - is_timedelta64_dtype, + is_timedelta64_dtype, is_interval_dtype, is_scalar, is_list_like, _ensure_platform_int, _ensure_object, _ensure_float64, _ensure_uint64, @@ -605,31 +605,39 @@ def value_counts(values, sort=True, ascending=False, normalize=False, if bins is not None: try: from pandas.tools.tile import cut - values = Series(values).values - cat, bins = cut(values, bins, retbins=True) + values = Series(values) + ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") - values = cat.codes - if is_categorical_dtype(values) or is_sparse(values): + # count, remove nulls (from the index), and but the bins + result = ii.value_counts(dropna=dropna) + result = result[result.index.notnull()] + result.index = result.index.astype('interval') + result = result.sort_index() - # handle Categorical and sparse, - result = Series(values).values.value_counts(dropna=dropna) - result.name = name - counts = result.values + # if we are dropna and we have NO values + if dropna and (result.values == 0).all(): + result = result.iloc[0:0] + + # normalizing is by len of all (regardless of dropna) + counts = np.array([len(ii)]) else: - keys, counts = _value_counts_arraylike(values, dropna) - if not isinstance(keys, Index): - keys = Index(keys) - result = Series(counts, index=keys, name=name) + if is_categorical_dtype(values) or is_sparse(values): - if bins is not None: - # TODO: This next line should be more efficient - result = 
result.reindex(np.arange(len(cat.categories)), - fill_value=0) - result.index = bins[:-1] + # handle Categorical and sparse, + result = Series(values).values.value_counts(dropna=dropna) + result.name = name + counts = result.values + + else: + keys, counts = _value_counts_arraylike(values, dropna) + + if not isinstance(keys, Index): + keys = Index(keys) + result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) @@ -1396,6 +1404,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, allow_fill=allow_fill) elif is_datetimetz(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + elif is_interval_dtype(arr): + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) diff --git a/pandas/core/api.py b/pandas/core/api.py index 65253dedb8b53..ea5be17ef3aaf 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -11,7 +11,8 @@ from pandas.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, - MultiIndex) + MultiIndex, IntervalIndex) +from pandas.indexes.interval import Interval, interval_range from pandas.core.series import Series from pandas.core.frame import DataFrame diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5591ce4b0d4aa..45a9577c8d8b2 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -18,6 +18,7 @@ from pandas.types.common import (is_numeric_dtype, is_timedelta64_dtype, is_datetime64_dtype, is_categorical_dtype, + is_interval_dtype, is_datetimelike, is_datetime64_any_dtype, is_bool, is_integer_dtype, @@ -40,11 +41,11 @@ from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, DataError, SpecificationError) +from pandas.core.index import (Index, MultiIndex, + CategoricalIndex, _ensure_index) from pandas.core.categorical import 
Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.index import (Index, MultiIndex, CategoricalIndex, - _ensure_index) from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series from pandas.core.panel import Panel @@ -2660,7 +2661,7 @@ def _convert_grouper(axis, grouper): return grouper.reindex(axis)._values elif isinstance(grouper, (list, Series, Index, np.ndarray)): if len(grouper) != len(axis): - raise AssertionError('Grouper and axis must be same length') + raise ValueError('Grouper and axis must be same length') return grouper else: return grouper @@ -3145,20 +3146,29 @@ def value_counts(self, normalize=False, sort=True, ascending=False, if bins is None: lab, lev = algorithms.factorize(val, sort=True) + llab = lambda lab, inc: lab[inc] else: - cat, bins = cut(val, bins, retbins=True) - # bins[:-1] for backward compat; - # o.w. cat.categories could be better - lab, lev, dropna = cat.codes, bins[:-1], False - sorter = np.lexsort((lab, ids)) + # lab is a Categorical with categories an IntervalIndex + lab = cut(Series(val), bins, include_lowest=True) + lev = lab.cat.categories + lab = lev.take(lab.cat.codes) + llab = lambda lab, inc: lab[inc]._multiindex.labels[-1] + + if is_interval_dtype(lab): + # TODO: should we do this inside II? 
+ sorter = np.lexsort((lab.left, lab.right, ids)) + else: + sorter = np.lexsort((lab, ids)) + ids, lab = ids[sorter], lab[sorter] # group boundaries are where group ids change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] # new values are where sorted labels change - inc = np.r_[True, lab[1:] != lab[:-1]] + lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) + inc = np.r_[True, lchanges] inc[idx] = True # group boundaries are also new values out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts @@ -3166,7 +3176,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) # multi-index components - labels = list(map(rep, self.grouper.recons_labels)) + [lab[inc]] + labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)] levels = [ping.group_index for ping in self.grouper.groupings] + [lev] names = self.grouper.names + [self._selection_name] diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9e22bdd5facc4..dd8fa2d3ddc81 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1087,10 +1087,10 @@ def _getitem_iterable(self, key, axis=0): return self.obj.take(inds, axis=axis, convert=False) else: # Have the index compute an indexer or return None - # if it cannot handle + # if it cannot handle; we only act on all found values indexer, keyarr = labels._convert_listlike_indexer( key, kind=self.name) - if indexer is not None: + if indexer is not None and (indexer != -1).all(): return self.obj.take(indexer, axis=axis) # existing labels are unique and indexer are unique @@ -1429,7 +1429,7 @@ def error(): try: key = self._convert_scalar_indexer(key, axis) - if key not in ax: + if not ax.contains(key): error() except TypeError as e: @@ -1897,7 +1897,7 @@ def convert_to_index_sliceable(obj, key): elif isinstance(key, compat.string_types): # we are an actual column - if key in obj._data.items: + if obj._data.items.contains(key): 
return None # We might have a datetimelike string that we can translate to a diff --git a/pandas/formats/format.py b/pandas/formats/format.py index 66a81aadc4213..907198d98cf5b 100644 --- a/pandas/formats/format.py +++ b/pandas/formats/format.py @@ -15,6 +15,7 @@ is_float_dtype, is_period_arraylike, is_integer_dtype, + is_interval_dtype, is_datetimetz, is_integer, is_float, @@ -575,6 +576,7 @@ def to_string(self): pprint_thing(frame.index))) text = info_line else: + strcols = self._to_str_columns() if self.line_width is None: # no need to wrap around just print # the whole frame @@ -2027,6 +2029,8 @@ def format_array(values, formatter, float_format=None, na_rep='NaN', if is_categorical_dtype(values): fmt_klass = CategoricalArrayFormatter + elif is_interval_dtype(values): + fmt_klass = IntervalArrayFormatter elif is_float_dtype(values.dtype): fmt_klass = FloatArrayFormatter elif is_period_arraylike(values): @@ -2294,6 +2298,17 @@ def _format_strings(self): return fmt_values.tolist() +class IntervalArrayFormatter(GenericArrayFormatter): + + def __init__(self, values, *args, **kwargs): + GenericArrayFormatter.__init__(self, values, *args, **kwargs) + + def _format_strings(self): + formatter = self.formatter or str + fmt_values = np.array([formatter(x) for x in self.values]) + return fmt_values + + class PeriodArrayFormatter(IntArrayFormatter): def _format_strings(self): diff --git a/pandas/indexes/api.py b/pandas/indexes/api.py index a3cb54ca97071..db076b60ab34e 100644 --- a/pandas/indexes/api.py +++ b/pandas/indexes/api.py @@ -3,6 +3,7 @@ InvalidIndexError) from pandas.indexes.category import CategoricalIndex # noqa from pandas.indexes.multi import MultiIndex # noqa +from pandas.indexes.interval import IntervalIndex # noqa from pandas.indexes.numeric import (NumericIndex, Float64Index, # noqa Int64Index, UInt64Index) from pandas.indexes.range import RangeIndex # noqa @@ -13,7 +14,7 @@ # TODO: there are many places that rely on these private methods existing in # 
pandas.core.index __all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index', - 'CategoricalIndex', 'RangeIndex', 'UInt64Index', + 'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index', 'InvalidIndexError', '_new_Index', '_ensure_index', '_get_na_value', '_get_combined_index', diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index ab5c01388e652..00ad4ca71cb9d 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -24,6 +24,7 @@ is_dtype_equal, is_object_dtype, is_categorical_dtype, + is_interval_dtype, is_bool_dtype, is_signed_integer_dtype, is_unsigned_integer_dtype, @@ -49,9 +50,9 @@ from pandas.formats.printing import pprint_thing from pandas.core.ops import _comp_method_OBJECT_ARRAY from pandas.core.strings import StringAccessorMixin - from pandas.core.config import get_option + # simplify default_pprint = lambda x, max_seq_items=None: \ pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True, @@ -138,6 +139,9 @@ class Index(IndexOpsMixin, StringAccessorMixin, PandasObject): _is_numeric_dtype = False _can_hold_na = True + # would we like our indexing holder to defer to us + _defer_to_indexing = False + # prioritize current class for _shallow_copy_with_infer, # used to infer integers as datetime-likes _infer_as_myclass = False @@ -167,6 +171,12 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, from .category import CategoricalIndex return CategoricalIndex(data, copy=copy, name=name, **kwargs) + # interval + if is_interval_dtype(data): + from .interval import IntervalIndex + return IntervalIndex.from_intervals(data, name=name, + copy=copy) + # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): @@ -276,6 +286,10 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif inferred in ['floating', 'mixed-integer-float']: from .numeric import Float64Index return Float64Index(subarr, copy=copy, name=name) + elif inferred == 'interval': + from .interval import 
IntervalIndex + return IntervalIndex.from_intervals(subarr, name=name, + copy=copy) elif inferred == 'boolean': # don't support boolean explicity ATM pass @@ -1210,6 +1224,9 @@ def is_object(self): def is_categorical(self): return self.inferred_type in ['categorical'] + def is_interval(self): + return self.inferred_type in ['interval'] + def is_mixed(self): return self.inferred_type in ['mixed'] @@ -1413,11 +1430,6 @@ def _convert_index_indexer(self, keyarr): @Appender(_index_shared_docs['_convert_list_indexer']) def _convert_list_indexer(self, keyarr, kind=None): - """ - passed a key that is tuplesafe that is integer based - and we have a mixed index (e.g. number/labels). figure out - the indexer. return None if we can't help - """ if (kind in [None, 'iloc', 'ix'] and is_integer_dtype(keyarr) and not self.is_floating() and not isinstance(keyarr, ABCPeriodIndex)): @@ -1553,9 +1565,41 @@ def __nonzero__(self): __bool__ = __nonzero__ + _index_shared_docs['__contains__'] = """ + return a boolean if this key is IN the index + + Parameters + ---------- + key : object + + Returns + ------- + boolean + """ + + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): hash(key) - # work around some kind of odd cython bug + try: + return key in self._engine + except TypeError: + return False + + _index_shared_docs['contains'] = """ + return a boolean if this key is IN the index + + Parameters + ---------- + key : object + + Returns + ------- + boolean + """ + + @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) + def contains(self, key): + hash(key) try: return key in self._engine except TypeError: @@ -3341,6 +3385,13 @@ def _searchsorted_monotonic(self, label, side='left'): raise ValueError('index must be monotonic increasing or decreasing') + def _get_loc_only_exact_matches(self, key): + """ + This is overriden on subclasses (namely, IntervalIndex) to control + get_slice_bound. 
+ """ + return self.get_loc(key) + def get_slice_bound(self, label, side, kind): """ Calculate slice bound that corresponds to given label. @@ -3370,7 +3421,7 @@ def get_slice_bound(self, label, side, kind): # we need to look up the label try: - slc = self.get_loc(label) + slc = self._get_loc_only_exact_matches(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) @@ -3606,7 +3657,9 @@ def _evaluate_compare(self, other): if needs_i8_conversion(self) and needs_i8_conversion(other): return self._evaluate_compare(other, op) - if is_object_dtype(self) and self.nlevels == 1: + if (is_object_dtype(self) and + self.nlevels == 1): + # don't pass MultiIndex with np.errstate(all='ignore'): result = _comp_method_OBJECT_ARRAY( @@ -3918,6 +3971,8 @@ def _ensure_index(index_like, copy=False): def _get_na_value(dtype): + if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype): + return libts.NaT return {np.datetime64: libts.NaT, np.timedelta64: libts.NaT}.get(dtype, np.nan) diff --git a/pandas/indexes/category.py b/pandas/indexes/category.py index 7cfc95de5f538..6c57b2ed83705 100644 --- a/pandas/indexes/category.py +++ b/pandas/indexes/category.py @@ -7,7 +7,9 @@ from pandas.types.common import (is_categorical_dtype, _ensure_platform_int, is_list_like, + is_interval_dtype, is_scalar) +from pandas.core.common import _asarray_tuplesafe from pandas.types.missing import array_equivalent @@ -17,7 +19,6 @@ import pandas.core.base as base import pandas.core.missing as missing import pandas.indexes.base as ibase -from pandas.core.common import _asarray_tuplesafe _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update(dict(target_klass='CategoricalIndex')) @@ -261,14 +262,35 @@ def ordered(self): def _reverse_indexer(self): return self._data._reverse_indexer() + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): hash(key) + + if self.categories._defer_to_indexing: + return key in 
self.categories + + return key in self.values + + @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) + def contains(self, key): + hash(key) + + if self.categories._defer_to_indexing: + return self.categories.contains(key) + return key in self.values def __array__(self, dtype=None): """ the array interface, return my values """ return np.array(self._data, dtype=dtype) + @Appender(_index_shared_docs['astype']) + def astype(self, dtype, copy=True): + if is_interval_dtype(dtype): + from pandas import IntervalIndex + return IntervalIndex.from_intervals(np.array(self)) + return super(CategoricalIndex, self).astype(dtype=dtype, copy=copy) + @cache_readonly def _isnan(self): """ return if each value is nan""" @@ -431,8 +453,8 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) target = ibase._ensure_index(target) - if isinstance(target, CategoricalIndex): - target = target.categories + if self.equals(target): + return np.arange(len(self), dtype='intp') if method == 'pad' or method == 'backfill': raise NotImplementedError("method='pad' and method='backfill' not " @@ -440,10 +462,17 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): elif method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet " 'for CategoricalIndex') - else: + if (isinstance(target, CategoricalIndex) and + self.values.is_dtype_equal(target)): + # we have the same codes + codes = target.codes + else: + if isinstance(target, CategoricalIndex): + target = target.categories codes = self.categories.get_indexer(target) - indexer, _ = self._engine.get_indexer_non_unique(codes) + + indexer, _ = self._engine.get_indexer_non_unique(codes) return _ensure_platform_int(indexer) @@ -457,20 +486,39 @@ def get_indexer_non_unique(self, target): codes = self.categories.get_indexer(target) return self._engine.get_indexer_non_unique(codes) + 
@Appender(_index_shared_docs['_convert_scalar_indexer']) + def _convert_scalar_indexer(self, key, kind=None): + if self.categories._defer_to_indexing: + return self.categories._convert_scalar_indexer(key, kind=kind) + + return super(CategoricalIndex, self)._convert_scalar_indexer( + key, kind=kind) + @Appender(_index_shared_docs['_convert_list_indexer']) def _convert_list_indexer(self, keyarr, kind=None): # Return our indexer or raise if all of the values are not included in # the categories - codes = self.categories.get_indexer(keyarr) - if (codes == -1).any(): - raise KeyError("a list-indexer must only include values that are " - "in the categories") - return None + if self.categories._defer_to_indexing: + indexer = self.categories._convert_list_indexer(keyarr, kind=kind) + return Index(self.codes).get_indexer_for(indexer) + + indexer = self.categories.get_indexer(keyarr) + if (indexer == -1).any(): + raise KeyError( + "a list-indexer must only " + "include values that are " + "in the categories") + + return self.get_indexer(keyarr) @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): keyarr = _asarray_tuplesafe(keyarr) + + if self.categories._defer_to_indexing: + return keyarr + return self._shallow_copy(keyarr) @Appender(_index_shared_docs['_convert_index_indexer']) @@ -488,6 +536,8 @@ def take(self, indices, axis=0, allow_fill=True, na_value=-1) return self._create_from_codes(taken) + take_nd = take + def map(self, mapper): """Apply mapper function to its categories (not codes). 
diff --git a/pandas/indexes/interval.py b/pandas/indexes/interval.py new file mode 100644 index 0000000000000..63315ef861d12 --- /dev/null +++ b/pandas/indexes/interval.py @@ -0,0 +1,1062 @@ +""" define the IntervalIndex """ + +import numpy as np + +from pandas.types.missing import notnull, isnull +from pandas.types.generic import ABCPeriodIndex +from pandas.types.dtypes import IntervalDtype +from pandas.types.common import (_ensure_platform_int, + is_list_like, + is_datetime_or_timedelta_dtype, + is_integer_dtype, + is_object_dtype, + is_categorical_dtype, + is_float_dtype, + is_interval_dtype, + is_scalar, + is_integer) +from pandas.indexes.base import (Index, _ensure_index, + default_pprint, _index_shared_docs) + +from pandas._libs import Timestamp, Timedelta +from pandas._libs.interval import (Interval, IntervalMixin, IntervalTree, + intervals_to_interval_bounds) + +from pandas.indexes.multi import MultiIndex +from pandas.compat.numpy import function as nv +from pandas.core import common as com +from pandas.util.decorators import cache_readonly, Appender +from pandas.core.config import get_option + +import pandas.indexes.base as ibase +_index_doc_kwargs = dict(ibase._index_doc_kwargs) +_index_doc_kwargs.update( + dict(klass='IntervalIndex', + target_klass='IntervalIndex or list of Intervals')) + + +_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) + + +def _get_next_label(label): + dtype = getattr(label, 'dtype', type(label)) + if isinstance(label, (Timestamp, Timedelta)): + dtype = 'datetime64' + if is_datetime_or_timedelta_dtype(dtype): + return label + np.timedelta64(1, 'ns') + elif is_integer_dtype(dtype): + return label + 1 + elif is_float_dtype(dtype): + return np.nextafter(label, np.infty) + else: + raise TypeError('cannot determine next label for type %r' + % type(label)) + + +def _get_prev_label(label): + dtype = getattr(label, 'dtype', type(label)) + if isinstance(label, (Timestamp, Timedelta)): + dtype = 'datetime64' + if 
is_datetime_or_timedelta_dtype(dtype): + return label - np.timedelta64(1, 'ns') + elif is_integer_dtype(dtype): + return label - 1 + elif is_float_dtype(dtype): + return np.nextafter(label, -np.infty) + else: + raise TypeError('cannot determine next label for type %r' + % type(label)) + + +def _get_interval_closed_bounds(interval): + """ + Given an Interval or IntervalIndex, return the corresponding interval with + closed bounds. + """ + left, right = interval.left, interval.right + if interval.open_left: + left = _get_next_label(left) + if interval.open_right: + right = _get_prev_label(right) + return left, right + + +def _new_IntervalIndex(cls, d): + """ This is called upon unpickling, + rather than the default which doesn't + have arguments and breaks __new__ """ + + return cls.from_arrays(**d) + + +class IntervalIndex(IntervalMixin, Index): + """ + Immutable Index implementing an ordered, sliceable set. IntervalIndex + represents an Index of intervals that are all closed on the same side. + + .. versionadded:: 0.20.0 + + Properties + ---------- + left, right : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both or + neither. Defaults to 'right'. + name : object, optional + Name to be stored in the index. 
+ copy : boolean, default False + Copy the meta-data + """ + _typ = 'intervalindex' + _comparables = ['name'] + _attributes = ['name', 'closed'] + _allow_index_ops = True + + # we would like our indexing holder to defer to us + _defer_to_indexing = True + + _mask = None + + def __new__(cls, data, closed='right', + name=None, copy=False, dtype=None, + fastpath=False, verify_integrity=True): + + if fastpath: + return cls._simple_new(data.left, data.right, closed, name, + copy=copy, verify_integrity=False) + + if name is None and hasattr(data, 'name'): + name = data.name + + if isinstance(data, IntervalIndex): + left = data.left + right = data.right + + else: + + # don't allow scalars + if is_scalar(data): + cls._scalar_data_error(data) + + data = IntervalIndex.from_intervals(data, name=name) + left, right = data.left, data.right + + return cls._simple_new(left, right, closed, name, + copy=copy, verify_integrity=verify_integrity) + + @classmethod + def _simple_new(cls, left, right, closed=None, name=None, + copy=False, verify_integrity=True): + result = IntervalMixin.__new__(cls) + + if closed is None: + closed = 'right' + left = _ensure_index(left, copy=copy) + right = _ensure_index(right, copy=copy) + + # coerce dtypes to match if needed + if is_float_dtype(left) and is_integer_dtype(right): + right = right.astype(left.dtype) + if is_float_dtype(right) and is_integer_dtype(left): + left = left.astype(right.dtype) + + if type(left) != type(right): + raise ValueError("must not have differing left [{}] " + "and right [{}] types".format( + type(left), type(right))) + + if isinstance(left, ABCPeriodIndex): + raise ValueError("Period dtypes are not supported, " + "use a PeriodIndex instead") + + result._left = left + result._right = right + result._closed = closed + result.name = name + if verify_integrity: + result._validate() + result._reset_identity() + return result + + @Appender(_index_shared_docs['_shallow_copy']) + def _shallow_copy(self, left=None, right=None, 
**kwargs): + if left is None: + + # no values passed + left, right = self.left, self.right + + elif right is None: + + # only single value passed, could be an IntervalIndex + # or array of Intervals + if not isinstance(left, IntervalIndex): + left = type(self).from_intervals(left) + + left, right = left.left, left.right + else: + + # both left and right are values + pass + + attributes = self._get_attributes_dict() + attributes.update(kwargs) + attributes['verify_integrity'] = False + return self._simple_new(left, right, **attributes) + + def _validate(self): + """ + Verify that the IntervalIndex is valid. + """ + if self.closed not in _VALID_CLOSED: + raise ValueError("invalid options for 'closed': %s" % self.closed) + if len(self.left) != len(self.right): + raise ValueError('left and right must have the same length') + left_mask = notnull(self.left) + right_mask = notnull(self.right) + if not (left_mask == right_mask).all(): + raise ValueError('missing values must be missing in the same ' + 'location both left and right sides') + if not (self.left[left_mask] <= self.right[left_mask]).all(): + raise ValueError('left side of interval must be <= right side') + self._mask = ~left_mask + + @cache_readonly + def hasnans(self): + """ return if I have any nans; enables various perf speedups """ + return self._isnan.any() + + @cache_readonly + def _isnan(self): + """ return if each value is nan""" + if self._mask is None: + self._mask = isnull(self.left) + return self._mask + + @cache_readonly + def _engine(self): + return IntervalTree(self.left, self.right, closed=self.closed) + + @property + def _constructor(self): + return type(self).from_intervals + + def __contains__(self, key): + """ + return a boolean if this key is IN the index + We *only* accept an Interval + + Parameters + ---------- + key : Interval + + Returns + ------- + boolean + """ + if not isinstance(key, Interval): + return False + + try: + self.get_loc(key) + return True + except KeyError: + return 
False + + def contains(self, key): + """ + return a boolean if this key is IN the index + + We accept / allow keys to be not *just* actual + objects. + + Parameters + ---------- + key : int, float, Interval + + Returns + ------- + boolean + """ + try: + self.get_loc(key) + return True + except KeyError: + return False + + @classmethod + def from_breaks(cls, breaks, closed='right', name=None, copy=False): + """ + Construct an IntervalIndex from an array of splits + + Parameters + ---------- + breaks : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. Defaults to 'right'. + name : object, optional + Name to be stored in the index. + copy : boolean, default False + copy the data + + Examples + -------- + + >>> IntervalIndex.from_breaks([0, 1, 2, 3]) + IntervalIndex(left=[0, 1, 2], + right=[1, 2, 3], + closed='right') + """ + breaks = np.asarray(breaks) + return cls.from_arrays(breaks[:-1], breaks[1:], closed, + name=name, copy=copy) + + @classmethod + def from_arrays(cls, left, right, closed='right', name=None, copy=False): + """ + Construct an IntervalIndex from a a left and right array + + Parameters + ---------- + left : array-like (1-dimensional) + Left bounds for each interval. + right : array-like (1-dimensional) + Right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. Defaults to 'right'. + name : object, optional + Name to be stored in the index. 
+ copy : boolean, default False + copy the data + + Examples + -------- + + >>> IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) + IntervalIndex(left=[0, 1, 2], + right=[1, 2, 3], + closed='right') + """ + left = np.asarray(left) + right = np.asarray(right) + return cls._simple_new(left, right, closed, name=name, + copy=copy, verify_integrity=True) + + @classmethod + def from_intervals(cls, data, name=None, copy=False): + """ + Construct an IntervalIndex from a 1d array of Interval objects + + Parameters + ---------- + data : array-like (1-dimensional) + Array of Interval objects. All intervals must be closed on the same + sides. + name : object, optional + Name to be stored in the index. + copy : boolean, default False + by-default copy the data, this is compat only and ignored + + Examples + -------- + + >>> IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) + IntervalIndex(left=[0, 1], + right=[1, 2], + closed='right') + + The generic Index constructor work identically when it infers an array + of all intervals: + + >>> Index([Interval(0, 1), Interval(1, 2)]) + IntervalIndex(left=[0, 1], + right=[1, 2], + closed='right') + """ + data = np.asarray(data) + left, right, closed = intervals_to_interval_bounds(data) + return cls.from_arrays(left, right, closed, name=name, copy=False) + + @classmethod + def from_tuples(cls, data, closed='right', name=None, copy=False): + """ + Construct an IntervalIndex from a list/array of tuples + + Parameters + ---------- + data : array-like (1-dimensional) + Array of tuples + closed : {'left', 'right', 'both', 'neither'}, optional + Whether the intervals are closed on the left-side, right-side, both + or neither. Defaults to 'right'. + name : object, optional + Name to be stored in the index. 
+ copy : boolean, default False + by-default copy the data, this is compat only and ignored + + Examples + -------- + + """ + left = [] + right = [] + for d in data: + + if isnull(d): + left.append(np.nan) + right.append(np.nan) + continue + + l, r = d + left.append(l) + right.append(r) + + # TODO + # if we have nulls and we previous had *only* + # integer data, then we have changed the dtype + + return cls.from_arrays(left, right, closed, name=name, copy=False) + + def to_tuples(self): + return Index(com._asarray_tuplesafe(zip(self.left, self.right))) + + @cache_readonly + def _multiindex(self): + return MultiIndex.from_arrays([self.left, self.right], + names=['left', 'right']) + + @property + def left(self): + return self._left + + @property + def right(self): + return self._right + + @property + def closed(self): + return self._closed + + def __len__(self): + return len(self.left) + + @cache_readonly + def values(self): + """ + Returns the IntervalIndex's data as a numpy array of Interval + objects (with dtype='object') + """ + left = self.left + right = self.right + mask = self._isnan + closed = self._closed + + result = np.empty(len(left), dtype=object) + for i in range(len(left)): + if mask[i]: + result[i] = np.nan + else: + result[i] = Interval(left[i], right[i], closed) + return result + + def __array__(self, result=None): + """ the array interface, return my values """ + return self.values + + def __array_wrap__(self, result, context=None): + # we don't want the superclass implementation + return result + + def _array_values(self): + return self.values + + def __reduce__(self): + d = dict(left=self.left, + right=self.right) + d.update(self._get_attributes_dict()) + return _new_IntervalIndex, (self.__class__, d), None + + @Appender(_index_shared_docs['copy']) + def copy(self, deep=False, name=None): + left = self.left.copy(deep=True) if deep else self.left + right = self.right.copy(deep=True) if deep else self.right + name = name if name is not None else 
self.name + return type(self).from_arrays(left, right, name=name) + + @Appender(_index_shared_docs['astype']) + def astype(self, dtype, copy=True): + if is_interval_dtype(dtype): + if copy: + self = self.copy() + return self + elif is_object_dtype(dtype): + return Index(self.values, dtype=object) + elif is_categorical_dtype(dtype): + from pandas import Categorical + return Categorical(self, ordered=True) + raise ValueError('Cannot cast IntervalIndex to dtype %s' % dtype) + + @cache_readonly + def dtype(self): + return IntervalDtype.construct_from_string(str(self.left.dtype)) + + @property + def inferred_type(self): + return 'interval' + + @Appender(Index.memory_usage.__doc__) + def memory_usage(self, deep=False): + # we don't use an explict engine + # so return the bytes here + return (self.left.memory_usage(deep=deep) + + self.right.memory_usage(deep=deep)) + + @cache_readonly + def mid(self): + """Returns the mid-point of each interval in the index as an array + """ + try: + return Index(0.5 * (self.left.values + self.right.values)) + except TypeError: + # datetime safe version + delta = self.right.values - self.left.values + return Index(self.left.values + 0.5 * delta) + + @cache_readonly + def is_monotonic(self): + return self._multiindex.is_monotonic + + @cache_readonly + def is_monotonic_increasing(self): + return self._multiindex.is_monotonic_increasing + + @cache_readonly + def is_monotonic_decreasing(self): + return self._multiindex.is_monotonic_decreasing + + @cache_readonly + def is_unique(self): + return self._multiindex.is_unique + + @cache_readonly + def is_non_overlapping_monotonic(self): + # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) + # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) 
+ # we already require left <= right + return ((self.right[:-1] <= self.left[1:]).all() or + (self.left[:-1] >= self.right[1:]).all()) + + @Appender(_index_shared_docs['_convert_scalar_indexer']) + def _convert_scalar_indexer(self, key, kind=None): + if kind == 'iloc': + return super(IntervalIndex, self)._convert_scalar_indexer( + key, kind=kind) + return key + + def _maybe_cast_slice_bound(self, label, side, kind): + return getattr(self, side)._maybe_cast_slice_bound(label, side, kind) + + @Appender(_index_shared_docs['_convert_list_indexer']) + def _convert_list_indexer(self, keyarr, kind=None): + """ + we are passed a list-like indexer. Return the + indexer for matching intervals. + """ + locs = self.get_indexer_for(keyarr) + + # we have missing values + if (locs == -1).any(): + raise KeyError + + return locs + + def _maybe_cast_indexed(self, key): + """ + we need to cast the key, which could be a scalar + or an array-like to the type of our subtype + """ + if isinstance(key, IntervalIndex): + return key + + subtype = self.dtype.subtype + if is_float_dtype(subtype): + if is_integer(key): + key = float(key) + elif isinstance(key, (np.ndarray, Index)): + key = key.astype('float64') + elif is_integer_dtype(subtype): + if is_integer(key): + key = int(key) + + return key + + def _check_method(self, method): + if method is None: + return + + if method in ['bfill', 'backfill', 'pad', 'ffill', 'nearest']: + raise NotImplementedError( + 'method {} not yet implemented for ' + 'IntervalIndex'.format(method)) + + raise ValueError("Invalid fill method") + + def _searchsorted_monotonic(self, label, side, exclude_label=False): + if not self.is_non_overlapping_monotonic: + raise KeyError('can only get slices from an IntervalIndex if ' + 'bounds are non-overlapping and all monotonic ' + 'increasing or decreasing') + + if isinstance(label, IntervalMixin): + raise NotImplementedError + + if ((side == 'left' and self.left.is_monotonic_increasing) or + (side == 'right' and 
self.left.is_monotonic_decreasing)): + sub_idx = self.right + if self.open_right or exclude_label: + label = _get_next_label(label) + else: + sub_idx = self.left + if self.open_left or exclude_label: + label = _get_prev_label(label) + + return sub_idx._searchsorted_monotonic(label, side) + + def _get_loc_only_exact_matches(self, key): + if isinstance(key, Interval): + + if not self.is_unique: + raise ValueError("cannot index with a slice Interval" + " and a non-unique index") + + # TODO: this expands to a tuple index, see if we can + # do better + return Index(self._multiindex.values).get_loc(key) + raise KeyError + + def _find_non_overlapping_monotonic_bounds(self, key): + if isinstance(key, IntervalMixin): + start = self._searchsorted_monotonic( + key.left, 'left', exclude_label=key.open_left) + stop = self._searchsorted_monotonic( + key.right, 'right', exclude_label=key.open_right) + elif isinstance(key, slice): + # slice + start, stop = key.start, key.stop + if (key.step or 1) != 1: + raise NotImplementedError("cannot slice with a slice step") + if start is None: + start = 0 + else: + start = self._searchsorted_monotonic(start, 'left') + if stop is None: + stop = len(self) + else: + stop = self._searchsorted_monotonic(stop, 'right') + else: + # scalar or index-like + + start = self._searchsorted_monotonic(key, 'left') + stop = self._searchsorted_monotonic(key, 'right') + return start, stop + + def get_loc(self, key, method=None): + self._check_method(method) + + original_key = key + key = self._maybe_cast_indexed(key) + + if self.is_non_overlapping_monotonic: + if isinstance(key, Interval): + left = self._maybe_cast_slice_bound(key.left, 'left', None) + right = self._maybe_cast_slice_bound(key.right, 'right', None) + key = Interval(left, right, key.closed) + else: + key = self._maybe_cast_slice_bound(key, 'left', None) + + start, stop = self._find_non_overlapping_monotonic_bounds(key) + + if start is None or stop is None: + return slice(start, stop) + elif 
start + 1 == stop: + return start + elif start < stop: + return slice(start, stop) + else: + raise KeyError(original_key) + + else: + # use the interval tree + if isinstance(key, Interval): + left, right = _get_interval_closed_bounds(key) + return self._engine.get_loc_interval(left, right) + else: + return self._engine.get_loc(key) + + def get_value(self, series, key): + if com.is_bool_indexer(key): + loc = key + elif is_list_like(key): + loc = self.get_indexer(key) + elif isinstance(key, slice): + + if not (key.step is None or key.step == 1): + raise ValueError("cannot support not-default " + "step in a slice") + + try: + loc = self.get_loc(key) + except TypeError: + + # we didn't find exact intervals + # or are non-unique + raise ValueError("unable to slice with " + "this key: {}".format(key)) + + else: + loc = self.get_loc(key) + return series.iloc[loc] + + @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) + def get_indexer(self, target, method=None, limit=None, tolerance=None): + + self._check_method(method) + target = _ensure_index(target) + target = self._maybe_cast_indexed(target) + + if self.equals(target): + return np.arange(len(self), dtype='intp') + + if self.is_non_overlapping_monotonic: + start, stop = self._find_non_overlapping_monotonic_bounds(target) + + start_plus_one = start + 1 + if not ((start_plus_one < stop).any()): + return np.where(start_plus_one == stop, start, -1) + + if not self.is_unique: + raise ValueError("cannot handle non-unique indices") + + # IntervalIndex + if isinstance(target, IntervalIndex): + indexer = self._get_reindexer(target) + + # non IntervalIndex + else: + indexer = np.concatenate([self.get_loc(i) for i in target]) + + return _ensure_platform_int(indexer) + + def _get_reindexer(self, target): + """ + Return an indexer for a target IntervalIndex with self + """ + + # find the left and right indexers + lindexer = self._engine.get_indexer(target.left.values) + rindexer = 
self._engine.get_indexer(target.right.values) + + # we want to return an indexer on the intervals + # however, our keys could provide overlapping of multiple + # intervals, so we iterate thru the indexers and construct + # a set of indexers + + indexer = [] + n = len(self) + + for i, (l, r) in enumerate(zip(lindexer, rindexer)): + + target_value = target[i] + + # matching on the lhs bound + if (l != -1 and + self.closed == 'right' and + target_value.left == self[l].right): + l += 1 + + # matching on the lhs bound + if (r != -1 and + self.closed == 'left' and + target_value.right == self[r].left): + r -= 1 + + # not found + if l == -1 and r == -1: + indexer.append(np.array([-1])) + + elif r == -1: + + indexer.append(np.arange(l, n)) + + elif l == -1: + + # care about left/right closed here + value = self[i] + + # target.closed same as self.closed + if self.closed == target.closed: + if target_value.left < value.left: + indexer.append(np.array([-1])) + continue + + # target.closed == 'left' + elif self.closed == 'right': + if target_value.left <= value.left: + indexer.append(np.array([-1])) + continue + + # target.closed == 'right' + elif self.closed == 'left': + if target_value.left <= value.left: + indexer.append(np.array([-1])) + continue + + indexer.append(np.arange(0, r + 1)) + + else: + indexer.append(np.arange(l, r + 1)) + + return np.concatenate(indexer) + + @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) + def get_indexer_non_unique(self, target): + target = self._maybe_cast_indexed(_ensure_index(target)) + return super(IntervalIndex, self).get_indexer_non_unique(target) + + @Appender(_index_shared_docs['where']) + def where(self, cond, other=None): + if other is None: + other = self._na_value + values = np.where(cond, self.values, other) + return self._shallow_copy(values) + + def delete(self, loc): + new_left = self.left.delete(loc) + new_right = self.right.delete(loc) + return self._shallow_copy(new_left, new_right) + + def 
insert(self, loc, item): + if not isinstance(item, Interval): + raise ValueError('can only insert Interval objects into an ' + 'IntervalIndex') + if not item.closed == self.closed: + raise ValueError('inserted item must be closed on the same side ' + 'as the index') + new_left = self.left.insert(loc, item.left) + new_right = self.right.insert(loc, item.right) + return self._shallow_copy(new_left, new_right) + + def _as_like_interval_index(self, other, error_msg): + self._assert_can_do_setop(other) + other = _ensure_index(other) + if (not isinstance(other, IntervalIndex) or + self.closed != other.closed): + raise ValueError(error_msg) + return other + + def _append_same_dtype(self, to_concat, name): + """ + assert that we all have the same .closed + we allow a 0-len index here as well + """ + if not len(set([i.closed for i in to_concat if len(i)])) == 1: + msg = ('can only append two IntervalIndex objects ' + 'that are closed on the same side') + raise ValueError(msg) + return super(IntervalIndex, self)._append_same_dtype(to_concat, name) + + @Appender(_index_shared_docs['take'] % _index_doc_kwargs) + def take(self, indices, axis=0, allow_fill=True, + fill_value=None, **kwargs): + nv.validate_take(tuple(), kwargs) + indices = _ensure_platform_int(indices) + left, right = self.left, self.right + + if fill_value is None: + fill_value = self._na_value + mask = indices == -1 + + if not mask.any(): + # we won't change dtype here in this case + # if we don't need + allow_fill = False + + taker = lambda x: x.take(indices, allow_fill=allow_fill, + fill_value=fill_value) + + try: + new_left = taker(left) + new_right = taker(right) + except ValueError: + + # we need to coerce; migth have NA's in an + # interger dtype + new_left = taker(left.astype(float)) + new_right = taker(right.astype(float)) + + return self._shallow_copy(new_left, new_right) + + def __getitem__(self, value): + mask = self._isnan[value] + if is_scalar(mask) and mask: + return self._na_value + + left = 
self.left[value] + right = self.right[value] + + # scalar + if not isinstance(left, Index): + return Interval(left, right, self.closed) + + return self._shallow_copy(left, right) + + # __repr__ associated methods are based on MultiIndex + + def _format_with_header(self, header, **kwargs): + return header + list(self._format_native_types(**kwargs)) + + def _format_native_types(self, na_rep='', quoting=None, **kwargs): + """ actually format my specific types """ + from pandas.formats.format import IntervalArrayFormatter + return IntervalArrayFormatter(values=self, + na_rep=na_rep, + justify='all').get_result() + + def _format_data(self): + + # TODO: integrate with categorical and make generic + n = len(self) + max_seq_items = min((get_option( + 'display.max_seq_items') or n) // 10, 10) + + formatter = str + + if n == 0: + summary = '[]' + elif n == 1: + first = formatter(self[0]) + summary = '[{}]'.format(first) + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary = '[{}, {}]'.format(first, last) + else: + + if n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in self[:n]] + tail = [formatter(x) for x in self[-n:]] + summary = '[{} ... 
{}]'.format(', '.join(head), + ', '.join(tail)) + else: + head = [] + tail = [formatter(x) for x in self] + summary = '[{}]'.format(', '.join(tail)) + + return summary + self._format_space() + + def _format_attrs(self): + attrs = [('closed', repr(self.closed))] + if self.name is not None: + attrs.append(('name', default_pprint(self.name))) + attrs.append(('dtype', "'%s'" % self.dtype)) + return attrs + + def _format_space(self): + return "\n%s" % (' ' * (len(self.__class__.__name__) + 1)) + + def argsort(self, *args, **kwargs): + return np.lexsort((self.right, self.left)) + + def equals(self, other): + + if self.is_(other): + return True + + # if we can coerce to an II + # then we can compare + if not isinstance(other, IntervalIndex): + if not is_interval_dtype(other): + return False + other = Index(getattr(other, '.values', other)) + + return (self.left.equals(other.left) and + self.right.equals(other.right) and + self.closed == other.closed) + + def _setop(op_name): + def func(self, other): + msg = ('can only do set operations between two IntervalIndex ' + 'objects that are closed on the same side') + other = self._as_like_interval_index(other, msg) + result = getattr(self._multiindex, op_name)(other._multiindex) + result_name = self.name if self.name == other.name else None + return type(self).from_tuples(result.values, closed=self.closed, + name=result_name) + return func + + union = _setop('union') + intersection = _setop('intersection') + difference = _setop('difference') + symmetric_differnce = _setop('symmetric_difference') + + # TODO: arithmetic operations + + +IntervalIndex._add_logical_methods_disabled() + + +def interval_range(start=None, end=None, freq=None, periods=None, + name=None, closed='right', **kwargs): + """ + Return a fixed frequency IntervalIndex + + Parameters + ---------- + start : string or datetime-like, default None + Left bound for generating data + end : string or datetime-like, default None + Right bound for generating data + freq : 
interger, string or DateOffset, default 1 + periods : interger, default None + name : str, default None + Name of the resulting index + closed : string, default 'right' + options are: 'left', 'right', 'both', 'neither' + + Notes + ----- + 2 of start, end, or periods must be specified + + Returns + ------- + rng : IntervalIndex + """ + + if freq is None: + freq = 1 + + if start is None: + if periods is None or end is None: + raise ValueError("must specify 2 of start, end, periods") + start = end - periods * freq + elif end is None: + if periods is None or start is None: + raise ValueError("must specify 2 of start, end, periods") + end = start + periods * freq + elif periods is None: + if start is None or end is None: + raise ValueError("must specify 2 of start, end, periods") + pass + + # must all be same units or None + arr = np.array([start, end, freq]) + if is_object_dtype(arr): + raise ValueError("start, end, freq need to be the same type") + + return IntervalIndex.from_breaks(np.arange(start, end, freq), + name=name, + closed=closed) diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py index 74c45aac8b620..d1c8e0ba1cc4e 100644 --- a/pandas/indexes/multi.py +++ b/pandas/indexes/multi.py @@ -1318,15 +1318,17 @@ def nlevels(self): def levshape(self): return tuple(len(x) for x in self.levels) + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): hash(key) - # work around some kind of odd cython bug try: self.get_loc(key) return True except LookupError: return False + contains = __contains__ + def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 7301c87026114..a15d7cf26cbea 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -49,7 +49,7 @@ class TestPDApi(Base, tm.TestCase): 'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index', 'Series', 
'SparseArray', 'SparseDataFrame', 'SparseSeries', 'TimeGrouper', 'Timedelta', - 'TimedeltaIndex', 'Timestamp'] + 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex'] # these are already deprecated; awaiting removal deprecated_classes = ['WidePanel', 'Panel4D', @@ -63,7 +63,7 @@ class TestPDApi(Base, tm.TestCase): # top-level functions funcs = ['bdate_range', 'concat', 'crosstab', 'cut', - 'date_range', 'eval', + 'date_range', 'interval_range', 'eval', 'factorize', 'get_dummies', 'infer_freq', 'isnull', 'lreshape', 'melt', 'notnull', 'offsets', diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index f3fd6332417a1..1d05eda88e265 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -23,7 +23,8 @@ class TestTypes(Base, tm.TestCase): 'is_string_dtype', 'is_signed_integer_dtype', 'is_timedelta64_dtype', 'is_timedelta64_ns_dtype', 'is_unsigned_integer_dtype', 'is_period', - 'is_period_dtype', 'is_re', 'is_re_compilable', + 'is_period_dtype', 'is_interval', 'is_interval_dtype', + 'is_re', 'is_re_compilable', 'is_dict_like', 'is_iterator', 'is_file_like', 'is_list_like', 'is_hashable', 'is_named_tuple', 'is_sequence', diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index e52bfdbd4f837..f05b6fdd6bc23 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -8,7 +8,10 @@ from pandas.compat import lrange from pandas import (DataFrame, Series, Index, MultiIndex, - RangeIndex, date_range) + RangeIndex, date_range, IntervalIndex) +from pandas.types.common import (is_object_dtype, + is_categorical_dtype, + is_interval_dtype) import pandas as pd from pandas.util.testing import (assert_series_equal, @@ -295,6 +298,17 @@ def test_set_index_dst(self): exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp) + def test_reset_index_with_intervals(self): + idx = pd.IntervalIndex.from_breaks(np.arange(11), 
name='x') + original = pd.DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']] + + result = original.set_index('x') + expected = pd.DataFrame({'y': np.arange(10)}, index=idx) + assert_frame_equal(result, expected) + + result2 = result.reset_index() + assert_frame_equal(result2, original) + def test_set_index_multiindexcolumns(self): columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)]) df = DataFrame(np.random.randn(3, 3), columns=columns) @@ -730,3 +744,53 @@ def test_set_index_preserve_categorical_dtype(self): result = df.set_index(cols).reset_index() result = result.reindex(columns=df.columns) tm.assert_frame_equal(result, df) + + +class TestIntervalIndex(tm.TestCase): + + def test_setitem(self): + + df = DataFrame({'A': range(10)}) + s = pd.cut(df.A, 5) + self.assertIsInstance(s.cat.categories, IntervalIndex) + + # B & D end up as Categoricals + # the remainer are converted to in-line objects + # contining an IntervalIndex.values + df['B'] = s + df['C'] = np.array(s) + df['D'] = s.values + df['E'] = np.array(s.values) + + assert is_categorical_dtype(df['B']) + assert is_interval_dtype(df['B'].cat.categories) + assert is_categorical_dtype(df['D']) + assert is_interval_dtype(df['D'].cat.categories) + + assert is_object_dtype(df['C']) + assert is_object_dtype(df['E']) + + # they compare equal as Index + # when converted to numpy objects + c = lambda x: Index(np.array(x)) + tm.assert_index_equal(c(df.B), c(df.B), check_names=False) + tm.assert_index_equal(c(df.B), c(df.C), check_names=False) + tm.assert_index_equal(c(df.B), c(df.D), check_names=False) + tm.assert_index_equal(c(df.B), c(df.D), check_names=False) + + # B & D are the same Series + tm.assert_series_equal(df['B'], df['B'], check_names=False) + tm.assert_series_equal(df['B'], df['D'], check_names=False) + + # C & E are the same Series + tm.assert_series_equal(df['C'], df['C'], check_names=False) + tm.assert_series_equal(df['C'], df['E'], check_names=False) + + def 
test_set_reset_index(self): + + df = DataFrame({'A': range(10)}) + s = pd.cut(df.A, 5) + df['B'] = s + df = df.set_index('B') + + df = df.reset_index() diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index 5108fc6080866..97171123c4a36 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -1,12 +1,13 @@ # -*- coding: utf-8 -*- from __future__ import print_function - +import random import numpy as np +import pandas as pd from pandas.compat import lrange from pandas import (DataFrame, Series, MultiIndex, Timestamp, - date_range, NaT) + date_range, NaT, IntervalIndex) from pandas.util.testing import (assert_series_equal, assert_frame_equal, @@ -19,45 +20,6 @@ class TestDataFrameSorting(tm.TestCase, TestData): - def test_sort_index(self): - # GH13496 - - frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4], - columns=['A', 'B', 'C', 'D']) - - # axis=0 : sort rows by index labels - unordered = frame.loc[[3, 2, 4, 1]] - result = unordered.sort_index(axis=0) - expected = frame - assert_frame_equal(result, expected) - - result = unordered.sort_index(ascending=False) - expected = frame[::-1] - assert_frame_equal(result, expected) - - # axis=1 : sort columns by column names - unordered = frame.iloc[:, [2, 1, 3, 0]] - result = unordered.sort_index(axis=1) - assert_frame_equal(result, frame) - - result = unordered.sort_index(axis=1, ascending=False) - expected = frame.iloc[:, ::-1] - assert_frame_equal(result, expected) - - def test_sort_index_multiindex(self): - # GH13496 - - # sort rows by specified level of multi-index - mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) - - # MI sort, but no level: sort_level has no effect - mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4]], mi) - result = df.sort_index(sort_remaining=False) - expected = df.sort_index() - 
assert_frame_equal(result, expected) - def test_sort(self): frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4], columns=['A', 'B', 'C', 'D']) @@ -151,21 +113,6 @@ def test_sort_values_inplace(self): expected = frame.sort_values(by=['A', 'B'], ascending=False) assert_frame_equal(sorted_df, expected) - def test_sort_index_categorical_index(self): - - df = (DataFrame({'A': np.arange(6, dtype='int64'), - 'B': Series(list('aabbca')) - .astype('category', categories=list('cab'))}) - .set_index('B')) - - result = df.sort_index() - expected = df.iloc[[4, 0, 1, 5, 2, 3]] - assert_frame_equal(result, expected) - - result = df.sort_index(ascending=False) - expected = df.iloc[[3, 2, 5, 1, 0, 4]] - assert_frame_equal(result, expected) - def test_sort_nan(self): # GH3917 nan = np.nan @@ -291,8 +238,86 @@ def test_stable_descending_multicolumn_sort(self): kind='mergesort') assert_frame_equal(sorted_df, expected) + def test_sort_datetimes(self): + + # GH 3461, argsort / lexsort differences for a datetime column + df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'], + columns=['A'], + index=date_range('20130101', periods=9)) + dts = [Timestamp(x) + for x in ['2004-02-11', '2004-01-21', '2004-01-26', + '2005-09-20', '2010-10-04', '2009-05-12', + '2008-11-12', '2010-09-28', '2010-09-28']] + df['B'] = dts[::2] + dts[1::2] + df['C'] = 2. + df['A1'] = 3. + + df1 = df.sort_values(by='A') + df2 = df.sort_values(by=['A']) + assert_frame_equal(df1, df2) + + df1 = df.sort_values(by='B') + df2 = df.sort_values(by=['B']) + assert_frame_equal(df1, df2) + + def test_frame_column_inplace_sort_exception(self): + s = self.frame['A'] + with assertRaisesRegexp(ValueError, "This Series is a view"): + s.sort_values(inplace=True) + + cp = s.copy() + cp.sort_values() # it works! + + def test_sort_nat_values_in_int_column(self): + + # GH 14922: "sorting with large float and multiple columns incorrect" + + # cause was that the int64 value NaT was considered as "na". 
Which is + # only correct for datetime64 columns. + + int_values = (2, int(NaT)) + float_values = (2.0, -1.797693e308) + + df = DataFrame(dict(int=int_values, float=float_values), + columns=["int", "float"]) + + df_reversed = DataFrame(dict(int=int_values[::-1], + float=float_values[::-1]), + columns=["int", "float"], + index=[1, 0]) + + # NaT is not a "na" for int64 columns, so na_position must not + # influence the result: + df_sorted = df.sort_values(["int", "float"], na_position="last") + assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["int", "float"], na_position="first") + assert_frame_equal(df_sorted, df_reversed) + + # reverse sorting order + df_sorted = df.sort_values(["int", "float"], ascending=False) + assert_frame_equal(df_sorted, df) + + # and now check if NaT is still considered as "na" for datetime64 + # columns: + df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT], + float=float_values), columns=["datetime", "float"]) + + df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")], + float=float_values[::-1]), + columns=["datetime", "float"], + index=[1, 0]) + + df_sorted = df.sort_values(["datetime", "float"], na_position="first") + assert_frame_equal(df_sorted, df_reversed) + + df_sorted = df.sort_values(["datetime", "float"], na_position="last") + assert_frame_equal(df_sorted, df_reversed) + + +class TestDataFrameSortIndexKinds(tm.TestCase, TestData): + def test_sort_index_multicolumn(self): - import random A = np.arange(5).repeat(20) B = np.tile(np.arange(5), 20) random.shuffle(A) @@ -448,78 +473,73 @@ def test_sort_index_level(self): res = df.sort_index(level=['A', 'B'], sort_remaining=False) assert_frame_equal(df, res) - def test_sort_datetimes(self): - - # GH 3461, argsort / lexsort differences for a datetime column - df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'], - columns=['A'], - index=date_range('20130101', periods=9)) - dts = [Timestamp(x) - for x in ['2004-02-11', '2004-01-21', 
'2004-01-26', - '2005-09-20', '2010-10-04', '2009-05-12', - '2008-11-12', '2010-09-28', '2010-09-28']] - df['B'] = dts[::2] + dts[1::2] - df['C'] = 2. - df['A1'] = 3. - - df1 = df.sort_values(by='A') - df2 = df.sort_values(by=['A']) - assert_frame_equal(df1, df2) - - df1 = df.sort_values(by='B') - df2 = df.sort_values(by=['B']) - assert_frame_equal(df1, df2) - - def test_frame_column_inplace_sort_exception(self): - s = self.frame['A'] - with assertRaisesRegexp(ValueError, "This Series is a view"): - s.sort_values(inplace=True) - - cp = s.copy() - cp.sort_values() # it works! + def test_sort_index_categorical_index(self): - def test_sort_nat_values_in_int_column(self): + df = (DataFrame({'A': np.arange(6, dtype='int64'), + 'B': Series(list('aabbca')) + .astype('category', categories=list('cab'))}) + .set_index('B')) - # GH 14922: "sorting with large float and multiple columns incorrect" + result = df.sort_index() + expected = df.iloc[[4, 0, 1, 5, 2, 3]] + assert_frame_equal(result, expected) - # cause was that the int64 value NaT was considered as "na". Which is - # only correct for datetime64 columns. 
+ result = df.sort_index(ascending=False) + expected = df.iloc[[3, 2, 5, 1, 0, 4]] + assert_frame_equal(result, expected) - int_values = (2, int(NaT)) - float_values = (2.0, -1.797693e308) + def test_sort_index(self): + # GH13496 - df = DataFrame(dict(int=int_values, float=float_values), - columns=["int", "float"]) + frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4], + columns=['A', 'B', 'C', 'D']) - df_reversed = DataFrame(dict(int=int_values[::-1], - float=float_values[::-1]), - columns=["int", "float"], - index=[1, 0]) + # axis=0 : sort rows by index labels + unordered = frame.loc[[3, 2, 4, 1]] + result = unordered.sort_index(axis=0) + expected = frame + assert_frame_equal(result, expected) - # NaT is not a "na" for int64 columns, so na_position must not - # influence the result: - df_sorted = df.sort_values(["int", "float"], na_position="last") - assert_frame_equal(df_sorted, df_reversed) + result = unordered.sort_index(ascending=False) + expected = frame[::-1] + assert_frame_equal(result, expected) - df_sorted = df.sort_values(["int", "float"], na_position="first") - assert_frame_equal(df_sorted, df_reversed) + # axis=1 : sort columns by column names + unordered = frame.iloc[:, [2, 1, 3, 0]] + result = unordered.sort_index(axis=1) + assert_frame_equal(result, frame) - # reverse sorting order - df_sorted = df.sort_values(["int", "float"], ascending=False) - assert_frame_equal(df_sorted, df) + result = unordered.sort_index(axis=1, ascending=False) + expected = frame.iloc[:, ::-1] + assert_frame_equal(result, expected) - # and now check if NaT is still considered as "na" for datetime64 - # columns: - df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT], - float=float_values), columns=["datetime", "float"]) + def test_sort_index_multiindex(self): + # GH13496 - df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")], - float=float_values[::-1]), - columns=["datetime", "float"], - index=[1, 0]) + # sort rows by specified level of 
multi-index + mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4]], mi) - df_sorted = df.sort_values(["datetime", "float"], na_position="first") - assert_frame_equal(df_sorted, df_reversed) + # MI sort, but no level: sort_level has no effect + mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4]], mi) + result = df.sort_index(sort_remaining=False) + expected = df.sort_index() + assert_frame_equal(result, expected) - df_sorted = df.sort_values(["datetime", "float"], na_position="last") - assert_frame_equal(df_sorted, df_reversed) + def test_sort_index_intervalindex(self): + # this is a de-facto sort via unstack + # confirming that we sort in the order of the bins + y = Series(np.random.randn(100)) + x1 = Series(np.sign(np.random.randn(100))) + x2 = pd.cut(Series(np.random.randn(100)), + bins=[-3, -0.5, 0, 0.5, 3]) + model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2']) + + result = model.groupby(['X1', 'X2']).mean().unstack() + expected = IntervalIndex.from_tuples( + [(-3.0, -0.5), (-0.5, 0.0), + (0.0, 0.5), (0.5, 3.0)], + closed='right') + result = result.columns.levels[1].categories + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index cfcb531bedab8..68bdc0c6d5112 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -7,7 +7,7 @@ import pandas as pd from pandas import (Index, MultiIndex, CategoricalIndex, - DataFrame, Categorical, Series) + DataFrame, Categorical, Series, Interval) from pandas.util.testing import assert_frame_equal, assert_series_equal import pandas.util.testing as tm from .common import MixIn @@ -519,7 +519,8 @@ def test_groupby_categorical_two_columns(self): res = groups_double_key.agg('mean') nan = np.nan idx = MultiIndex.from_product( - [Categorical(["(1, 2]", "(2, 3]", "(3, 6]"], ordered=True), 
+ [Categorical([Interval(1, 2), Interval(2, 3), + Interval(3, 6)], ordered=True), [1, 2, 3, 4]], names=["cat", "C2"]) exp = DataFrame({"C1": [nan, nan, nan, nan, 3, 3, diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8f3d8e2307f45..25f89b29021ce 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -864,11 +864,13 @@ def test_get_group_empty_bins(self): bins = [0, 5, 10, 15] g = d.groupby(pd.cut(d[0], bins)) - result = g.get_group('(0, 5]') + # TODO: should prob allow a str of Interval work as well + # IOW '(0, 5]' + result = g.get_group(pd.Interval(0, 5)) expected = DataFrame([3, 1], index=[0, 1]) assert_frame_equal(result, expected) - self.assertRaises(KeyError, lambda: g.get_group('(10, 15]')) + self.assertRaises(KeyError, lambda: g.get_group(pd.Interval(10, 15))) def test_get_group_grouped_by_tuple(self): # GH 8121 diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 08f8f8d48e705..54d47d02c5e8e 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -7,7 +7,8 @@ from pandas import (Series, Index, Float64Index, Int64Index, UInt64Index, RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex, - TimedeltaIndex, PeriodIndex, notnull, isnull) + TimedeltaIndex, PeriodIndex, IntervalIndex, + notnull, isnull) from pandas.types.common import needs_i8_conversion from pandas.util.testing import assertRaisesRegexp from pandas._libs.tslib import iNaT @@ -255,18 +256,21 @@ def test_ensure_copied_data(self): tm.assert_numpy_array_equal(index.values, result.values, check_same='copy') - if not isinstance(index, PeriodIndex): - result = index_type(index.values, copy=False, **init_kwargs) - tm.assert_numpy_array_equal(index.values, result.values, - check_same='same') - tm.assert_numpy_array_equal(index._values, result._values, - check_same='same') - else: + if isinstance(index, PeriodIndex): # .values an object array of Period, thus 
copied result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) tm.assert_numpy_array_equal(index._values, result._values, check_same='same') + elif isinstance(index, IntervalIndex): + # checked in test_interval.py + pass + else: + result = index_type(index.values, copy=False, **init_kwargs) + tm.assert_numpy_array_equal(index.values, result.values, + check_same='same') + tm.assert_numpy_array_equal(index._values, result._values, + check_same='same') def test_copy_and_deepcopy(self): from copy import copy, deepcopy @@ -377,8 +381,9 @@ def test_memory_usage(self): result2 = index.memory_usage() result3 = index.memory_usage(deep=True) - # RangeIndex doesn't use a hashtable engine - if not isinstance(index, RangeIndex): + # RangeIndex, IntervalIndex + # don't have engines + if not isinstance(index, (RangeIndex, IntervalIndex)): self.assertTrue(result2 > result) if index.inferred_type == 'object': diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index a8197b070b032..cc819ff83b1dd 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -14,7 +14,7 @@ from pandas import (period_range, date_range, Series, DataFrame, Float64Index, Int64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, - PeriodIndex) + PeriodIndex, isnull) from pandas.core.index import _get_combined_index from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat @@ -504,7 +504,7 @@ def test_is_(self): def test_asof(self): d = self.dateIndex[0] self.assertEqual(self.dateIndex.asof(d), d) - self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1)))) + self.assertTrue(isnull(self.dateIndex.asof(d - timedelta(1)))) d = self.dateIndex[-1] self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 0d75ba5f2bd46..f2e409deb2ce4 100644 --- a/pandas/tests/indexes/test_category.py +++ 
b/pandas/tests/indexes/test_category.py @@ -8,7 +8,7 @@ import numpy as np -from pandas import Categorical, compat, notnull +from pandas import Categorical, IntervalIndex, compat, notnull from pandas.util.testing import assert_almost_equal import pandas.core.config as cf import pandas as pd @@ -343,11 +343,26 @@ def test_astype(self): self.assertIsInstance(result, Index) self.assertNotIsInstance(result, CategoricalIndex) + # interval + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], + right=[2, 4], + closed='right') + + ci = CategoricalIndex(Categorical.from_codes( + [0, 1, -1], categories=ii, ordered=True)) + + result = ci.astype('interval') + expected = ii.take([0, 1, -1]) + tm.assert_index_equal(result, expected) + + result = IntervalIndex.from_intervals(result.values) + tm.assert_index_equal(result, expected) + def test_reindex_base(self): # determined by cat ordering idx = self.create_index() - expected = np.array([4, 0, 1, 5, 2, 3], dtype=np.intp) + expected = np.arange(len(idx), dtype=np.intp) actual = idx.get_indexer(idx) tm.assert_numpy_array_equal(expected, actual) diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py new file mode 100644 index 0000000000000..25ca961895ca3 --- /dev/null +++ b/pandas/tests/indexes/test_interval.py @@ -0,0 +1,798 @@ +from __future__ import division + +import pytest +import numpy as np + +from pandas import (Interval, IntervalIndex, Index, isnull, + interval_range, Timestamp, Timedelta) +from pandas._libs.interval import IntervalTree +from pandas.tests.indexes.common import Base +import pandas.util.testing as tm +import pandas as pd + + +class TestIntervalIndex(Base, tm.TestCase): + _holder = IntervalIndex + + def setUp(self): + self.index = IntervalIndex.from_arrays([0, 1], [1, 2]) + self.index_with_nan = IntervalIndex.from_tuples( + [(0, 1), np.nan, (1, 2)]) + self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) + + def create_index(self): + return 
IntervalIndex.from_breaks(np.arange(10)) + + def test_constructors(self): + expected = self.index + actual = IntervalIndex.from_breaks(np.arange(3), closed='right') + self.assertTrue(expected.equals(actual)) + + alternate = IntervalIndex.from_breaks(np.arange(3), closed='left') + self.assertFalse(expected.equals(alternate)) + + actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) + self.assertTrue(expected.equals(actual)) + + actual = IntervalIndex([Interval(0, 1), Interval(1, 2)]) + self.assertTrue(expected.equals(actual)) + + actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1, + closed='right') + self.assertTrue(expected.equals(actual)) + + actual = Index([Interval(0, 1), Interval(1, 2)]) + self.assertIsInstance(actual, IntervalIndex) + self.assertTrue(expected.equals(actual)) + + actual = Index(expected) + self.assertIsInstance(actual, IntervalIndex) + self.assertTrue(expected.equals(actual)) + + def test_constructors_other(self): + + # all-nan + result = IntervalIndex.from_intervals([np.nan]) + expected = np.array([np.nan], dtype=object) + tm.assert_numpy_array_equal(result.values, expected) + + # empty + result = IntervalIndex.from_intervals([]) + expected = np.array([], dtype=object) + tm.assert_numpy_array_equal(result.values, expected) + + def test_constructors_errors(self): + + # scalar + with pytest.raises(TypeError): + IntervalIndex(5) + + # not an interval + with pytest.raises(TypeError): + IntervalIndex([0, 1]) + + with pytest.raises(TypeError): + IntervalIndex.from_intervals([0, 1]) + + # invalid closed + with pytest.raises(ValueError): + IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid') + + # mismatched closed + with pytest.raises(ValueError): + IntervalIndex.from_intervals([Interval(0, 1), + Interval(1, 2, closed='left')]) + + with pytest.raises(ValueError): + IntervalIndex.from_arrays([0, 10], [3, 5]) + + with pytest.raises(ValueError): + Index([Interval(0, 1), Interval(2, 3, closed='left')]) + + # no 
point in nesting periods in an IntervalIndex + with pytest.raises(ValueError): + IntervalIndex.from_breaks( + pd.period_range('2000-01-01', periods=3)) + + def test_constructors_datetimelike(self): + + # DTI / TDI + for idx in [pd.date_range('20130101', periods=5), + pd.timedelta_range('1 day', periods=5)]: + result = IntervalIndex.from_breaks(idx) + expected = IntervalIndex.from_breaks(idx.values) + tm.assert_index_equal(result, expected) + + expected_scalar_type = type(idx[0]) + i = result[0] + self.assertTrue(isinstance(i.left, expected_scalar_type)) + self.assertTrue(isinstance(i.right, expected_scalar_type)) + + def test_constructors_error(self): + + # non-intervals + def f(): + IntervalIndex.from_intervals([0.997, 4.0]) + self.assertRaises(TypeError, f) + + def test_properties(self): + index = self.index + self.assertEqual(len(index), 2) + self.assertEqual(index.size, 2) + self.assertEqual(index.shape, (2, )) + + self.assert_index_equal(index.left, Index([0, 1])) + self.assert_index_equal(index.right, Index([1, 2])) + self.assert_index_equal(index.mid, Index([0.5, 1.5])) + + self.assertEqual(index.closed, 'right') + + expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object) + self.assert_numpy_array_equal(np.asarray(index), expected) + self.assert_numpy_array_equal(index.values, expected) + + # with nans + index = self.index_with_nan + self.assertEqual(len(index), 3) + self.assertEqual(index.size, 3) + self.assertEqual(index.shape, (3, )) + + self.assert_index_equal(index.left, Index([0, np.nan, 1])) + self.assert_index_equal(index.right, Index([1, np.nan, 2])) + self.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5])) + + self.assertEqual(index.closed, 'right') + + expected = np.array([Interval(0, 1), np.nan, + Interval(1, 2)], dtype=object) + self.assert_numpy_array_equal(np.asarray(index), expected) + self.assert_numpy_array_equal(index.values, expected) + + def test_with_nans(self): + index = self.index + self.assertFalse(index.hasnans) + 
self.assert_numpy_array_equal(index.isnull(), + np.array([False, False])) + self.assert_numpy_array_equal(index.notnull(), + np.array([True, True])) + + index = self.index_with_nan + self.assertTrue(index.hasnans) + self.assert_numpy_array_equal(index.notnull(), + np.array([True, False, True])) + self.assert_numpy_array_equal(index.isnull(), + np.array([False, True, False])) + + def test_copy(self): + actual = self.index.copy() + self.assertTrue(actual.equals(self.index)) + + actual = self.index.copy(deep=True) + self.assertTrue(actual.equals(self.index)) + self.assertIsNot(actual.left, self.index.left) + + def test_ensure_copied_data(self): + # exercise the copy flag in the constructor + + # not copying + index = self.index + result = IntervalIndex(index, copy=False) + tm.assert_numpy_array_equal(index.left.values, result.left.values, + check_same='same') + tm.assert_numpy_array_equal(index.right.values, result.right.values, + check_same='same') + + # by-definition make a copy + result = IntervalIndex.from_intervals(index.values, copy=False) + tm.assert_numpy_array_equal(index.left.values, result.left.values, + check_same='copy') + tm.assert_numpy_array_equal(index.right.values, result.right.values, + check_same='copy') + + def test_equals(self): + + idx = self.index + self.assertTrue(idx.equals(idx)) + self.assertTrue(idx.equals(idx.copy())) + + self.assertFalse(idx.equals(idx.astype(object))) + self.assertFalse(idx.equals(np.array(idx))) + self.assertFalse(idx.equals(list(idx))) + + self.assertFalse(idx.equals([1, 2])) + self.assertFalse(idx.equals(np.array([1, 2]))) + self.assertFalse(idx.equals( + pd.date_range('20130101', periods=2))) + + def test_astype(self): + + idx = self.index + + for dtype in [np.int64, np.float64, 'datetime64[ns]', + 'datetime64[ns, US/Eastern]', 'timedelta64', + 'period[M]']: + self.assertRaises(ValueError, idx.astype, dtype) + + result = idx.astype(object) + tm.assert_index_equal(result, Index(idx.values, dtype='object')) + 
self.assertFalse(idx.equals(result)) + self.assertTrue(idx.equals(IntervalIndex.from_intervals(result))) + + result = idx.astype('interval') + tm.assert_index_equal(result, idx) + self.assertTrue(result.equals(idx)) + + result = idx.astype('category') + expected = pd.Categorical(idx, ordered=True) + tm.assert_categorical_equal(result, expected) + + def test_where(self): + expected = self.index + result = self.index.where(self.index.notnull()) + tm.assert_index_equal(result, expected) + + idx = IntervalIndex.from_breaks([1, 2]) + result = idx.where([True, False]) + expected = IntervalIndex.from_intervals( + [Interval(1.0, 2.0, closed='right'), np.nan]) + tm.assert_index_equal(result, expected) + + def test_where_array_like(self): + pass + + def test_delete(self): + expected = IntervalIndex.from_breaks([1, 2]) + actual = self.index.delete(0) + self.assertTrue(expected.equals(actual)) + + def test_insert(self): + expected = IntervalIndex.from_breaks(range(4)) + actual = self.index.insert(2, Interval(2, 3)) + self.assertTrue(expected.equals(actual)) + + self.assertRaises(ValueError, self.index.insert, 0, 1) + self.assertRaises(ValueError, self.index.insert, 0, + Interval(2, 3, closed='left')) + + def test_take(self): + actual = self.index.take([0, 1]) + self.assertTrue(self.index.equals(actual)) + + expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2]) + actual = self.index.take([0, 0, 1]) + self.assertTrue(expected.equals(actual)) + + def test_monotonic_and_unique(self): + self.assertTrue(self.index.is_monotonic) + self.assertTrue(self.index.is_unique) + + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)]) + self.assertTrue(idx.is_monotonic) + self.assertTrue(idx.is_unique) + + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (1, 2)]) + self.assertFalse(idx.is_monotonic) + self.assertTrue(idx.is_unique) + + idx = IntervalIndex.from_tuples([(0, 2), (0, 2)]) + self.assertFalse(idx.is_unique) + self.assertTrue(idx.is_monotonic) + + @pytest.mark.xfail(reason='not 
a valid repr as we use interval notation') + def test_repr(self): + i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right') + expected = ("IntervalIndex(left=[0, 1]," + "\n right=[1, 2]," + "\n closed='right'," + "\n dtype='interval[int64]')") + self.assertEqual(repr(i), expected) + + i = IntervalIndex.from_tuples((Timestamp('20130101'), + Timestamp('20130102')), + (Timestamp('20130102'), + Timestamp('20130103')), + closed='right') + expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02']," + "\n right=['2013-01-02', '2013-01-03']," + "\n closed='right'," + "\n dtype='interval[datetime64[ns]]')") + self.assertEqual(repr(i), expected) + + @pytest.mark.xfail(reason='not a valid repr as we use interval notation') + def test_repr_max_seq_item_setting(self): + super(TestIntervalIndex, self).test_repr_max_seq_item_setting() + + @pytest.mark.xfail(reason='not a valid repr as we use interval notation') + def test_repr_roundtrip(self): + super(TestIntervalIndex, self).test_repr_roundtrip() + + def test_get_item(self): + i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), + closed='right') + assert i[0] == Interval(0.0, 1.0) + assert i[1] == Interval(1.0, 2.0) + assert isnull(i[2]) + + result = i[0:1] + expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right') + tm.assert_index_equal(result, expected) + + result = i[0:2] + expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right') + tm.assert_index_equal(result, expected) + + result = i[1:3] + expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan), + closed='right') + tm.assert_index_equal(result, expected) + + def test_get_loc_value(self): + self.assertRaises(KeyError, self.index.get_loc, 0) + self.assertEqual(self.index.get_loc(0.5), 0) + self.assertEqual(self.index.get_loc(1), 0) + self.assertEqual(self.index.get_loc(1.5), 1) + self.assertEqual(self.index.get_loc(2), 1) + self.assertRaises(KeyError, self.index.get_loc, -1) + self.assertRaises(KeyError, 
self.index.get_loc, 3) + + idx = IntervalIndex.from_tuples([(0, 2), (1, 3)]) + self.assertEqual(idx.get_loc(0.5), 0) + self.assertEqual(idx.get_loc(1), 0) + self.assert_numpy_array_equal(idx.get_loc(1.5), + np.array([0, 1], dtype='int64')) + self.assert_numpy_array_equal(np.sort(idx.get_loc(2)), + np.array([0, 1], dtype='int64')) + self.assertEqual(idx.get_loc(3), 1) + self.assertRaises(KeyError, idx.get_loc, 3.5) + + idx = IntervalIndex.from_arrays([0, 2], [1, 3]) + self.assertRaises(KeyError, idx.get_loc, 1.5) + + def slice_locs_cases(self, breaks): + # TODO: same tests for more index types + index = IntervalIndex.from_breaks([0, 1, 2], closed='right') + self.assertEqual(index.slice_locs(), (0, 2)) + self.assertEqual(index.slice_locs(0, 1), (0, 1)) + self.assertEqual(index.slice_locs(1, 1), (0, 1)) + self.assertEqual(index.slice_locs(0, 2), (0, 2)) + self.assertEqual(index.slice_locs(0.5, 1.5), (0, 2)) + self.assertEqual(index.slice_locs(0, 0.5), (0, 1)) + self.assertEqual(index.slice_locs(start=1), (0, 2)) + self.assertEqual(index.slice_locs(start=1.2), (1, 2)) + self.assertEqual(index.slice_locs(end=1), (0, 1)) + self.assertEqual(index.slice_locs(end=1.1), (0, 2)) + self.assertEqual(index.slice_locs(end=1.0), (0, 1)) + self.assertEqual(*index.slice_locs(-1, -1)) + + index = IntervalIndex.from_breaks([0, 1, 2], closed='neither') + self.assertEqual(index.slice_locs(0, 1), (0, 1)) + self.assertEqual(index.slice_locs(0, 2), (0, 2)) + self.assertEqual(index.slice_locs(0.5, 1.5), (0, 2)) + self.assertEqual(index.slice_locs(1, 1), (1, 1)) + self.assertEqual(index.slice_locs(1, 2), (1, 2)) + + index = IntervalIndex.from_breaks([0, 1, 2], closed='both') + self.assertEqual(index.slice_locs(1, 1), (0, 2)) + self.assertEqual(index.slice_locs(1, 2), (0, 2)) + + def test_slice_locs_int64(self): + self.slice_locs_cases([0, 1, 2]) + + def test_slice_locs_float64(self): + self.slice_locs_cases([0.0, 1.0, 2.0]) + + def slice_locs_decreasing_cases(self, tuples): + index = 
IntervalIndex.from_tuples(tuples) + self.assertEqual(index.slice_locs(1.5, 0.5), (1, 3)) + self.assertEqual(index.slice_locs(2, 0), (1, 3)) + self.assertEqual(index.slice_locs(2, 1), (1, 3)) + self.assertEqual(index.slice_locs(3, 1.1), (0, 3)) + self.assertEqual(index.slice_locs(3, 3), (0, 2)) + self.assertEqual(index.slice_locs(3.5, 3.3), (0, 1)) + self.assertEqual(index.slice_locs(1, -3), (2, 3)) + self.assertEqual(*index.slice_locs(-1, -1)) + + def test_slice_locs_decreasing_int64(self): + self.slice_locs_cases([(2, 4), (1, 3), (0, 2)]) + + def test_slice_locs_decreasing_float64(self): + self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)]) + + def test_slice_locs_fails(self): + index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)]) + with self.assertRaises(KeyError): + index.slice_locs(1, 2) + + def test_get_loc_interval(self): + self.assertEqual(self.index.get_loc(Interval(0, 1)), 0) + self.assertEqual(self.index.get_loc(Interval(0, 0.5)), 0) + self.assertEqual(self.index.get_loc(Interval(0, 1, 'left')), 0) + self.assertRaises(KeyError, self.index.get_loc, Interval(2, 3)) + self.assertRaises(KeyError, self.index.get_loc, + Interval(-1, 0, 'left')) + + def test_get_indexer(self): + actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) + expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + actual = self.index.get_indexer(self.index) + expected = np.array([0, 1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + index = IntervalIndex.from_breaks([0, 1, 2], closed='left') + actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) + expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + actual = self.index.get_indexer(index[:1]) + expected = np.array([0], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + actual = self.index.get_indexer(index) + expected = np.array([-1, 1], dtype='int64') + 
self.assert_numpy_array_equal(actual, expected) + + def test_get_indexer_subintervals(self): + + # TODO: is this right? + # return indexers for wholly contained subintervals + target = IntervalIndex.from_breaks(np.linspace(0, 2, 5)) + actual = self.index.get_indexer(target) + expected = np.array([0, 0, 1, 1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2]) + actual = self.index.get_indexer(target) + expected = np.array([0, 0, 1, 1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + actual = self.index.get_indexer(target[[0, -1]]) + expected = np.array([0, 1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left') + actual = self.index.get_indexer(target) + expected = np.array([0, 0, 0], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + def test_contains(self): + # only endpoints are valid + i = IntervalIndex.from_arrays([0, 1], [1, 2]) + + # invalid + self.assertNotIn(0, i) + self.assertNotIn(1, i) + self.assertNotIn(2, i) + + # valid + self.assertIn(Interval(0, 1), i) + self.assertIn(Interval(0, 2), i) + self.assertIn(Interval(0, 0.5), i) + self.assertNotIn(Interval(3, 5), i) + self.assertNotIn(Interval(-1, 0, closed='left'), i) + + def testcontains(self): + # can select values that are IN the range of a value + i = IntervalIndex.from_arrays([0, 1], [1, 2]) + + assert i.contains(0.1) + assert i.contains(0.5) + assert i.contains(1) + assert i.contains(Interval(0, 1)) + assert i.contains(Interval(0, 2)) + + # these overlaps completely + assert i.contains(Interval(0, 3)) + assert i.contains(Interval(1, 3)) + + assert not i.contains(20) + assert not i.contains(-20) + + def test_dropna(self): + + expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)]) + + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan]) + result = ii.dropna() + tm.assert_index_equal(result, 
expected) + + ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan]) + result = ii.dropna() + tm.assert_index_equal(result, expected) + + def test_non_contiguous(self): + index = IntervalIndex.from_tuples([(0, 1), (2, 3)]) + target = [0.5, 1.5, 2.5] + actual = index.get_indexer(target) + expected = np.array([0, -1, 1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + self.assertNotIn(1.5, index) + + def test_union(self): + other = IntervalIndex.from_arrays([2], [3]) + expected = IntervalIndex.from_arrays(range(3), range(1, 4)) + actual = self.index.union(other) + self.assertTrue(expected.equals(actual)) + + actual = other.union(self.index) + self.assertTrue(expected.equals(actual)) + + tm.assert_index_equal(self.index.union(self.index), self.index) + tm.assert_index_equal(self.index.union(self.index[:1]), + self.index) + + def test_intersection(self): + other = IntervalIndex.from_breaks([1, 2, 3]) + expected = IntervalIndex.from_breaks([1, 2]) + actual = self.index.intersection(other) + self.assertTrue(expected.equals(actual)) + + tm.assert_index_equal(self.index.intersection(self.index), + self.index) + + def test_difference(self): + tm.assert_index_equal(self.index.difference(self.index[:1]), + self.index[1:]) + + def test_symmetric_difference(self): + result = self.index[:1].symmetric_difference(self.index[1:]) + expected = self.index + tm.assert_index_equal(result, expected) + + def test_set_operation_errors(self): + self.assertRaises(ValueError, self.index.union, self.index.left) + + other = IntervalIndex.from_breaks([0, 1, 2], closed='neither') + self.assertRaises(ValueError, self.index.union, other) + + def test_isin(self): + actual = self.index.isin(self.index) + self.assert_numpy_array_equal(np.array([True, True]), actual) + + actual = self.index.isin(self.index[:1]) + self.assert_numpy_array_equal(np.array([True, False]), actual) + + def test_comparison(self): + actual = Interval(0, 1) < self.index + expected = np.array([False, 
True]) + self.assert_numpy_array_equal(actual, expected) + + actual = Interval(0.5, 1.5) < self.index + expected = np.array([False, True]) + self.assert_numpy_array_equal(actual, expected) + actual = self.index > Interval(0.5, 1.5) + self.assert_numpy_array_equal(actual, expected) + + actual = self.index == self.index + expected = np.array([True, True]) + self.assert_numpy_array_equal(actual, expected) + actual = self.index <= self.index + self.assert_numpy_array_equal(actual, expected) + actual = self.index >= self.index + self.assert_numpy_array_equal(actual, expected) + + actual = self.index < self.index + expected = np.array([False, False]) + self.assert_numpy_array_equal(actual, expected) + actual = self.index > self.index + self.assert_numpy_array_equal(actual, expected) + + actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left') + self.assert_numpy_array_equal(actual, expected) + + actual = self.index == self.index.values + self.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index.values == self.index + self.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index <= self.index.values + self.assert_numpy_array_equal(actual, np.array([True, True])) + actual = self.index != self.index.values + self.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index > self.index.values + self.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index.values > self.index + self.assert_numpy_array_equal(actual, np.array([False, False])) + + # invalid comparisons + actual = self.index == 0 + self.assert_numpy_array_equal(actual, np.array([False, False])) + actual = self.index == self.index.left + self.assert_numpy_array_equal(actual, np.array([False, False])) + + with self.assertRaisesRegexp(TypeError, 'unorderable types'): + self.index > 0 + with self.assertRaisesRegexp(TypeError, 'unorderable types'): + self.index <= 0 + with self.assertRaises(TypeError): + self.index 
> np.arange(2) + with self.assertRaises(ValueError): + self.index > np.arange(3) + + def test_missing_values(self): + idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)]) + idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2]) + assert idx.equals(idx2) + + with pytest.raises(ValueError): + IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2])) + + self.assert_numpy_array_equal(isnull(idx), + np.array([True, False, False])) + + def test_sort_values(self): + expected = IntervalIndex.from_breaks([1, 2, 3, 4]) + actual = IntervalIndex.from_tuples([(3, 4), (1, 2), + (2, 3)]).sort_values() + tm.assert_index_equal(expected, actual) + + # nan + idx = self.index_with_nan + mask = idx.isnull() + self.assert_numpy_array_equal(mask, np.array([False, True, False])) + + result = idx.sort_values() + mask = result.isnull() + self.assert_numpy_array_equal(mask, np.array([False, False, True])) + + result = idx.sort_values(ascending=False) + mask = result.isnull() + self.assert_numpy_array_equal(mask, np.array([True, False, False])) + + def test_datetime(self): + dates = pd.date_range('2000', periods=3) + idx = IntervalIndex.from_breaks(dates) + + tm.assert_index_equal(idx.left, dates[:2]) + tm.assert_index_equal(idx.right, dates[-2:]) + + expected = pd.date_range('2000-01-01T12:00', periods=2) + tm.assert_index_equal(idx.mid, expected) + + self.assertNotIn(pd.Timestamp('2000-01-01T12'), idx) + self.assertNotIn(pd.Timestamp('2000-01-01T12'), idx) + + target = pd.date_range('1999-12-31T12:00', periods=7, freq='12H') + actual = idx.get_indexer(target) + expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='int64') + self.assert_numpy_array_equal(actual, expected) + + def test_append(self): + + index1 = IntervalIndex.from_arrays([0, 1], [1, 2]) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3]) + + result = index1.append(index2) + expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3]) + tm.assert_index_equal(result, expected) + + result = 
index1.append([index1, index2]) + expected = IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2], + [1, 2, 1, 2, 2, 3]) + tm.assert_index_equal(result, expected) + + def f(): + index1.append(IntervalIndex.from_arrays([0, 1], [1, 2], + closed='both')) + + self.assertRaises(ValueError, f) + + +class TestIntervalRange(tm.TestCase): + + def test_construction(self): + result = interval_range(0, 5, name='foo', closed='both') + expected = IntervalIndex.from_breaks( + np.arange(0, 5), name='foo', closed='both') + tm.assert_index_equal(result, expected) + + def test_errors(self): + + # not enough params + def f(): + interval_range(0) + + self.assertRaises(ValueError, f) + + def f(): + interval_range(periods=2) + + self.assertRaises(ValueError, f) + + def f(): + interval_range() + + self.assertRaises(ValueError, f) + + # mixed units + def f(): + interval_range(0, Timestamp('20130101'), freq=2) + + self.assertRaises(ValueError, f) + + def f(): + interval_range(0, 10, freq=Timedelta('1day')) + + self.assertRaises(ValueError, f) + + +class TestIntervalTree(tm.TestCase): + def setUp(self): + gentree = lambda dtype: IntervalTree(np.arange(5, dtype=dtype), + np.arange(5, dtype=dtype) + 2) + self.tree = gentree('int64') + self.trees = {dtype: gentree(dtype) + for dtype in ['int32', 'int64', 'float32', 'float64']} + + def test_get_loc(self): + for dtype, tree in self.trees.items(): + self.assert_numpy_array_equal(tree.get_loc(1), + np.array([0], dtype='int64')) + self.assert_numpy_array_equal(np.sort(tree.get_loc(2)), + np.array([0, 1], dtype='int64')) + with self.assertRaises(KeyError): + tree.get_loc(-1) + + def test_get_indexer(self): + for dtype, tree in self.trees.items(): + self.assert_numpy_array_equal( + tree.get_indexer(np.array([1.0, 5.5, 6.5])), + np.array([0, 4, -1], dtype='int64')) + with self.assertRaises(KeyError): + tree.get_indexer(np.array([3.0])) + + def test_get_indexer_non_unique(self): + indexer, missing = self.tree.get_indexer_non_unique( + np.array([1.0, 2.0, 6.5])) 
+ self.assert_numpy_array_equal(indexer[:1], + np.array([0], dtype='int64')) + self.assert_numpy_array_equal(np.sort(indexer[1:3]), + np.array([0, 1], dtype='int64')) + self.assert_numpy_array_equal(np.sort(indexer[3:]), + np.array([-1], dtype='int64')) + self.assert_numpy_array_equal(missing, np.array([2], dtype='int64')) + + def test_duplicates(self): + tree = IntervalTree([0, 0, 0], [1, 1, 1]) + self.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)), + np.array([0, 1, 2], dtype='int64')) + + with self.assertRaises(KeyError): + tree.get_indexer(np.array([0.5])) + + indexer, missing = tree.get_indexer_non_unique(np.array([0.5])) + self.assert_numpy_array_equal(np.sort(indexer), + np.array([0, 1, 2], dtype='int64')) + self.assert_numpy_array_equal(missing, np.array([], dtype='int64')) + + def test_get_loc_closed(self): + for closed in ['left', 'right', 'both', 'neither']: + tree = IntervalTree([0], [1], closed=closed) + for p, errors in [(0, tree.open_left), + (1, tree.open_right)]: + if errors: + with self.assertRaises(KeyError): + tree.get_loc(p) + else: + self.assert_numpy_array_equal(tree.get_loc(p), + np.array([0], dtype='int64')) + + def test_get_indexer_closed(self): + x = np.arange(1000, dtype='int64') + found = x + not_found = (-1 * np.ones(1000)).astype('int64') + for leaf_size in [1, 10, 100, 10000]: + for closed in ['left', 'right', 'both', 'neither']: + tree = IntervalTree(x, x + 0.5, closed=closed, + leaf_size=leaf_size) + self.assert_numpy_array_equal(found, + tree.get_indexer(x + 0.25)) + + expected = found if tree.closed_left else not_found + self.assert_numpy_array_equal(expected, + tree.get_indexer(x + 0.0)) + + expected = found if tree.closed_right else not_found + self.assert_numpy_array_equal(expected, + tree.get_indexer(x + 0.5)) diff --git a/pandas/tests/indexing/test_interval.py b/pandas/tests/indexing/test_interval.py new file mode 100644 index 0000000000000..bccc21ed6c086 --- /dev/null +++ b/pandas/tests/indexing/test_interval.py @@ 
-0,0 +1,245 @@ +import pytest +import numpy as np +import pandas as pd + +from pandas import Series, DataFrame, IntervalIndex, Interval +import pandas.util.testing as tm + + +class TestIntervalIndex(tm.TestCase): + + def setUp(self): + self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) + + def test_loc_with_scalar(self): + + s = self.s + expected = 0 + + result = s.loc[0.5] + assert result == expected + + result = s.loc[1] + assert result == expected + + with pytest.raises(KeyError): + s.loc[0] + + expected = s.iloc[:3] + tm.assert_series_equal(expected, s.loc[:3]) + tm.assert_series_equal(expected, s.loc[:2.5]) + tm.assert_series_equal(expected, s.loc[0.1:2.5]) + tm.assert_series_equal(expected, s.loc[-1:3]) + + expected = s.iloc[1:4] + tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]]) + tm.assert_series_equal(expected, s.loc[[2, 3, 4]]) + tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]]) + + expected = s.iloc[2:5] + tm.assert_series_equal(expected, s.loc[s >= 2]) + + def test_getitem_with_scalar(self): + + s = self.s + expected = 0 + + result = s[0.5] + assert result == expected + + result = s[1] + assert result == expected + + with pytest.raises(KeyError): + s[0] + + expected = s.iloc[:3] + tm.assert_series_equal(expected, s[:3]) + tm.assert_series_equal(expected, s[:2.5]) + tm.assert_series_equal(expected, s[0.1:2.5]) + tm.assert_series_equal(expected, s[-1:3]) + + expected = s.iloc[1:4] + tm.assert_series_equal(expected, s[[1.5, 2.5, 3.5]]) + tm.assert_series_equal(expected, s[[2, 3, 4]]) + tm.assert_series_equal(expected, s[[1.5, 3, 4]]) + + expected = s.iloc[2:5] + tm.assert_series_equal(expected, s[s >= 2]) + + def test_with_interval(self): + + s = self.s + expected = 0 + + result = s.loc[Interval(0, 1)] + assert result == expected + + result = s[Interval(0, 1)] + assert result == expected + + expected = s.iloc[3:5] + result = s.loc[Interval(3, 6)] + tm.assert_series_equal(expected, result) + + expected = s.iloc[3:5] + result = 
s.loc[[Interval(3, 6)]] + tm.assert_series_equal(expected, result) + + expected = s.iloc[3:5] + result = s.loc[[Interval(3, 5)]] + tm.assert_series_equal(expected, result) + + # missing + with pytest.raises(KeyError): + s.loc[Interval(-2, 0)] + + with pytest.raises(KeyError): + s[Interval(-2, 0)] + + with pytest.raises(KeyError): + s.loc[Interval(5, 6)] + + with pytest.raises(KeyError): + s[Interval(5, 6)] + + def test_with_slices(self): + + s = self.s + + # slice of interval + with pytest.raises(NotImplementedError): + result = s.loc[Interval(3, 6):] + + with pytest.raises(NotImplementedError): + result = s[Interval(3, 6):] + + expected = s.iloc[3:5] + result = s[[Interval(3, 6)]] + tm.assert_series_equal(expected, result) + + # slice of scalar with step != 1 + with pytest.raises(ValueError): + s[0:4:2] + + def test_with_overlaps(self): + + s = self.s + expected = s.iloc[[3, 4, 3, 4]] + result = s.loc[[Interval(3, 6), Interval(3, 6)]] + tm.assert_series_equal(expected, result) + + idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) + s = Series(range(len(idx)), index=idx) + + result = s[4] + expected = s + tm.assert_series_equal(expected, result) + + result = s[[4]] + expected = s + tm.assert_series_equal(expected, result) + + result = s.loc[[4]] + expected = s + tm.assert_series_equal(expected, result) + + result = s[Interval(3, 5)] + expected = s + tm.assert_series_equal(expected, result) + + result = s.loc[Interval(3, 5)] + expected = s + tm.assert_series_equal(expected, result) + + # doesn't intersect unique set of intervals + with pytest.raises(KeyError): + s[[Interval(3, 5)]] + + with pytest.raises(KeyError): + s.loc[[Interval(3, 5)]] + + def test_non_unique(self): + + idx = IntervalIndex.from_tuples([(1, 3), (3, 7)]) + + s = pd.Series(range(len(idx)), index=idx) + + result = s.loc[Interval(1, 3)] + assert result == 0 + + result = s.loc[[Interval(1, 3)]] + expected = s.iloc[0:1] + tm.assert_series_equal(expected, result) + + def test_non_unique_moar(self): + + 
idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)]) + s = Series(range(len(idx)), index=idx) + + result = s.loc[Interval(1, 3)] + expected = s.iloc[[0, 1]] + tm.assert_series_equal(expected, result) + + # non-unique index and slices not allowed + with pytest.raises(ValueError): + s.loc[Interval(1, 3):] + + with pytest.raises(ValueError): + s[Interval(1, 3):] + + # non-unique + with pytest.raises(ValueError): + s[[Interval(1, 3)]] + + def test_non_matching(self): + s = self.s + + # this is a departure from our current + # indexing scheme, but simpler + with pytest.raises(KeyError): + s.loc[[-1, 3, 4, 5]] + + with pytest.raises(KeyError): + s.loc[[-1, 3]] + + def test_large_series(self): + s = Series(np.arange(1000000), + index=IntervalIndex.from_breaks(np.arange(1000001))) + + result1 = s.loc[:80000] + result2 = s.loc[0:80000] + result3 = s.loc[0:80000:1] + tm.assert_series_equal(result1, result2) + tm.assert_series_equal(result1, result3) + + def test_loc_getitem_frame(self): + + df = DataFrame({'A': range(10)}) + s = pd.cut(df.A, 5) + df['B'] = s + df = df.set_index('B') + + result = df.loc[4] + expected = df.iloc[4:6] + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError): + df.loc[10] + + # single list-like + result = df.loc[[4]] + expected = df.iloc[4:6] + tm.assert_frame_equal(result, expected) + + # non-unique + result = df.loc[[4, 5]] + expected = df.take([4, 5, 4, 5]) + tm.assert_frame_equal(result, expected) + + with pytest.raises(KeyError): + df.loc[[10]] + + # partial missing + with pytest.raises(KeyError): + df.loc[[10, 4]] diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py new file mode 100644 index 0000000000000..63e57fb472861 --- /dev/null +++ b/pandas/tests/scalar/test_interval.py @@ -0,0 +1,129 @@ +from __future__ import division + +import pytest +from pandas import Interval +import pandas.util.testing as tm + + +class TestInterval(tm.TestCase): + def setUp(self): + self.interval = 
Interval(0, 1) + + def test_properties(self): + self.assertEqual(self.interval.closed, 'right') + self.assertEqual(self.interval.left, 0) + self.assertEqual(self.interval.right, 1) + self.assertEqual(self.interval.mid, 0.5) + + def test_repr(self): + self.assertEqual(repr(self.interval), + "Interval(0, 1, closed='right')") + self.assertEqual(str(self.interval), "(0, 1]") + + interval_left = Interval(0, 1, closed='left') + self.assertEqual(repr(interval_left), + "Interval(0, 1, closed='left')") + self.assertEqual(str(interval_left), "[0, 1)") + + def test_contains(self): + self.assertIn(0.5, self.interval) + self.assertIn(1, self.interval) + self.assertNotIn(0, self.interval) + self.assertRaises(TypeError, lambda: self.interval in self.interval) + + interval = Interval(0, 1, closed='both') + self.assertIn(0, interval) + self.assertIn(1, interval) + + interval = Interval(0, 1, closed='neither') + self.assertNotIn(0, interval) + self.assertIn(0.5, interval) + self.assertNotIn(1, interval) + + def test_equal(self): + self.assertEqual(Interval(0, 1), Interval(0, 1, closed='right')) + self.assertNotEqual(Interval(0, 1), Interval(0, 1, closed='left')) + self.assertNotEqual(Interval(0, 1), 0) + + def test_comparison(self): + with self.assertRaisesRegexp(TypeError, 'unorderable types'): + Interval(0, 1) < 2 + + self.assertTrue(Interval(0, 1) < Interval(1, 2)) + self.assertTrue(Interval(0, 1) < Interval(0, 2)) + self.assertTrue(Interval(0, 1) < Interval(0.5, 1.5)) + self.assertTrue(Interval(0, 1) <= Interval(0, 1)) + self.assertTrue(Interval(0, 1) > Interval(-1, 2)) + self.assertTrue(Interval(0, 1) >= Interval(0, 1)) + + def test_hash(self): + # should not raise + hash(self.interval) + + def test_math_add(self): + expected = Interval(1, 2) + actual = self.interval + 1 + self.assertEqual(expected, actual) + + expected = Interval(1, 2) + actual = 1 + self.interval + self.assertEqual(expected, actual) + + actual = self.interval + actual += 1 + self.assertEqual(expected, actual) 
+ + with pytest.raises(TypeError): + self.interval + Interval(1, 2) + + with pytest.raises(TypeError): + self.interval + 'foo' + + def test_math_sub(self): + expected = Interval(-1, 0) + actual = self.interval - 1 + self.assertEqual(expected, actual) + + actual = self.interval + actual -= 1 + self.assertEqual(expected, actual) + + with pytest.raises(TypeError): + self.interval - Interval(1, 2) + + with pytest.raises(TypeError): + self.interval - 'foo' + + def test_math_mult(self): + expected = Interval(0, 2) + actual = self.interval * 2 + self.assertEqual(expected, actual) + + expected = Interval(0, 2) + actual = 2 * self.interval + self.assertEqual(expected, actual) + + actual = self.interval + actual *= 2 + self.assertEqual(expected, actual) + + with pytest.raises(TypeError): + self.interval * Interval(1, 2) + + with pytest.raises(TypeError): + self.interval * 'foo' + + def test_math_div(self): + expected = Interval(0, 0.5) + actual = self.interval / 2.0 + self.assertEqual(expected, actual) + + actual = self.interval + actual /= 2.0 + self.assertEqual(expected, actual) + + with pytest.raises(TypeError): + self.interval / Interval(1, 2) + + with pytest.raises(TypeError): + self.interval / 'foo' diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 8ad07afcacfcc..f4297208b2e26 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -10,8 +10,7 @@ from pandas.types.common import is_categorical_dtype, is_datetime64tz_dtype from pandas import (Index, Series, isnull, date_range, - period_range, NaT) -from pandas.core.index import MultiIndex + NaT, period_range, MultiIndex, IntervalIndex) from pandas.tseries.index import Timestamp, DatetimeIndex from pandas._libs import lib @@ -543,6 +542,17 @@ def test_constructor_with_datetime_tz(self): expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern')) assert_series_equal(s, expected) + def 
test_construction_interval(self): + # construction from interval & array of intervals + index = IntervalIndex.from_breaks(np.arange(3), closed='right') + result = Series(index) + repr(result) + str(result) + tm.assert_index_equal(Index(result.values), index) + + result = Series(index.values) + tm.assert_index_equal(Index(result.values), index) + def test_construction_consistency(self): # make sure that we are not re-localizing upon construction diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index ea49abeee21c5..4a3332c2de6d8 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import (Series, DataFrame, isnull, date_range, - MultiIndex, Index, Timestamp, NaT) + MultiIndex, Index, Timestamp, NaT, IntervalIndex) from pandas.compat import range from pandas._libs.tslib import iNaT from pandas.util.testing import assert_series_equal, assert_frame_equal @@ -556,6 +556,15 @@ def test_dropna_no_nan(self): s2.dropna(inplace=True) self.assert_series_equal(s2, s) + def test_dropna_intervals(self): + s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays( + [np.nan, 0, 1, 2], + [np.nan, 1, 2, 3])) + + result = s.dropna() + expected = s.iloc[1:] + assert_series_equal(result, expected) + def test_valid(self): ts = self.ts.copy() ts[::2] = np.NaN diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 66ecba960ae0b..26c51ec976f74 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -3,9 +3,9 @@ import numpy as np import random -from pandas import (DataFrame, Series, MultiIndex) +from pandas import DataFrame, Series, MultiIndex, IntervalIndex -from pandas.util.testing import (assert_series_equal, assert_almost_equal) +from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm from .common import TestData @@ -177,3 +177,18 @@ def 
test_sort_index_na_position(self): expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan]) index_sorted_series = series.sort_index(na_position='last') assert_series_equal(expected_series_last, index_sorted_series) + + def test_sort_index_intervals(self): + s = Series([np.nan, 1, 2, 3], IntervalIndex.from_arrays( + [0, 1, 2, 3], + [1, 2, 3, 4])) + + result = s.sort_index() + expected = s + assert_series_equal(result, expected) + + result = s.sort_index(ascending=False) + expected = Series([3, 2, 1, np.nan], IntervalIndex.from_arrays( + [3, 2, 1, 0], + [4, 3, 2, 1])) + assert_series_equal(result, expected) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index d9f81968c684d..cd1ec915d3aeb 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1,20 +1,20 @@ # -*- coding: utf-8 -*- -from pandas.compat import range import numpy as np from numpy.random import RandomState from numpy import nan from datetime import datetime from itertools import permutations -from pandas import (Series, Categorical, CategoricalIndex, Index, - Timestamp, DatetimeIndex) +from pandas import (Series, Categorical, CategoricalIndex, + Timestamp, DatetimeIndex, + Index, IntervalIndex) import pandas as pd from pandas import compat from pandas._libs import (groupby as libgroupby, algos as libalgos, hashtable) from pandas._libs.hashtable import unique_label_indices -from pandas.compat import lrange +from pandas.compat import lrange, range import pandas.core.algorithms as algos import pandas.util.testing as tm from pandas.compat.numpy import np_array_datetime64_compat @@ -588,24 +588,27 @@ def test_value_counts(self): arr = np.random.randn(4) factor = cut(arr, 4) - tm.assertIsInstance(factor, Categorical) + # tm.assertIsInstance(factor, n) result = algos.value_counts(factor) - cats = ['(-1.194, -0.535]', '(-0.535, 0.121]', '(0.121, 0.777]', - '(0.777, 1.433]'] - expected_index = CategoricalIndex(cats, cats, ordered=True) - expected = Series([1, 1, 1, 1], 
index=expected_index) + breaks = [-1.194, -0.535, 0.121, 0.777, 1.433] + expected_index = pd.IntervalIndex.from_breaks( + breaks).astype('category') + expected = Series([1, 1, 1, 1], + index=expected_index) tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self): s = [1, 2, 3, 4] result = algos.value_counts(s, bins=1) - self.assertEqual(result.tolist(), [4]) - self.assertEqual(result.index[0], 0.997) + expected = Series([4], + index=IntervalIndex.from_tuples([(0.996, 4.0)])) + tm.assert_series_equal(result, expected) result = algos.value_counts(s, bins=2, sort=False) - self.assertEqual(result.tolist(), [2, 2]) - self.assertEqual(result.index[0], 0.997) - self.assertEqual(result.index[1], 2.5) + expected = Series([2, 2], + index=IntervalIndex.from_tuples([(0.996, 2.5), + (2.5, 4.0)])) + tm.assert_series_equal(result, expected) def test_value_counts_dtypes(self): result = algos.value_counts([1, 1.]) @@ -657,6 +660,7 @@ def test_categorical(self): result = s.value_counts() expected = Series([3, 2, 1], index=pd.CategoricalIndex(['a', 'b', 'c'])) + tm.assert_series_equal(result, expected, check_index_type=True) # preserve order? 
@@ -670,12 +674,13 @@ def test_categorical_nans(self): s.iloc[1] = np.nan result = s.value_counts() expected = Series([4, 3, 2], index=pd.CategoricalIndex( + ['a', 'b', 'c'], categories=['a', 'b', 'c'])) tm.assert_series_equal(result, expected, check_index_type=True) result = s.value_counts(dropna=False) expected = Series([ 4, 3, 2, 1 - ], index=pd.CategoricalIndex(['a', 'b', 'c', np.nan])) + ], index=CategoricalIndex(['a', 'b', 'c', np.nan])) tm.assert_series_equal(result, expected, check_index_type=True) # out of order diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 032e3a186b84a..4a1cf6314aaed 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -13,7 +13,7 @@ needs_i8_conversion) import pandas.util.testing as tm from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, - Timedelta) + Timedelta, IntervalIndex, Interval) from pandas.compat import StringIO from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.base import PandasDelegate, NoNewAttributesMixin @@ -575,10 +575,10 @@ def test_value_counts_bins(self): s1 = Series([1, 1, 2, 3]) res1 = s1.value_counts(bins=1) - exp1 = Series({0.998: 4}) + exp1 = Series({Interval(0.997, 3.0): 4}) tm.assert_series_equal(res1, exp1) res1n = s1.value_counts(bins=1, normalize=True) - exp1n = Series({0.998: 1.0}) + exp1n = Series({Interval(0.997, 3.0): 1.0}) tm.assert_series_equal(res1n, exp1n) if isinstance(s1, Index): @@ -589,18 +589,20 @@ def test_value_counts_bins(self): self.assertEqual(s1.nunique(), 3) - res4 = s1.value_counts(bins=4) - exp4 = Series({0.998: 2, - 1.5: 1, - 2.0: 0, - 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0]) + # these return the same + res4 = s1.value_counts(bins=4, dropna=True) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4, exp4) + + res4 = s1.value_counts(bins=4, dropna=False) + intervals = 
IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2])) + tm.assert_series_equal(res4, exp4) + res4n = s1.value_counts(bins=4, normalize=True) - exp4n = Series( - {0.998: 0.5, - 1.5: 0.25, - 2.0: 0.0, - 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0]) + exp4n = Series([0.5, 0.25, 0.25, 0], + index=intervals.take([0, 3, 1, 2])) tm.assert_series_equal(res4n, exp4n) # handle NA's properly diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index adacbb95f5162..dd370f0a20c2e 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -21,7 +21,8 @@ Timestamp, CategoricalIndex, isnull, date_range, DatetimeIndex, period_range, PeriodIndex, - timedelta_range, TimedeltaIndex, NaT) + timedelta_range, TimedeltaIndex, NaT, + Interval, IntervalIndex) from pandas.compat import range, lrange, u, PY3 from pandas.core.config import option_context @@ -121,6 +122,16 @@ def test_constructor_unsortable(self): self.assertRaises( TypeError, lambda: Categorical(arr, ordered=True)) + def test_constructor_interval(self): + result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)], + ordered=True) + ii = IntervalIndex.from_intervals([Interval(1, 2), + Interval(2, 3), + Interval(3, 6)]) + exp = Categorical(ii, ordered=True) + self.assert_categorical_equal(result, exp) + tm.assert_index_equal(result.categories, ii) + def test_is_equal_dtype(self): # test dtype comparisons between cats @@ -1598,10 +1609,11 @@ def setUp(self): df = DataFrame({'value': np.random.randint(0, 10000, 100)}) labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) df = df.sort_values(by=['value'], ascending=True) - df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False, - labels=labels) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), + right=False, labels=cat_labels) self.cat = df def test_dtypes(self): @@ 
-2008,9 +2020,10 @@ def test_series_functions_no_warnings(self): def test_assignment_to_dataframe(self): # assignment - df = DataFrame({'value': np.array(np.random.randint(0, 10000, 100), - dtype='int32')}) - labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + df = DataFrame({'value': np.array( + np.random.randint(0, 10000, 100), dtype='int32')}) + labels = Categorical(["{0} - {1}".format(i, i + 499) + for i in range(0, 10000, 500)]) df = df.sort_values(by=['value'], ascending=True) s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels) @@ -3007,7 +3020,7 @@ def f(x): # GH 9603 df = pd.DataFrame({'a': [1, 0, 0, 0]}) - c = pd.cut(df.a, [0, 1, 2, 3, 4]) + c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=pd.Categorical(list('abcd'))) result = df.groupby(c).apply(len) exp_index = pd.CategoricalIndex(c.values.categories, @@ -3124,7 +3137,7 @@ def test_slicing(self): df = DataFrame({'value': (np.arange(100) + 1).astype('int64')}) df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100]) - expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10) + expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10) result = df.iloc[10] tm.assert_series_equal(result, expected) @@ -3134,7 +3147,7 @@ def test_slicing(self): result = df.iloc[10:20] tm.assert_frame_equal(result, expected) - expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8) + expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8) result = df.loc[8] tm.assert_series_equal(result, expected) diff --git a/pandas/tests/tools/test_tile.py b/pandas/tests/tools/test_tile.py index cc80c1ff5db29..742568870c3c3 100644 --- a/pandas/tests/tools/test_tile.py +++ b/pandas/tests/tools/test_tile.py @@ -3,21 +3,20 @@ import numpy as np from pandas.compat import zip -from pandas import Series, Index, Categorical +from pandas import (Series, Index, isnull, + to_datetime, DatetimeIndex, Timestamp, + Interval, IntervalIndex, Categorical, + cut, qcut, date_range) 
import pandas.util.testing as tm -from pandas.util.testing import assertRaisesRegexp -import pandas.core.common as com from pandas.core.algorithms import quantile -from pandas.tools.tile import cut, qcut import pandas.tools.tile as tmod -from pandas import to_datetime, DatetimeIndex, Timestamp class TestCut(tm.TestCase): def test_simple(self): - data = np.ones(5) + data = np.ones(5, dtype='int64') result = cut(data, 4, labels=False) expected = np.array([1, 1, 1, 1, 1]) tm.assert_numpy_array_equal(result, expected, @@ -27,34 +26,62 @@ def test_bins(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]) result, bins = cut(data, 3, retbins=True) - exp_codes = np.array([0, 0, 0, 1, 2, 0], dtype=np.int8) - tm.assert_numpy_array_equal(result.codes, exp_codes) - exp = np.array([0.1905, 3.36666667, 6.53333333, 9.7]) - tm.assert_almost_equal(bins, exp) + intervals = IntervalIndex.from_breaks(bins.round(3)) + expected = intervals.take([0, 0, 0, 1, 2, 0]).astype('category') + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, + 6.53333333, 9.7])) def test_right(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=True, retbins=True) - exp_codes = np.array([0, 0, 0, 2, 3, 0, 0], dtype=np.int8) - tm.assert_numpy_array_equal(result.codes, exp_codes) - exp = np.array([0.1905, 2.575, 4.95, 7.325, 9.7]) - tm.assert_numpy_array_equal(bins, exp) + intervals = IntervalIndex.from_breaks(bins.round(3)) + expected = intervals.astype('category').take([0, 0, 0, 2, 3, 0, 0]) + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, + 7.325, 9.7])) def test_noright(self): data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=False, retbins=True) - exp_codes = np.array([0, 0, 0, 2, 3, 0, 1], dtype=np.int8) - tm.assert_numpy_array_equal(result.codes, exp_codes) - exp = np.array([0.2, 2.575, 4.95, 7.325, 9.7095]) - 
tm.assert_almost_equal(bins, exp) + intervals = IntervalIndex.from_breaks(bins.round(3), closed='left') + expected = intervals.take([0, 0, 0, 2, 3, 0, 1]).astype('category') + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, + 7.325, 9.7095])) def test_arraylike(self): data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1] result, bins = cut(data, 3, retbins=True) - exp_codes = np.array([0, 0, 0, 1, 2, 0], dtype=np.int8) - tm.assert_numpy_array_equal(result.codes, exp_codes) - exp = np.array([0.1905, 3.36666667, 6.53333333, 9.7]) - tm.assert_almost_equal(bins, exp) + intervals = IntervalIndex.from_breaks(bins.round(3)) + expected = intervals.take([0, 0, 0, 1, 2, 0]).astype('category') + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, + 6.53333333, 9.7])) + + def test_bins_from_intervalindex(self): + c = cut(range(5), 3) + expected = c + result = cut(range(5), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + expected = Categorical.from_codes(np.append(c.codes, -1), + categories=c.categories, + ordered=True) + result = cut(range(6), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + # doc example + # make sure we preserve the bins + ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) + c = cut(ages, bins=[0, 18, 35, 70]) + expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) + tm.assert_index_equal(c.categories, expected) + + result = cut([25, 20, 50], bins=c.categories) + tm.assert_index_equal(result.categories, expected) + tm.assert_numpy_array_equal(result.codes, + np.array([1, 1, 2], dtype='int8')) def test_bins_not_monotonic(self): data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1] @@ -82,14 +109,13 @@ def test_labels(self): arr = np.tile(np.arange(0, 1.01, 0.1), 4) result, bins = cut(arr, 4, retbins=True) - ex_levels = Index(['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]', - '(0.75, 1]']) - 
self.assert_index_equal(result.categories, ex_levels) + ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1]) + tm.assert_index_equal(result.categories, ex_levels) result, bins = cut(arr, 4, retbins=True, right=False) - ex_levels = Index(['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)', - '[0.75, 1.001)']) - self.assert_index_equal(result.categories, ex_levels) + ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3], + closed='left') + tm.assert_index_equal(result.categories, ex_levels) def test_cut_pass_series_name_to_factor(self): s = Series(np.random.randn(100), name='foo') @@ -101,9 +127,9 @@ def test_label_precision(self): arr = np.arange(0, 0.73, 0.01) result = cut(arr, 4, precision=2) - ex_levels = Index(['(-0.00072, 0.18]', '(0.18, 0.36]', - '(0.36, 0.54]', '(0.54, 0.72]']) - self.assert_index_equal(result.categories, ex_levels) + ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, + 0.54, 0.72]) + tm.assert_index_equal(result.categories, ex_levels) def test_na_handling(self): arr = np.arange(0, 0.75, 0.01) @@ -113,39 +139,43 @@ def test_na_handling(self): result_arr = np.asarray(result) - ex_arr = np.where(com.isnull(arr), np.nan, result_arr) + ex_arr = np.where(isnull(arr), np.nan, result_arr) tm.assert_almost_equal(result_arr, ex_arr) result = cut(arr, 4, labels=False) - ex_result = np.where(com.isnull(arr), np.nan, result) + ex_result = np.where(isnull(arr), np.nan, result) tm.assert_almost_equal(result, ex_result) def test_inf_handling(self): data = np.arange(6) data_ser = Series(data, dtype='int64') - result = cut(data, [-np.inf, 2, 4, np.inf]) - result_ser = cut(data_ser, [-np.inf, 2, 4, np.inf]) + bins = [-np.inf, 2, 4, np.inf] + result = cut(data, bins) + result_ser = cut(data_ser, bins) - ex_categories = Index(['(-inf, 2]', '(2, 4]', '(4, inf]']) - - tm.assert_index_equal(result.categories, ex_categories) - tm.assert_index_equal(result_ser.cat.categories, ex_categories) - self.assertEqual(result[5], '(4, inf]') - 
self.assertEqual(result[0], '(-inf, 2]') - self.assertEqual(result_ser[5], '(4, inf]') - self.assertEqual(result_ser[0], '(-inf, 2]') + ex_uniques = IntervalIndex.from_breaks(bins) + tm.assert_index_equal(result.categories, ex_uniques) + self.assertEqual(result[5], Interval(4, np.inf)) + self.assertEqual(result[0], Interval(-np.inf, 2)) + self.assertEqual(result_ser[5], Interval(4, np.inf)) + self.assertEqual(result_ser[0], Interval(-np.inf, 2)) def test_qcut(self): arr = np.random.randn(1000) + # we store the bins as Index that have been rounded + # so comparisons are a bit tricky labels, bins = qcut(arr, 4, retbins=True) ex_bins = quantile(arr, [0, .25, .5, .75, 1.]) - tm.assert_almost_equal(bins, ex_bins) + result = labels.categories.left.values + self.assertTrue(np.allclose(result, ex_bins[:-1], atol=1e-2)) + result = labels.categories.right.values + self.assertTrue(np.allclose(result, ex_bins[1:], atol=1e-2)) ex_levels = cut(arr, ex_bins, include_lowest=True) - self.assert_categorical_equal(labels, ex_levels) + tm.assert_categorical_equal(labels, ex_levels) def test_qcut_bounds(self): arr = np.random.randn(1000) @@ -161,15 +191,15 @@ def test_qcut_specify_quantiles(self): tm.assert_categorical_equal(factor, expected) def test_qcut_all_bins_same(self): - assertRaisesRegexp(ValueError, "edges.*unique", qcut, - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3) + tm.assertRaisesRegexp(ValueError, "edges.*unique", qcut, + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3) def test_cut_out_of_bounds(self): arr = np.random.randn(100) result = cut(arr, [-1, 0, 1]) - mask = result.codes == -1 + mask = isnull(result) ex_mask = (arr < -1) | (arr > 1) self.assert_numpy_array_equal(mask, ex_mask) @@ -179,30 +209,41 @@ def test_cut_pass_labels(self): labels = ['Small', 'Medium', 'Large'] result = cut(arr, bins, labels=labels) + exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'], + ordered=True) + self.assert_categorical_equal(result, exp) - exp = cut(arr, bins) - exp.categories = labels 
- - tm.assert_categorical_equal(result, exp) + result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2], + labels)) + exp = Categorical.from_codes([1] + 4 * [0] + [1, 2], labels) + self.assert_categorical_equal(result, exp) def test_qcut_include_lowest(self): values = np.arange(10) - cats = qcut(values, 4) + ii = qcut(values, 4) - ex_levels = ['[0, 2.25]', '(2.25, 4.5]', '(4.5, 6.75]', '(6.75, 9]'] - self.assertTrue((cats.categories == ex_levels).all()) + ex_levels = IntervalIndex.from_intervals( + [Interval(-0.001, 2.25), + Interval(2.25, 4.5), + Interval(4.5, 6.75), + Interval(6.75, 9)]) + tm.assert_index_equal(ii.categories, ex_levels) def test_qcut_nas(self): arr = np.random.randn(100) arr[:20] = np.nan result = qcut(arr, 4) - self.assertTrue(com.isnull(result[:20]).all()) + self.assertTrue(isnull(result[:20]).all()) - def test_label_formatting(self): - self.assertEqual(tmod._trim_zeros('1.000'), '1') + def test_qcut_index(self): + result = qcut([0, 2], 2) + expected = Index([Interval(-0.001, 1), Interval(1, 2)]).astype( + 'category') + self.assert_categorical_equal(result, expected) + def test_round_frac(self): # it works result = cut(np.arange(11.), 2) @@ -210,10 +251,15 @@ def test_label_formatting(self): # #1979, negative numbers - result = tmod._format_label(-117.9998, precision=3) - self.assertEqual(result, '-118') - result = tmod._format_label(117.9998, precision=3) - self.assertEqual(result, '118') + result = tmod._round_frac(-117.9998, precision=3) + self.assertEqual(result, -118) + result = tmod._round_frac(117.9998, precision=3) + self.assertEqual(result, 118) + + result = tmod._round_frac(117.9998, precision=2) + self.assertEqual(result, 118) + result = tmod._round_frac(0.000123456, precision=2) + self.assertEqual(result, 0.00012) def test_qcut_binning_issues(self): # #1978, 1979 @@ -224,9 +270,9 @@ def test_qcut_binning_issues(self): starts = [] ends = [] - for lev in result.categories: - s, e = lev[1:-1].split(',') - + for lev in 
np.unique(result): + s = lev.left + e = lev.right self.assertTrue(s != e) starts.append(float(s)) @@ -238,44 +284,47 @@ def test_qcut_binning_issues(self): self.assertTrue(ep < en) self.assertTrue(ep <= sn) - def test_cut_return_categorical(self): + def test_cut_return_intervals(self): s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) res = cut(s, 3) - exp = Series(Categorical.from_codes([0, 0, 0, 1, 1, 1, 2, 2, 2], - ["(-0.008, 2.667]", - "(2.667, 5.333]", "(5.333, 8]"], - ordered=True)) + exp_bins = np.linspace(0, 8, num=4).round(3) + exp_bins[0] -= 0.008 + exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take( + [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype('category', ordered=True) tm.assert_series_equal(res, exp) - def test_qcut_return_categorical(self): + def test_qcut_return_intervals(self): s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) res = qcut(s, [0, 0.333, 0.666, 1]) - exp = Series(Categorical.from_codes([0, 0, 0, 1, 1, 1, 2, 2, 2], - ["[0, 2.664]", - "(2.664, 5.328]", "(5.328, 8]"], - ordered=True)) + exp_levels = np.array([Interval(-0.001, 2.664), + Interval(2.664, 5.328), Interval(5.328, 8)]) + exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype( + 'category', ordered=True) tm.assert_series_equal(res, exp) def test_series_retbins(self): # GH 8589 s = Series(np.arange(4)) result, bins = cut(s, 2, retbins=True) - tm.assert_numpy_array_equal(result.cat.codes.values, - np.array([0, 0, 1, 1], dtype=np.int8)) - tm.assert_numpy_array_equal(bins, np.array([-0.003, 1.5, 3])) + expected = Series(IntervalIndex.from_breaks( + [-0.003, 1.5, 3], closed='right').repeat(2)).astype('category', + ordered=True) + tm.assert_series_equal(result, expected) result, bins = qcut(s, 2, retbins=True) - tm.assert_numpy_array_equal(result.cat.codes.values, - np.array([0, 0, 1, 1], dtype=np.int8)) - tm.assert_numpy_array_equal(bins, np.array([0, 1.5, 3])) + expected = Series(IntervalIndex.from_breaks( + [-0.001, 1.5, 3], closed='right').repeat(2)).astype('category', + 
ordered=True) + tm.assert_series_equal(result, expected) def test_qcut_duplicates_bin(self): # GH 7751 values = [0, 0, 0, 0, 1, 2, 3] - result_levels = ['[0, 1]', '(1, 3]'] + expected = IntervalIndex.from_intervals([Interval(-0.001, 1), + Interval(1, 3)]) - cats = qcut(values, 3, duplicates='drop') - self.assertTrue((cats.categories == result_levels).all()) + result = qcut(values, 3, duplicates='drop') + tm.assert_index_equal(result.categories, expected) self.assertRaises(ValueError, qcut, values, 3) self.assertRaises(ValueError, qcut, values, 3, duplicates='raise') @@ -291,51 +340,57 @@ def test_single_quantile(self): result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0, 0], ["[9, 9]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) + intervals = IntervalIndex([Interval(8.999, 9.0), + Interval(8.999, 9.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, expected) s = Series([-9., -9.]) + expected = Series([0, 0]) result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0, 0], ["[-9, -9]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) + intervals = IntervalIndex([Interval(-9.001, -9.0), + Interval(-9.001, -9.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, expected) s = Series([0., 0.]) + expected = Series([0, 0]) result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0, 0], ["[0, 0]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) - - expected = Series([0]) + intervals = IntervalIndex([Interval(-0.001, 0.0), + Interval(-0.001, 0.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, 
expected) s = Series([9]) + expected = Series([0]) result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0], ["[9, 9]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) + intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, expected) s = Series([-9]) + expected = Series([0]) result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0], ["[-9, -9]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) + intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, expected) s = Series([0]) + expected = Series([0]) result = qcut(s, 1, labels=False) tm.assert_series_equal(result, expected) result = qcut(s, 1) - exp_lab = Series(Categorical.from_codes([0], ["[0, 0]"], - ordered=True)) - tm.assert_series_equal(result, exp_lab) + intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right') + expected = Series(intervals).astype('category', ordered=True) + tm.assert_series_equal(result, expected) def test_single_bin(self): # issue 14652 @@ -376,11 +431,18 @@ def test_datetime_cut(self): # GH 14714 # testing for time data to be present as series data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03'])) + result, bins = cut(data, 3, retbins=True) - expected = Series(['(2012-12-31 23:57:07.200000, 2013-01-01 16:00:00]', - '(2013-01-01 16:00:00, 2013-01-02 08:00:00]', - '(2013-01-02 08:00:00, 2013-01-03 00:00:00]'], - ).astype("category", ordered=True) + expected = ( + Series(IntervalIndex.from_intervals([ + Interval(Timestamp('2012-12-31 23:57:07.200000'), + Timestamp('2013-01-01 16:00:00')), + Interval(Timestamp('2013-01-01 16:00:00'), + Timestamp('2013-01-02 
08:00:00')), + Interval(Timestamp('2013-01-02 08:00:00'), + Timestamp('2013-01-03 00:00:00'))])) + .astype('category', ordered=True)) + tm.assert_series_equal(result, expected) # testing for time data to be present as list @@ -404,9 +466,11 @@ def test_datetime_cut(self): def test_datetime_bin(self): data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')] bin_data = ['2012-12-12', '2012-12-14', '2012-12-16'] - expected = Series(['(2012-12-12 00:00:00, 2012-12-14 00:00:00]', - '(2012-12-14 00:00:00, 2012-12-16 00:00:00]'], - ).astype("category", ordered=True) + expected = ( + Series(IntervalIndex.from_intervals([ + Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), + Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])) + .astype('category', ordered=True)) for conv in [Timestamp, Timestamp, np.datetime64]: bins = [conv(v) for v in bin_data] @@ -421,6 +485,20 @@ def test_datetime_bin(self): result = cut(data, bins=bin_pydatetime) tm.assert_series_equal(Series(result), expected) + def test_datetime_nan(self): + + def f(): + cut(date_range('20130101', periods=3), bins=[0, 2, 4]) + self.assertRaises(ValueError, f) + + result = cut(date_range('20130102', periods=5), + bins=date_range('20130101', periods=2)) + mask = result.categories.isnull() + self.assert_numpy_array_equal(mask, np.array([False])) + mask = result.isnull() + self.assert_numpy_array_equal( + mask, np.array([False, True, True, True, True])) + def curpath(): pth, _ = os.path.split(os.path.abspath(__file__)) diff --git a/pandas/tests/types/test_dtypes.py b/pandas/tests/types/test_dtypes.py index e7b2edeb57714..79d9fd84396e7 100644 --- a/pandas/tests/types/test_dtypes.py +++ b/pandas/tests/types/test_dtypes.py @@ -3,14 +3,15 @@ import numpy as np import pandas as pd -from pandas import Series, Categorical, date_range +from pandas import Series, Categorical, IntervalIndex, date_range -from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype +from pandas.types.dtypes 
import (DatetimeTZDtype, PeriodDtype, + IntervalDtype, CategoricalDtype) from pandas.types.common import (is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, is_period_dtype, is_period, is_dtype_equal, is_datetime64_ns_dtype, - is_datetime64_dtype, + is_datetime64_dtype, is_interval_dtype, is_datetime64_any_dtype, is_string_dtype, _coerce_to_dtype) import pandas.util.testing as tm @@ -351,3 +352,114 @@ def test_empty(self): def test_not_string(self): # though PeriodDtype has object kind, it cannot be string self.assertFalse(is_string_dtype(PeriodDtype('D'))) + + +class TestIntervalDtype(Base, tm.TestCase): + + # TODO: placeholder + def setUp(self): + self.dtype = IntervalDtype('int64') + + def test_construction(self): + with tm.assertRaises(ValueError): + IntervalDtype('xx') + + for s in ['interval[int64]', 'Interval[int64]', 'int64']: + i = IntervalDtype(s) + self.assertEqual(i.subtype, np.dtype('int64')) + self.assertTrue(is_interval_dtype(i)) + + def test_construction_generic(self): + # generic + i = IntervalDtype('interval') + self.assertIs(i.subtype, None) + self.assertTrue(is_interval_dtype(i)) + self.assertTrue(str(i) == 'interval') + + i = IntervalDtype() + self.assertIs(i.subtype, None) + self.assertTrue(is_interval_dtype(i)) + self.assertTrue(str(i) == 'interval') + + def test_subclass(self): + a = IntervalDtype('interval[int64]') + b = IntervalDtype('interval[int64]') + + self.assertTrue(issubclass(type(a), type(a))) + self.assertTrue(issubclass(type(a), type(b))) + + def test_is_dtype(self): + self.assertTrue(IntervalDtype.is_dtype(self.dtype)) + self.assertTrue(IntervalDtype.is_dtype('interval')) + self.assertTrue(IntervalDtype.is_dtype(IntervalDtype('float64'))) + self.assertTrue(IntervalDtype.is_dtype(IntervalDtype('int64'))) + self.assertTrue(IntervalDtype.is_dtype(IntervalDtype(np.int64))) + + self.assertFalse(IntervalDtype.is_dtype('D')) + self.assertFalse(IntervalDtype.is_dtype('3D')) + 
self.assertFalse(IntervalDtype.is_dtype('U')) + self.assertFalse(IntervalDtype.is_dtype('S')) + self.assertFalse(IntervalDtype.is_dtype('foo')) + self.assertFalse(IntervalDtype.is_dtype(np.object_)) + self.assertFalse(IntervalDtype.is_dtype(np.int64)) + self.assertFalse(IntervalDtype.is_dtype(np.float64)) + + def test_identity(self): + self.assertEqual(IntervalDtype('interval[int64]'), + IntervalDtype('interval[int64]')) + + def test_coerce_to_dtype(self): + self.assertEqual(_coerce_to_dtype('interval[int64]'), + IntervalDtype('interval[int64]')) + + def test_construction_from_string(self): + result = IntervalDtype('interval[int64]') + self.assertTrue(is_dtype_equal(self.dtype, result)) + result = IntervalDtype.construct_from_string('interval[int64]') + self.assertTrue(is_dtype_equal(self.dtype, result)) + with tm.assertRaises(TypeError): + IntervalDtype.construct_from_string('foo') + with tm.assertRaises(TypeError): + IntervalDtype.construct_from_string('interval[foo]') + with tm.assertRaises(TypeError): + IntervalDtype.construct_from_string('foo[int64]') + + def test_equality(self): + self.assertTrue(is_dtype_equal(self.dtype, 'interval[int64]')) + self.assertTrue(is_dtype_equal(self.dtype, IntervalDtype('int64'))) + self.assertTrue(is_dtype_equal(self.dtype, IntervalDtype('int64'))) + self.assertTrue(is_dtype_equal(IntervalDtype('int64'), + IntervalDtype('int64'))) + + self.assertFalse(is_dtype_equal(self.dtype, 'int64')) + self.assertFalse(is_dtype_equal(IntervalDtype('int64'), + IntervalDtype('float64'))) + + def test_basic(self): + self.assertTrue(is_interval_dtype(self.dtype)) + + ii = IntervalIndex.from_breaks(range(3)) + + self.assertTrue(is_interval_dtype(ii.dtype)) + self.assertTrue(is_interval_dtype(ii)) + + s = Series(ii, name='A') + + # dtypes + # series results in object dtype currently, + self.assertFalse(is_interval_dtype(s.dtype)) + self.assertFalse(is_interval_dtype(s)) + + def test_basic_dtype(self): + 
self.assertTrue(is_interval_dtype('interval[int64]')) + self.assertTrue(is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))) + self.assertTrue(is_interval_dtype + (IntervalIndex.from_breaks(np.arange(4)))) + self.assertTrue(is_interval_dtype( + IntervalIndex.from_breaks(date_range('20130101', periods=3)))) + self.assertFalse(is_interval_dtype('U')) + self.assertFalse(is_interval_dtype('S')) + self.assertFalse(is_interval_dtype('foo')) + self.assertFalse(is_interval_dtype(np.object_)) + self.assertFalse(is_interval_dtype(np.int64)) + self.assertFalse(is_interval_dtype(np.float64)) diff --git a/pandas/tests/types/test_missing.py b/pandas/tests/types/test_missing.py index efd6dda02ab4b..31bf2817c8bab 100644 --- a/pandas/tests/types/test_missing.py +++ b/pandas/tests/types/test_missing.py @@ -55,6 +55,14 @@ def test_0d_array(self): self.assertFalse(isnull(np.array(0.0, dtype=object))) self.assertFalse(isnull(np.array(0, dtype=object))) + def test_empty_object(self): + + for shape in [(4, 0), (4,)]: + arr = np.empty(shape=shape, dtype=object) + result = isnull(arr) + expected = np.ones(shape=shape, dtype=bool) + tm.assert_numpy_array_equal(result, expected) + def test_isnull(self): self.assertFalse(isnull(1.)) self.assertTrue(isnull(None)) diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index 4a3d452228e01..2a258d4a7b7e5 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -3,17 +3,19 @@ """ from pandas.types.missing import isnull -from pandas.types.common import (is_float, is_integer, - is_scalar, _ensure_int64) +from pandas.types.common import (is_integer, + is_scalar, + is_categorical_dtype, + is_datetime64_dtype, + is_timedelta64_dtype, + _ensure_int64) -from pandas.core.api import Series -from pandas.core.categorical import Categorical import pandas.core.algorithms as algos import pandas.core.nanops as nanops -from pandas.compat import zip -from pandas import to_timedelta, to_datetime -from pandas.types.common import is_datetime64_dtype, 
is_timedelta64_dtype from pandas._libs.lib import infer_dtype +from pandas import (to_timedelta, to_datetime, + Categorical, Timestamp, Timedelta, + Series, Interval, IntervalIndex) import numpy as np @@ -27,7 +29,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, ---------- x : array-like Input array to be binned. It has to be 1-dimensional. - bins : int or sequence of scalars + bins : int, sequence of scalars, or IntervalIndex If `bins` is an int, it defines the number of equal-width bins in the range of `x`. However, in this case, the range of `x` is extended by .1% on each side to include the min or max values of `x`. If @@ -45,9 +47,9 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, retbins : bool, optional Whether to return the bins or not. Can be useful if bins is given as a scalar. - precision : int + precision : int, optional The precision at which to store and display the bins labels - include_lowest : bool + include_lowest : bool, optional Whether the first interval should be left-inclusive or not. Returns @@ -76,10 +78,12 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, (6.533, 9.7], (0.191, 3.367]] Categories (3, object): [(0.191, 3.367] < (3.367, 6.533] < (6.533, 9.7]], array([ 0.1905 , 3.36666667, 6.53333333, 9.7 ])) + >>> pd.cut(np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1]), 3, labels=["good","medium","bad"]) [good, good, good, medium, bad, good] Categories (3, object): [good < medium < bad] + >>> pd.cut(np.ones(5), 4, labels=False) array([1, 1, 1, 1, 1], dtype=int64) """ @@ -93,14 +97,16 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") - sz = x.size + try: # for array-like + sz = x.size + except AttributeError: + x = np.asarray(x) + sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') - # handle empty arrays. Can't determine range, so use 0-1. 
- # rng = (0, 1) - else: - rng = (nanops.nanmin(x), nanops.nanmax(x)) + + rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if mn == mx: # adjust end points before binning @@ -115,15 +121,18 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, else: bins[-1] += adj + elif isinstance(bins, IntervalIndex): + pass else: bins = np.asarray(bins) - bins = _convert_bin_to_numeric_type(bins) + bins = _convert_bin_to_numeric_type(bins, dtype) if (np.diff(bins) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, - include_lowest=include_lowest, dtype=dtype) + include_lowest=include_lowest, + dtype=dtype) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name) @@ -147,9 +156,9 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional - Whether to return the bins or not. Can be useful if bins is given - as a scalar. - precision : int + Whether to return the (bins, labels) or not. Can be useful if bins + is given as a scalar. + precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. 
@@ -174,9 +183,11 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): >>> pd.qcut(range(5), 4) [[0, 1], [0, 1], (1, 2], (2, 3], (3, 4]] Categories (4, object): [[0, 1] < (1, 2] < (2, 3] < (3, 4]] + >>> pd.qcut(range(5), 3, labels=["good","medium","bad"]) [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] + >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3], dtype=int64) """ @@ -205,6 +216,13 @@ def _bins_to_cuts(x, bins, right=True, labels=None, raise ValueError("invalid value for 'duplicates' parameter, " "valid options are: raise, drop") + if isinstance(bins, IntervalIndex): + # we have a fast-path here + ids = bins.get_indexer(x) + result = algos.take_nd(bins, ids) + result = Categorical(result, categories=bins, ordered=True) + return result, bins + unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == 'raise': @@ -225,96 +243,26 @@ def _bins_to_cuts(x, bins, right=True, labels=None, if labels is not False: if labels is None: - increases = 0 - while True: - try: - levels = _format_levels(bins, precision, right=right, - include_lowest=include_lowest, - dtype=dtype) - except ValueError: - increases += 1 - precision += 1 - if increases >= 20: - raise - else: - break - + labels = _format_labels(bins, precision, right=right, + include_lowest=include_lowest, + dtype=dtype) else: if len(labels) != len(bins) - 1: raise ValueError('Bin labels must be one fewer than ' 'the number of bin edges') - levels = labels + if not is_categorical_dtype(labels): + labels = Categorical(labels, ordered=True) - levels = np.asarray(levels, dtype=object) np.putmask(ids, na_mask, 0) - fac = Categorical(ids - 1, levels, ordered=True, fastpath=True) - else: - fac = ids - 1 - if has_nas: - fac = fac.astype(np.float64) - np.putmask(fac, na_mask, np.nan) + result = algos.take_nd(labels, ids - 1) - return fac, bins - - -def _format_levels(bins, prec, right=True, - include_lowest=False, 
dtype=None): - fmt = lambda v: _format_label(v, precision=prec, dtype=dtype) - if right: - levels = [] - for a, b in zip(bins, bins[1:]): - fa, fb = fmt(a), fmt(b) - - if a != b and fa == fb: - raise ValueError('precision too low') - - formatted = '(%s, %s]' % (fa, fb) - - levels.append(formatted) - - if include_lowest: - levels[0] = '[' + levels[0][1:] else: - levels = ['[%s, %s)' % (fmt(a), fmt(b)) - for a, b in zip(bins, bins[1:])] - return levels + result = ids - 1 + if has_nas: + result = result.astype(np.float64) + np.putmask(result, na_mask, np.nan) - -def _format_label(x, precision=3, dtype=None): - fmt_str = '%%.%dg' % precision - - if is_datetime64_dtype(dtype): - return to_datetime(x, unit='ns') - if is_timedelta64_dtype(dtype): - return to_timedelta(x, unit='ns') - if np.isinf(x): - return str(x) - elif is_float(x): - frac, whole = np.modf(x) - sgn = '-' if x < 0 else '' - whole = abs(whole) - if frac != 0.0: - val = fmt_str % frac - - # rounded up or down - if '.' not in val: - if x < 0: - return '%d' % (-whole - 1) - else: - return '%d' % (whole + 1) - - if 'e' in val: - return _trim_zeros(fmt_str % x) - else: - val = _trim_zeros(val) - if '.' 
in val: - return sgn + '.'.join(('%d' % whole, val.split('.')[1])) - else: # pragma: no cover - return sgn + '.'.join(('%d' % whole, val)) - else: - return sgn + '%0.f' % whole - else: - return str(x) + return result, bins def _trim_zeros(x): @@ -343,17 +291,65 @@ def _coerce_to_type(x): return x, dtype -def _convert_bin_to_numeric_type(x): +def _convert_bin_to_numeric_type(bins, dtype): """ if the passed bin is of datetime/timedelta type, this method converts it to integer + + Parameters + ---------- + bins : list-liek of bins + dtype : dtype of data + + Raises + ------ + ValueError if bins are not of a compat dtype to dtype """ - dtype = infer_dtype(x) - if dtype == 'timedelta' or dtype == 'timedelta64': - x = to_timedelta(x).view(np.int64) - elif dtype == 'datetime' or dtype == 'datetime64': - x = to_datetime(x).view(np.int64) - return x + bins_dtype = infer_dtype(bins) + if is_timedelta64_dtype(dtype): + if bins_dtype in ['timedelta', 'timedelta64']: + bins = to_timedelta(bins).view(np.int64) + else: + raise ValueError("bins must be of timedelta64 dtype") + elif is_datetime64_dtype(dtype): + if bins_dtype in ['datetime', 'datetime64']: + bins = to_datetime(bins).view(np.int64) + else: + raise ValueError("bins must be of datetime64 dtype") + + return bins + + +def _format_labels(bins, precision, right=True, + include_lowest=False, dtype=None): + """ based on the dtype, return our labels """ + + closed = 'right' if right else 'left' + + if is_datetime64_dtype(dtype): + formatter = Timestamp + adjust = lambda x: x - Timedelta('1ns') + elif is_timedelta64_dtype(dtype): + formatter = Timedelta + adjust = lambda x: x - Timedelta('1ns') + else: + precision = _infer_precision(precision, bins) + formatter = lambda x: _round_frac(x, precision) + adjust = lambda x: x - 10 ** (-precision) + + breaks = [formatter(b) for b in bins] + labels = IntervalIndex.from_breaks(breaks, closed=closed) + + if right and include_lowest: + # we will adjust the left hand side by precision 
to + # account that we are all right closed + v = adjust(labels[0].left) + + i = IntervalIndex.from_intervals( + [Interval(v, labels[0].right, closed='right')]) + labels = i.append(labels[1:]) + + return labels def _preprocess_for_cut(x): @@ -375,7 +371,8 @@ def _preprocess_for_cut(x): return x_is_series, series_index, name, x -def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name): +def _postprocess_for_cut(fac, bins, retbins, x_is_series, + series_index, name): """ handles post processing for the cut method where we combine the index information if the originally passed @@ -388,3 +385,28 @@ def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name): return fac return fac, bins + + +def _round_frac(x, precision): + """ + Round the fractional part of the given number + """ + if not np.isfinite(x) or x == 0: + return x + else: + frac, whole = np.modf(x) + if whole == 0: + digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision + else: + digits = precision + return np.around(x, digits) + + +def _infer_precision(base_precision, bins): + """Infer an appropriate precision for _round_frac + """ + for precision in range(base_precision, 20): + levels = [_round_frac(b, precision) for b in bins] + if algos.unique(levels).size == bins.size: + return precision + return base_precision # default diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index ae40c2f66a590..48d236177b474 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -31,6 +31,9 @@ import pandas.types.concat as _concat import pandas.tseries.frequencies as frequencies +import pandas.indexes.base as ibase +_index_doc_kwargs = dict(ibase._index_doc_kwargs) + class DatelikeOps(object): """ common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex """ @@ -242,6 +245,7 @@ def _box_values(self, values): def _format_with_header(self, header, **kwargs): return header + list(self._format_native_types(**kwargs)) + 
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) def __contains__(self, key): try: res = self.get_loc(key) @@ -249,6 +253,8 @@ def __contains__(self, key): except (KeyError, TypeError, ValueError): return False + contains = __contains__ + def __getitem__(self, key): """ This getitem defers to the underlying array, which by-definition can @@ -381,7 +387,7 @@ def sort_values(self, return_indexer=False, ascending=True): return self._simple_new(sorted_values, **attribs) - @Appender(_index_shared_docs['take']) + @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) @@ -798,7 +804,7 @@ def repeat(self, repeats, *args, **kwargs): return self._shallow_copy(self.asi8.repeat(repeats), freq=freq) - @Appender(_index_shared_docs['where']) + @Appender(_index_shared_docs['where'] % _index_doc_kwargs) def where(self, cond, other=None): other = _ensure_datetimelike_to_i8(other) values = _ensure_datetimelike_to_i8(self) diff --git a/pandas/tseries/interval.py b/pandas/tseries/interval.py deleted file mode 100644 index 22801318a1853..0000000000000 --- a/pandas/tseries/interval.py +++ /dev/null @@ -1,35 +0,0 @@ - -from pandas.core.index import Index - - -class Interval(object): - """ - Represents an interval of time defined by two timestamps - """ - - def __init__(self, start, end): - self.start = start - self.end = end - - -class PeriodInterval(object): - """ - Represents an interval of time defined by two Period objects (time - ordinals) - """ - - def __init__(self, start, end): - self.start = start - self.end = end - - -class IntervalIndex(Index): - """ - - """ - - def __new__(self, starts, ends): - pass - - def dtype(self): - return self.values.dtype diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 1e1496bbe9c27..7f7b3286fd4f8 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -347,6 +347,7 @@ def 
_coerce_scalar_to_index(self, item): """ return PeriodIndex([item], **self._get_attributes_dict()) + @Appender(_index_shared_docs['__contains__']) def __contains__(self, key): if isinstance(key, Period): if key.freq != self.freq: @@ -361,6 +362,8 @@ def __contains__(self, key): return False return False + contains = __contains__ + @property def asi8(self): return self._values.view('i8') diff --git a/pandas/types/api.py b/pandas/types/api.py index e78514ce77822..6dbd3dc6b640c 100644 --- a/pandas/types/api.py +++ b/pandas/types/api.py @@ -10,6 +10,10 @@ is_categorical, is_categorical_dtype, + # interval + is_interval, + is_interval_dtype, + # datetimelike is_datetimetz, is_datetime64_dtype, diff --git a/pandas/types/common.py b/pandas/types/common.py index 7ab2e068ac69f..0b14e484d40a7 100644 --- a/pandas/types/common.py +++ b/pandas/types/common.py @@ -7,6 +7,7 @@ from .dtypes import (CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, + IntervalDtype, IntervalDtypeType, ExtensionDtype) from .generic import (ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, @@ -139,6 +140,10 @@ def is_period_dtype(arr_or_dtype): return PeriodDtype.is_dtype(arr_or_dtype) +def is_interval_dtype(arr_or_dtype): + return IntervalDtype.is_dtype(arr_or_dtype) + + def is_categorical_dtype(arr_or_dtype): return CategoricalDtype.is_dtype(arr_or_dtype) @@ -501,6 +506,8 @@ def _coerce_to_dtype(dtype): dtype = DatetimeTZDtype(dtype) elif is_period_dtype(dtype): dtype = PeriodDtype(dtype) + elif is_interval_dtype(dtype): + dtype = IntervalDtype(dtype) else: dtype = np.dtype(dtype) return dtype @@ -538,6 +545,8 @@ def _get_dtype(arr_or_dtype): return arr_or_dtype elif isinstance(arr_or_dtype, PeriodDtype): return arr_or_dtype + elif isinstance(arr_or_dtype, IntervalDtype): + return arr_or_dtype elif isinstance(arr_or_dtype, string_types): if is_categorical_dtype(arr_or_dtype): return 
CategoricalDtype.construct_from_string(arr_or_dtype) @@ -545,6 +554,8 @@ def _get_dtype(arr_or_dtype): return DatetimeTZDtype.construct_from_string(arr_or_dtype) elif is_period_dtype(arr_or_dtype): return PeriodDtype.construct_from_string(arr_or_dtype) + elif is_interval_dtype(arr_or_dtype): + return IntervalDtype.construct_from_string(arr_or_dtype) if hasattr(arr_or_dtype, 'dtype'): arr_or_dtype = arr_or_dtype.dtype @@ -575,6 +586,8 @@ def _get_dtype_type(arr_or_dtype): return CategoricalDtypeType elif isinstance(arr_or_dtype, DatetimeTZDtype): return DatetimeTZDtypeType + elif isinstance(arr_or_dtype, IntervalDtype): + return IntervalDtypeType elif isinstance(arr_or_dtype, PeriodDtype): return PeriodDtypeType elif isinstance(arr_or_dtype, string_types): @@ -584,6 +597,8 @@ def _get_dtype_type(arr_or_dtype): return DatetimeTZDtypeType elif is_period_dtype(arr_or_dtype): return PeriodDtypeType + elif is_interval_dtype(arr_or_dtype): + return IntervalDtypeType return _get_dtype_type(np.dtype(arr_or_dtype)) try: return arr_or_dtype.dtype.type @@ -695,6 +710,8 @@ def pandas_dtype(dtype): return dtype elif isinstance(dtype, CategoricalDtype): return dtype + elif isinstance(dtype, IntervalDtype): + return dtype elif isinstance(dtype, string_types): try: return DatetimeTZDtype.construct_from_string(dtype) @@ -708,6 +725,12 @@ def pandas_dtype(dtype): except TypeError: pass + elif dtype.startswith('interval[') or dtype.startswith('Interval['): + try: + return IntervalDtype.construct_from_string(dtype) + except TypeError: + pass + try: return CategoricalDtype.construct_from_string(dtype) except TypeError: diff --git a/pandas/types/dtypes.py b/pandas/types/dtypes.py index c3494df93476b..7913950a597c9 100644 --- a/pandas/types/dtypes.py +++ b/pandas/types/dtypes.py @@ -367,3 +367,112 @@ def is_dtype(cls, dtype): else: return False return super(PeriodDtype, cls).is_dtype(dtype) + + +class IntervalDtypeType(type): + """ + the type of IntervalDtype, this metaclass determines 
subclass ability + """ + pass + + +class IntervalDtype(ExtensionDtype): + __metaclass__ = IntervalDtypeType + """ + A Interval duck-typed class, suitable for holding an interval + + THIS IS NOT A REAL NUMPY DTYPE + """ + type = IntervalDtypeType + kind = None + str = '|O08' + base = np.dtype('O') + num = 103 + _metadata = ['subtype'] + _match = re.compile("(I|i)nterval\[(?P.+)\]") + _cache = {} + + def __new__(cls, subtype=None): + """ + Parameters + ---------- + subtype : the dtype of the Interval + """ + + if isinstance(subtype, IntervalDtype): + return subtype + elif subtype is None or (isinstance(subtype, compat.string_types) and + subtype == 'interval'): + subtype = None + else: + if isinstance(subtype, compat.string_types): + m = cls._match.search(subtype) + if m is not None: + subtype = m.group('subtype') + + from pandas.types.common import pandas_dtype + try: + subtype = pandas_dtype(subtype) + except TypeError: + raise ValueError("could not construct IntervalDtype") + + try: + return cls._cache[str(subtype)] + except KeyError: + u = object.__new__(cls) + u.subtype = subtype + cls._cache[str(subtype)] = u + return u + + @classmethod + def construct_from_string(cls, string): + """ + attempt to construct this type from a string, raise a TypeError + if its not possible + """ + if isinstance(string, compat.string_types): + try: + return cls(string) + except ValueError: + pass + raise TypeError("could not construct IntervalDtype") + + def __unicode__(self): + if self.subtype is None: + return "interval" + return "interval[{subtype}]".format(subtype=self.subtype) + + @property + def name(self): + return str(self) + + def __hash__(self): + # make myself hashable + return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, compat.string_types): + return other == self.name or other == self.name.title() + + return (isinstance(other, IntervalDtype) and + self.subtype == other.subtype) + + @classmethod + def is_dtype(cls, dtype): + """ + Return a 
boolean if we if the passed type is an actual dtype that we + can match (via string or type) + """ + + if isinstance(dtype, compat.string_types): + if dtype.lower().startswith('interval'): + try: + if cls.construct_from_string(dtype) is not None: + return True + else: + return False + except ValueError: + return False + else: + return False + return super(IntervalDtype, cls).is_dtype(dtype) diff --git a/pandas/types/generic.py b/pandas/types/generic.py index e7b54ccc6f25e..90608c18ae503 100644 --- a/pandas/types/generic.py +++ b/pandas/types/generic.py @@ -32,12 +32,14 @@ def _check(cls, inst): ("periodindex", )) ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex", )) +ABCIntervalIndex = create_pandas_abc_type("ABCIntervalIndex", "_typ", + ("intervalindex", )) ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ", ("index", "int64index", "rangeindex", "float64index", "uint64index", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", - "categoricalindex")) + "categoricalindex", "intervalindex")) ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", )) ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", )) diff --git a/pandas/types/inference.py b/pandas/types/inference.py index 91418677c6b19..b0a93d24228af 100644 --- a/pandas/types/inference.py +++ b/pandas/types/inference.py @@ -20,6 +20,8 @@ is_decimal = lib.is_decimal +is_interval = lib.is_interval + def is_number(obj): """ diff --git a/pandas/types/missing.py b/pandas/types/missing.py index ea49af9884f5a..af3a873bc2866 100644 --- a/pandas/types/missing.py +++ b/pandas/types/missing.py @@ -9,7 +9,7 @@ from .common import (is_string_dtype, is_datetimelike, is_datetimelike_v_numeric, is_float_dtype, is_datetime64_dtype, is_datetime64tz_dtype, - is_timedelta64_dtype, + is_timedelta64_dtype, is_interval_dtype, is_complex_dtype, is_categorical_dtype, is_string_like_dtype, is_bool_dtype, is_integer_dtype, 
is_dtype_equal, @@ -127,6 +127,9 @@ def _isnull_ndarraylike(obj): if not isinstance(values, Categorical): values = values.values result = values.isnull() + elif is_interval_dtype(values): + from pandas import IntervalIndex + result = IntervalIndex(obj).isnull() else: # Working around NumPy ticket 1542 diff --git a/pandas/util/testing.py b/pandas/util/testing.py index d5986a7f390e5..c73cca56f975a 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -29,6 +29,7 @@ is_number, is_bool, needs_i8_conversion, is_categorical_dtype, + is_interval_dtype, is_sequence, is_list_like) from pandas.formats.printing import pprint_thing @@ -43,9 +44,11 @@ from pandas.computation import expressions as expr -from pandas import (bdate_range, CategoricalIndex, Categorical, DatetimeIndex, - TimedeltaIndex, PeriodIndex, RangeIndex, Index, MultiIndex, +from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, + DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, + Index, MultiIndex, Series, DataFrame, Panel, Panel4D) + from pandas.util.decorators import deprecate from pandas.util import libtesting from pandas.io.common import urlopen @@ -943,6 +946,9 @@ def _get_ilevel_values(index, level): assert_attr_equal('names', left, right, obj=obj) if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex): assert_attr_equal('freq', left, right, obj=obj) + if (isinstance(left, pd.IntervalIndex) or + isinstance(right, pd.IntervalIndex)): + assert_attr_equal('closed', left, right, obj=obj) if check_categorical: if is_categorical_dtype(left) or is_categorical_dtype(right): @@ -1307,6 +1313,12 @@ def assert_series_equal(left, right, check_dtype=True, else: assert_numpy_array_equal(left.get_values(), right.get_values(), check_dtype=check_dtype) + elif is_interval_dtype(left) or is_interval_dtype(right): + # TODO: big hack here + l = pd.IntervalIndex(left) + r = pd.IntervalIndex(right) + assert_index_equal(l, r, obj='{0}.index'.format(obj)) + else: 
libtesting.assert_almost_equal(left.get_values(), right.get_values(), check_less_precise=check_less_precise, @@ -1687,6 +1699,12 @@ def makeCategoricalIndex(k=10, n=3, name=None): return CategoricalIndex(np.random.choice(x, k), name=name) +def makeIntervalIndex(k=10, name=None): + """ make a length k IntervalIndex """ + x = np.linspace(0, 100, num=(k + 1)) + return IntervalIndex.from_breaks(x, name=name) + + def makeBoolIndex(k=10, name=None): if k == 1: return Index([True], name=name) diff --git a/setup.py b/setup.py index 96b25f7427370..6707af7eb0908 100755 --- a/setup.py +++ b/setup.py @@ -119,6 +119,7 @@ def is_platform_mac(): '_libs/hashtable_func_helper.pxi.in'], 'index': ['_libs/index_class_helper.pxi.in'], 'sparse': ['sparse/sparse_op_helper.pxi.in'], + 'interval': ['_libs/intervaltree.pxi.in'] } _pxifiles = [] @@ -335,6 +336,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/index.pyx', 'pandas/_libs/algos.pyx', 'pandas/_libs/join.pyx', + 'pandas/_libs/interval.pyx', 'pandas/core/window.pyx', 'pandas/sparse/sparse.pyx', 'pandas/util/testing.pyx', @@ -508,6 +510,9 @@ def pxd(name): 'depends': _pxi_dep['join']}, '_libs.reshape': {'pyxfile': '_libs/reshape', 'depends': _pxi_dep['reshape']}, + '_libs.interval': {'pyxfile': '_libs/interval', + 'pxdfiles': ['_libs/hashtable'], + 'depends': _pxi_dep['interval']}, 'core.libwindow': {'pyxfile': 'core/window', 'pxdfiles': ['_libs/src/skiplist', '_libs/src/util'], 'depends': ['pandas/_libs/src/skiplist.pyx', From 8b404539b8b8f2ce2eaf38c7cd2f7f3925c6e171 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Fri, 14 Apr 2017 15:37:03 +0000 Subject: [PATCH 22/56] ENH: add Series & DataFrame .agg/.aggregate (#14668) * ENH: add Series & DataFrame .agg/.aggregate to provide convienent function application that mimics the groupby(..).agg/.aggregate interface .apply is now a synonym for .agg, and will accept dict/list-likes for aggregations CLN: rename .name attr -> ._selection_name from SeriesGroupby for compat (didn't exist on 
DataFrameGroupBy) resolves conflicts w.r.t. setting .name on a groupby object closes #1623 closes #14464 custom .describe closes #14483 closes #15015 closes #7014 * DOC/TST: test for deprecation in .agg additional doc updates * whatsnew fixes --- doc/source/api.rst | 4 + doc/source/basics.rst | 231 +++++++++++++++++++++- doc/source/computation.rst | 12 +- doc/source/groupby.rst | 4 +- doc/source/timeseries.rst | 6 +- doc/source/whatsnew/v0.20.0.txt | 64 ++++++ pandas/core/base.py | 25 +-- pandas/core/frame.py | 70 ++++++- pandas/core/generic.py | 85 +++++++- pandas/core/series.py | 55 ++++++ pandas/tests/frame/test_apply.py | 178 +++++++++++++++++ pandas/tests/groupby/test_aggregate.py | 6 +- pandas/tests/groupby/test_value_counts.py | 1 + pandas/tests/series/test_apply.py | 181 ++++++++++++++++- 14 files changed, 877 insertions(+), 45 deletions(-) diff --git a/doc/source/api.rst b/doc/source/api.rst index 6ba8c2b8ead67..6d1765ce65714 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -314,6 +314,8 @@ Function application, GroupBy & Window :toctree: generated/ Series.apply + Series.aggregate + Series.transform Series.map Series.groupby Series.rolling @@ -831,6 +833,8 @@ Function application, GroupBy & Window DataFrame.apply DataFrame.applymap + DataFrame.aggregate + DataFrame.transform DataFrame.groupby DataFrame.rolling DataFrame.expanding diff --git a/doc/source/basics.rst b/doc/source/basics.rst index f649b3fd8a9a3..ab5d7e69c923b 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -702,7 +702,8 @@ on an entire ``DataFrame`` or ``Series``, row- or column-wise, or elementwise. 1. `Tablewise Function Application`_: :meth:`~DataFrame.pipe` 2. `Row or Column-wise Function Application`_: :meth:`~DataFrame.apply` -3. Elementwise_ function application: :meth:`~DataFrame.applymap` +3. `Aggregation API`_: :meth:`~DataFrame.agg` and :meth:`~DataFrame.transform` +4. `Applying Elementwise Functions`_: :meth:`~DataFrame.applymap` .. 
_basics.pipe: @@ -778,6 +779,13 @@ statistics methods, take an optional ``axis`` argument: df.apply(np.cumsum) df.apply(np.exp) +``.apply()`` will also dispatch on a string method name. + +.. ipython:: python + + df.apply('mean') + df.apply('mean', axis=1) + Depending on the return type of the function passed to :meth:`~DataFrame.apply`, the result will either be of lower dimension or the same dimension. @@ -827,16 +835,223 @@ set to True, the passed function will instead receive an ndarray object, which has positive performance implications if you do not need the indexing functionality. -.. seealso:: +.. _basics.aggregate: + +Aggregation API +~~~~~~~~~~~~~~~ + +.. versionadded:: 0.20.0 + +The aggregation API allows one to express possibly multiple aggregation operations in a single concise way. +This API is similar across pandas objects, see :ref:`groupby API `, the +:ref:`window functions API `, and the :ref:`resample API `. +The entry point for aggregation is the method :meth:`~DataFrame.aggregate`, or the alias :meth:`~DataFrame.agg`. + +We will use a similar starting frame from above: + +.. ipython:: python + + tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + tsdf.iloc[3:7] = np.nan + tsdf + +Using a single function is equivalent to :meth:`~DataFrame.apply`; You can also pass named methods as strings. +These will return a ``Series`` of the aggregated output: + +.. ipython:: python + + tsdf.agg(np.sum) + + tsdf.agg('sum') + + # these are equivalent to a ``.sum()`` because we are aggregating on a single function + tsdf.sum() + +Single aggregations on a ``Series`` this will result in a scalar value: + +.. ipython:: python + + tsdf.A.agg('sum') + + +Aggregating with multiple functions ++++++++++++++++++++++++++++++++++++ + +You can pass multiple aggregation arguments as a list. +The results of each of the passed functions will be a row in the resultant ``DataFrame``. 
+These are naturally named from the aggregation function. + +.. ipython:: python + + tsdf.agg(['sum']) + +Multiple functions yield multiple rows: + +.. ipython:: python + + tsdf.agg(['sum', 'mean']) + +On a ``Series``, multiple functions return a ``Series``, indexed by the function names: + +.. ipython:: python + + tsdf.A.agg(['sum', 'mean']) + +Passing a ``lambda`` function will yield a ```` named row: + +.. ipython:: python + + tsdf.A.agg(['sum', lambda x: x.mean()]) + +Passing a named function will yield that name for the row: + +.. ipython:: python + + def mymean(x): + return x.mean() + + tsdf.A.agg(['sum', mymean]) + +Aggregating with a dict ++++++++++++++++++++++++ + +Passing a dictionary of column names to a scalar or a list of scalars, to ``DataFame.agg`` +allows you to customize which functions are applied to which columns. + +.. ipython:: python + + tsdf.agg({'A': 'mean', 'B': 'sum'}) + +Passing a list-like will generate a ``DataFrame`` output. You will get a matrix-like output +of all of the aggregators. The output will consist of all unique functions. Those that are +not noted for a particular column will be ``NaN``: + +.. ipython:: python + + tsdf.agg({'A': ['mean', 'min'], 'B': 'sum'}) + +.. _basics.aggregation.mixed_dtypes: + +Mixed Dtypes +++++++++++++ - The section on :ref:`GroupBy ` demonstrates related, flexible - functionality for grouping by some criterion, applying, and combining the - results into a Series, DataFrame, etc. +When presented with mixed dtypes that cannot aggregate, ``.agg`` will only take the valid +aggregations. This is similiar to how groupby ``.agg`` works. -.. _Elementwise: +.. ipython:: python + + mdf = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + mdf.dtypes + +.. ipython:: python + + mdf.agg(['min', 'sum']) + +.. 
_basics.aggregation.custom_describe: + +Custom describe ++++++++++++++++ + +With ``.agg()`` is it possible to easily create a custom describe function, similar +to the built in :ref:`describe function `. + +.. ipython:: python + + from functools import partial + + q_25 = partial(pd.Series.quantile, q=0.25) + q_25.__name__ = '25%' + q_75 = partial(pd.Series.quantile, q=0.75) + q_75.__name__ = '75%' + + tsdf.agg(['count', 'mean', 'std', 'min', q_25, 'median', q_75, 'max']) + +.. _basics.transform: + +Transform API +~~~~~~~~~~~~~ -Applying elementwise Python functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. versionadded:: 0.20.0 + +The :method:`~DataFrame.transform` method returns an object that is indexed the same (same size) +as the original. This API allows you to provide *multiple* operations at the same +time rather than one-by-one. Its api is quite similar to the ``.agg`` API. + +Use a similar frame to the above sections. + +.. ipython:: python + + tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + tsdf.iloc[3:7] = np.nan + tsdf + +Transform the entire frame. Transform allows functions to input as a numpy function, string +function name and user defined function. + +.. ipython:: python + + tsdf.transform(np.abs) + tsdf.transform('abs') + tsdf.transform(lambda x: x.abs()) + +Since this is a single function, this is equivalent to a ufunc application + +.. ipython:: python + + np.abs(tsdf) + +Passing a single function to ``.transform()`` with a Series will yield a single Series in return. + +.. ipython:: python + + tsdf.A.transform(np.abs) + + +Transform with multiple functions ++++++++++++++++++++++++++++++++++ + +Passing multiple functions will yield a column multi-indexed DataFrame. +The first level will be the original frame column names; the second level +will be the names of the transforming functions. + +.. 
ipython:: python + + tsdf.transform([np.abs, lambda x: x+1]) + +Passing multiple functions to a Series will yield a DataFrame. The +resulting column names will be the transforming functions. + +.. ipython:: python + + tsdf.A.transform([np.abs, lambda x: x+1]) + + +Transforming with a dict +++++++++++++++++++++++++ + + +Passing a dict of functions will will allow selective transforming per column. + +.. ipython:: python + + tsdf.transform({'A': np.abs, 'B': lambda x: x+1}) + +Passing a dict of lists will generate a multi-indexed DataFrame with these +selective transforms. + +.. ipython:: python + + tsdf.transform({'A': np.abs, 'B': [lambda x: x+1, 'sqrt']}) + +.. _basics.elementwise: + +Applying Elementwise Functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since not all functions can be vectorized (accept NumPy arrays and return another array or value), the methods :meth:`~DataFrame.applymap` on DataFrame diff --git a/doc/source/computation.rst b/doc/source/computation.rst index f6c912bf59b34..8c75d4355c99a 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -617,7 +617,9 @@ Aggregation ----------- Once the ``Rolling``, ``Expanding`` or ``EWM`` objects have been created, several methods are available to -perform multiple computations on the data. This is very similar to a ``.groupby(...).agg`` seen :ref:`here `. +perform multiple computations on the data. These operations are similar to the :ref:`aggregating API `, +:ref:`groupby aggregates `, and :ref:`resample API `. + .. ipython:: python @@ -642,10 +644,10 @@ columns if none are selected. .. _stats.aggregate.multifunc: -Applying multiple functions at once -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Applying multiple functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -With windowed Series you can also pass a list or dict of functions to do +With windowed ``Series`` you can also pass a list of functions to do aggregation with, outputting a DataFrame: .. 
ipython:: python @@ -666,7 +668,7 @@ Applying different functions to DataFrame columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By passing a dict to ``aggregate`` you can apply a different aggregation to the -columns of a DataFrame: +columns of a ``DataFrame``: .. ipython:: python :okexcept: diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 03ee5e0d67913..cf4f1059ae17a 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -439,7 +439,9 @@ Aggregation ----------- Once the GroupBy object has been created, several methods are available to -perform a computation on the grouped data. +perform a computation on the grouped data. These operations are similar to the +:ref:`aggregating API `, :ref:`window functions API `, +and :ref:`resample API `. An obvious one is aggregation via the ``aggregate`` or equivalently ``agg`` method: diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 0a957772d785e..6a4ea2d5319ab 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1519,11 +1519,13 @@ We can instead only resample those groups where we have points as follows: ts.groupby(partial(round, freq='3T')).sum() +.. _timeseries.aggregate: + Aggregation ~~~~~~~~~~~ -Similar to :ref:`groupby aggregates ` and the :ref:`window functions `, a ``Resampler`` can be selectively -resampled. +Similar to the :ref:`aggregating API `, :ref:`groupby aggregates API `, and the :ref:`window functions API `, +a ``Resampler`` can be selectively resampled. Resampling a ``DataFrame``, the default will be to act on all columns with the same function. diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 04aed6c2c5466..da32de750e7de 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -9,6 +9,8 @@ users upgrade to this version. 
Highlights include: +- new ``.agg()`` API for Series/DataFrame similar to the groupby-rolling-resample API's, see :ref:`here ` +- Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here `. - The ``.ix`` indexer has been deprecated, see :ref:`here ` - ``Panel`` has been deprecated, see :ref:`here ` - Improved user API when accessing levels in ``.groupby()``, see :ref:`here ` @@ -32,6 +34,68 @@ Check the :ref:`API Changes ` and :ref:`deprecations New features ~~~~~~~~~~~~ +.. _whatsnew_0200.enhancements.agg: + +``agg`` API +^^^^^^^^^^^ + +Series & DataFrame have been enhanced to support the aggregation API. This is an already familiar API that +is supported for groupby, window operations, and resampling. This allows one to express, possibly multiple +aggregation operations, in a single concise way by using :meth:`~DataFrame.agg`, +and :meth:`~DataFrame.transform`. The full documentation is :ref:`here `` (:issue:`1623`) + +Here is a sample + +.. ipython:: python + + df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], + index=pd.date_range('1/1/2000', periods=10)) + df.iloc[3:7] = np.nan + df + +One can operate using string function names, callables, lists, or dictionaries of these. + +Using a single function is equivalent to ``.apply``. + +.. ipython:: python + + df.agg('sum') + +Multiple functions in lists. + +.. ipython:: python + + df.agg(['sum', 'min']) + +Dictionaries to provide the ability to provide selective aggregation per column. +You will get a matrix-like output of all of the aggregators. The output will consist +of all unique functions. Those that are not noted for a particular column will be ``NaN``: + +.. ipython:: python + + df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) + +The API also supports a ``.transform()`` function to provide for broadcasting results. + +.. 
ipython:: python + + df.transform(['abs', lambda x: x - x.min()]) + +When presented with mixed dtypes that cannot aggregate, ``.agg()`` will only take the valid +aggregations. This is similiar to how groupby ``.agg()`` works. (:issue:`15015`) + +.. ipython:: python + + df = pd.DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + df.dtypes + +.. ipython:: python + + df.agg(['min', 'sum']) + .. _whatsnew_0200.enhancements.dataio_dtype: ``dtype`` keyword for data IO diff --git a/pandas/core/base.py b/pandas/core/base.py index 6566ee38c1ade..33c95197debdc 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -470,6 +470,15 @@ def _aggregate(self, arg, *args, **kwargs): obj = self._selected_obj + def nested_renaming_depr(level=4): + # deprecation of nested renaming + # GH 15931 + warnings.warn( + ("using a dict with renaming " + "is deprecated and will be removed in a future " + "version"), + FutureWarning, stacklevel=level) + # if we have a dict of any non-scalars # eg. 
{'A' : ['mean']}, normalize all to # be list-likes @@ -498,14 +507,10 @@ def _aggregate(self, arg, *args, **kwargs): raise SpecificationError('cannot perform renaming ' 'for {0} with a nested ' 'dictionary'.format(k)) + nested_renaming_depr(4 + (_level or 0)) - # deprecation of nested renaming - # GH 15931 - warnings.warn( - ("using a dict with renaming " - "is deprecated and will be removed in a future " - "version"), - FutureWarning, stacklevel=4) + elif isinstance(obj, ABCSeries): + nested_renaming_depr() arg = new_arg @@ -515,11 +520,7 @@ def _aggregate(self, arg, *args, **kwargs): keys = list(compat.iterkeys(arg)) if (isinstance(obj, ABCDataFrame) and len(obj.columns.intersection(keys)) != len(keys)): - warnings.warn( - ("using a dict with renaming " - "is deprecated and will be removed in a future " - "version"), - FutureWarning, stacklevel=4) + nested_renaming_depr() from pandas.tools.concat import concat diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c8c21b0c5fd7d..4565250c78387 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4189,6 +4189,42 @@ def diff(self, periods=1, axis=0): # ---------------------------------------------------------------------- # Function application + def _gotitem(self, key, ndim, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : 1,2 + requested ndim of result + subset : object, default None + subset to act on + """ + if subset is None: + subset = self + + # TODO: _shallow_copy(subset)? 
+ return self[key] + + @Appender(_shared_docs['aggregate'] % _shared_doc_kwargs) + def aggregate(self, func, axis=0, *args, **kwargs): + axis = self._get_axis_number(axis) + + # TODO: flipped axis + result = None + if axis == 0: + try: + result, how = self._aggregate(func, axis=0, *args, **kwargs) + except TypeError: + pass + if result is None: + return self.apply(func, axis=axis, args=args, **kwargs) + return result + + agg = aggregate + def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds): """ @@ -4244,22 +4280,35 @@ def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None, See also -------- DataFrame.applymap: For elementwise operations + DataFrame.agg: only perform aggregating type operations + DataFrame.transform: only perform transformating type operations Returns ------- applied : Series or DataFrame """ axis = self._get_axis_number(axis) - if kwds or args and not isinstance(func, np.ufunc): + ignore_failures = kwds.pop('ignore_failures', False) + + # dispatch to agg + if axis == 0 and isinstance(func, (list, dict)): + return self.aggregate(func, axis=axis, *args, **kwds) + + if len(self.columns) == 0 and len(self.index) == 0: + return self._apply_empty_result(func, axis, reduce, *args, **kwds) + # if we are a string, try to dispatch + if isinstance(func, compat.string_types): + if axis: + kwds['axis'] = axis + return getattr(self, func)(*args, **kwds) + + if kwds or args and not isinstance(func, np.ufunc): def f(x): return func(x, *args, **kwds) else: f = func - if len(self.columns) == 0 and len(self.index) == 0: - return self._apply_empty_result(func, axis, reduce, *args, **kwds) - if isinstance(f, np.ufunc): with np.errstate(all='ignore'): results = f(self.values) @@ -4276,7 +4325,10 @@ def f(x): else: if reduce is None: reduce = True - return self._apply_standard(f, axis, reduce=reduce) + return self._apply_standard( + f, axis, + reduce=reduce, + ignore_failures=ignore_failures) else: return 
self._apply_broadcast(f, axis) @@ -5085,7 +5137,13 @@ def f(x): # this can end up with a non-reduction # but not always. if the types are mixed # with datelike then need to make sure a series - result = self.apply(f, reduce=False) + + # we only end up here if we have not specified + # numeric_only and yet we have tried a + # column-by-column reduction, where we have mixed type. + # So let's just do what we can + result = self.apply(f, reduce=False, + ignore_failures=True) if result.ndim == self.ndim: result = result.iloc[0] return result diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 86978a9739ca4..316c9f5e2ccd8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -32,7 +32,7 @@ SettingWithCopyError, SettingWithCopyWarning, AbstractMethodError) -from pandas.core.base import PandasObject +from pandas.core.base import PandasObject, SelectionMixin from pandas.core.index import (Index, MultiIndex, _ensure_index, InvalidIndexError) import pandas.core.indexing as indexing @@ -91,7 +91,7 @@ def _single_replace(self, to_replace, method, inplace, limit): return result -class NDFrame(PandasObject): +class NDFrame(PandasObject, SelectionMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure @@ -459,6 +459,16 @@ def size(self): """number of elements in the NDFrame""" return np.prod(self.shape) + @property + def _selected_obj(self): + """ internal compat with SelectionMixin """ + return self + + @property + def _obj_with_exclusions(self): + """ internal compat with SelectionMixin """ + return self + def _expand_axes(self, key): new_axes = [] for k, ax in zip(key, self.axes): @@ -2853,6 +2863,66 @@ def pipe(self, func, *args, **kwargs): else: return func(self, *args, **kwargs) + _shared_docs['aggregate'] = (""" + Aggregate using input function or dict of {column -> + function} + + .. 
versionadded:: 0.20.0 + + Parameters + ---------- + func : callable, string, dictionary, or list of string/callables + Function to use for aggregating the data. If a function, must either + work when passed a DataFrame or when passed to DataFrame.apply. If + passed a dict, the keys must be DataFrame column names. + + Accepted Combinations are: + - string function name + - function + - list of functions + - dict of column names -> functions (or list of functions) + + Notes + ----- + Numpy functions mean/median/prod/sum/std/var are special cased so the + default behavior is applying the function along axis=0 + (e.g., np.mean(arr_2d, axis=0)) as opposed to + mimicking the default Numpy behavior (e.g., np.mean(arr_2d)). + + Returns + ------- + aggregated : %(klass)s + + See also + -------- + """) + + _shared_docs['transform'] = (""" + Call function producing a like-indexed %(klass)s + and return a %(klass)s with the transformed values` + + .. versionadded:: 0.20.0 + + Parameters + ---------- + func : callable, string, dictionary, or list of string/callables + To apply to column + + Accepted Combinations are: + - string function name + - function + - list of functions + - dict of column names -> functions (or list of functions) + + Examples + -------- + >>> df.transform(lambda x: (x - x.mean()) / x.std()) + + Returns + ------- + transformed : %(klass)s + """) + # ---------------------------------------------------------------------- # Attribute access @@ -5990,6 +6060,17 @@ def ewm(self, com=None, span=None, halflife=None, alpha=None, cls.ewm = ewm + @Appender(_shared_docs['transform'] % _shared_doc_kwargs) + def transform(self, func, *args, **kwargs): + result = self.agg(func, *args, **kwargs) + if is_scalar(result) or len(result) != len(self): + raise ValueError("transforms cannot produce " + "aggregated results") + + return result + + cls.transform = transform + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py 
b/pandas/core/series.py index 5ee3ca73742ae..3305f0b6c439e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2144,6 +2144,49 @@ def map_f(values, f): return self._constructor(new_values, index=self.index).__finalize__(self) + def _gotitem(self, key, ndim, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : 1,2 + requested ndim of result + subset : object, default None + subset to act on + """ + return self + + @Appender(generic._shared_docs['aggregate'] % _shared_doc_kwargs) + def aggregate(self, func, axis=0, *args, **kwargs): + axis = self._get_axis_number(axis) + result, how = self._aggregate(func, *args, **kwargs) + if result is None: + + # we can be called from an inner function which + # passes this meta-data + kwargs.pop('_axis', None) + kwargs.pop('_level', None) + + # try a regular apply, this evaluates lambdas + # row-by-row; however if the lambda is expected a Series + # expression, e.g.: lambda x: x-x.quantile(0.25) + # this will fail, so we can try a vectorized evaluation + + # we cannot FIRST try the vectorized evaluation, becuase + # then .agg and .apply would have different semantics if the + # operation is actually defined on the Series, e.g. str + try: + result = self.apply(func, *args, **kwargs) + except (ValueError, AttributeError, TypeError): + result = func(self, *args, **kwargs) + + return result + + agg = aggregate + def apply(self, func, convert_dtype=True, args=(), **kwds): """ Invoke function on values of Series. 
Can be ufunc (a NumPy function @@ -2167,6 +2210,8 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): See also -------- Series.map: For element-wise operations + Series.agg: only perform aggregating type operations + Series.transform: only perform transformating type operations Examples -------- @@ -2244,6 +2289,15 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): return self._constructor(dtype=self.dtype, index=self.index).__finalize__(self) + # dispatch to agg + if isinstance(func, (list, dict)): + return self.aggregate(func, *args, **kwds) + + # if we are a string, try to dispatch + if isinstance(func, compat.string_types): + return self._try_aggregate_string_function(func, *args, **kwds) + + # handle ufuncs and lambdas if kwds or args and not isinstance(func, np.ufunc): f = lambda x: func(x, *args, **kwds) else: @@ -2253,6 +2307,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): if isinstance(f, np.ufunc): return f(self) + # row-wise access if is_extension_type(self.dtype): mapped = self._values.map(f) else: diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 30fde4b5b78d8..157cd1cdf1b22 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -106,6 +106,17 @@ def test_apply_standard_nonunique(self): rs = df.T.apply(lambda s: s[0], axis=0) assert_series_equal(rs, xp) + def test_with_string_args(self): + + for arg in ['sum', 'mean', 'min', 'max', 'std']: + result = self.frame.apply(arg) + expected = getattr(self.frame, arg)() + tm.assert_series_equal(result, expected) + + result = self.frame.apply(arg, axis=1) + expected = getattr(self.frame, arg)(axis=1) + tm.assert_series_equal(result, expected) + def test_apply_broadcast(self): broadcasted = self.frame.apply(np.mean, broadcast=True) agged = self.frame.apply(np.mean) @@ -455,3 +466,170 @@ def test_apply_non_numpy_dtype(self): df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category') result = 
df.apply(lambda x: x) assert_frame_equal(result, df) + + +def zip_frames(*frames): + """ + take a list of frames, zip the columns together for each + assume that these all have the first frame columns + + return a new frame + """ + columns = frames[0].columns + zipped = [f[c] for c in columns for f in frames] + return pd.concat(zipped, axis=1) + + +class TestDataFrameAggregate(tm.TestCase, TestData): + + _multiprocess_can_split_ = True + + def test_agg_transform(self): + + with np.errstate(all='ignore'): + + f_sqrt = np.sqrt(self.frame) + f_abs = np.abs(self.frame) + + # ufunc + result = self.frame.transform(np.sqrt) + expected = f_sqrt.copy() + assert_frame_equal(result, expected) + + result = self.frame.apply(np.sqrt) + assert_frame_equal(result, expected) + + result = self.frame.transform(np.sqrt) + assert_frame_equal(result, expected) + + # list-like + result = self.frame.apply([np.sqrt]) + expected = f_sqrt.copy() + expected.columns = pd.MultiIndex.from_product( + [self.frame.columns, ['sqrt']]) + assert_frame_equal(result, expected) + + result = self.frame.transform([np.sqrt]) + assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both + # functions per series and then concatting + expected = zip_frames(f_sqrt, f_abs) + expected.columns = pd.MultiIndex.from_product( + [self.frame.columns, ['sqrt', 'absolute']]) + result = self.frame.apply([np.sqrt, np.abs]) + assert_frame_equal(result, expected) + + result = self.frame.transform(['sqrt', np.abs]) + assert_frame_equal(result, expected) + + def test_transform_and_agg_err(self): + # cannot both transform and agg + def f(): + self.frame.transform(['max', 'min']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.frame.agg(['max', 'sqrt']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.frame.transform(['max', 'sqrt']) + self.assertRaises(ValueError, f) + + df = 
pd.DataFrame({'A': range(5), 'B': 5}) + + def f(): + with np.errstate(all='ignore'): + df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}) + + def test_demo(self): + # demonstration tests + df = pd.DataFrame({'A': range(5), 'B': 5}) + + result = df.agg(['min', 'max']) + expected = DataFrame({'A': [0, 4], 'B': [5, 5]}, + columns=['A', 'B'], + index=['min', 'max']) + tm.assert_frame_equal(result, expected) + + result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']}) + expected = DataFrame({'A': [4.0, 0.0, np.nan], + 'B': [5.0, np.nan, 25.0]}, + columns=['A', 'B'], + index=['max', 'min', 'sum']) + tm.assert_frame_equal(result.reindex_like(expected), expected) + + def test_agg_dict_nested_renaming_depr(self): + + df = pd.DataFrame({'A': range(5), 'B': 5}) + + # nested renaming + with tm.assert_produces_warning(FutureWarning): + df.agg({'A': {'foo': 'min'}, + 'B': {'bar': 'max'}}) + + def test_agg_reduce(self): + # all reducers + expected = zip_frames(self.frame.mean().to_frame(), + self.frame.max().to_frame(), + self.frame.sum().to_frame()).T + expected.index = ['mean', 'max', 'sum'] + result = self.frame.agg(['mean', 'max', 'sum']) + assert_frame_equal(result, expected) + + # dict input with scalars + result = self.frame.agg({'A': 'mean', 'B': 'sum'}) + expected = Series([self.frame.A.mean(), self.frame.B.sum()], + index=['A', 'B']) + assert_series_equal(result.reindex_like(expected), expected) + + # dict input with lists + result = self.frame.agg({'A': ['mean'], 'B': ['sum']}) + expected = DataFrame({'A': Series([self.frame.A.mean()], + index=['mean']), + 'B': Series([self.frame.B.sum()], + index=['sum'])}) + assert_frame_equal(result.reindex_like(expected), expected) + + # dict input with lists with multiple + result = self.frame.agg({'A': ['mean', 'sum'], + 'B': ['sum', 'max']}) + expected = DataFrame({'A': Series([self.frame.A.mean(), + self.frame.A.sum()], + index=['mean', 'sum']), + 'B': Series([self.frame.B.sum(), + self.frame.B.max()], + index=['sum', 
'max'])}) + assert_frame_equal(result.reindex_like(expected), expected) + + def test_nuiscance_columns(self): + + # GH 15015 + df = DataFrame({'A': [1, 2, 3], + 'B': [1., 2., 3.], + 'C': ['foo', 'bar', 'baz'], + 'D': pd.date_range('20130101', periods=3)}) + + result = df.agg('min') + expected = Series([1, 1., 'bar', pd.Timestamp('20130101')], + index=df.columns) + assert_series_equal(result, expected) + + result = df.agg(['min']) + expected = DataFrame([[1, 1., 'bar', pd.Timestamp('20130101')]], + index=['min'], columns=df.columns) + assert_frame_equal(result, expected) + + result = df.agg('sum') + expected = Series([6, 6., 'foobarbaz'], + index=['A', 'B', 'C']) + assert_series_equal(result, expected) + + result = df.agg(['sum']) + expected = DataFrame([[6, 6., 'foobarbaz']], + index=['sum'], columns=['A', 'B', 'C']) + assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 22d1de99c48be..2abae97b3151b 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -310,12 +310,14 @@ def test_agg_dict_renaming_deprecation(self): 'B': range(5), 'C': range(5)}) - with tm.assert_produces_warning(FutureWarning) as w: + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: df.groupby('A').agg({'B': {'foo': ['sum', 'max']}, 'C': {'bar': ['count', 'min']}}) assert "using a dict with renaming" in str(w[0].message) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): df.groupby('A')[['B', 'C']].agg({'ma': 'max'}) with tm.assert_produces_warning(FutureWarning) as w: diff --git a/pandas/tests/groupby/test_value_counts.py b/pandas/tests/groupby/test_value_counts.py index 801d0da070112..b70a03ec3a1d3 100644 --- a/pandas/tests/groupby/test_value_counts.py +++ b/pandas/tests/groupby/test_value_counts.py @@ -7,6 +7,7 @@ from pandas import MultiIndex, DataFrame, Series, 
date_range +@pytest.mark.slow @pytest.mark.parametrize("n,m", product((100, 1000), (5, 20))) def test_series_groupby_value_counts(n, m): np.random.seed(1234) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 16d1466bb90fe..524167602c249 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -1,13 +1,14 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 +from collections import OrderedDict import numpy as np import pandas as pd from pandas import (Index, Series, DataFrame, isnull) from pandas.compat import lrange from pandas import compat -from pandas.util.testing import assert_series_equal +from pandas.util.testing import assert_series_equal, assert_frame_equal import pandas.util.testing as tm from .common import TestData @@ -23,16 +24,11 @@ def test_apply(self): import math assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts)) - # how to handle Series result, #2316 - result = self.ts.apply(lambda x: Series( - [x, x ** 2], index=['x', 'x^2'])) - expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2}) - tm.assert_frame_equal(result, expected) - # empty series s = Series(dtype=object, name='foo', index=pd.Index([], name='bar')) rs = s.apply(lambda x: x) tm.assert_series_equal(s, rs) + # check all metadata (GH 9322) self.assertIsNot(s, rs) self.assertIs(s.index, rs.index) @@ -64,6 +60,13 @@ def test_apply_dont_convert_dtype(self): result = s.apply(f, convert_dtype=False) self.assertEqual(result.dtype, object) + def test_with_string_args(self): + + for arg in ['sum', 'mean', 'min', 'max', 'std']: + result = self.ts.apply(arg) + expected = getattr(self.ts, arg)() + self.assertEqual(result, expected) + def test_apply_args(self): s = Series(['foo,bar']) @@ -136,6 +139,170 @@ def f(x): exp = pd.Series(['Asia/Tokyo'] * 25, name='XX') tm.assert_series_equal(result, exp) + def test_apply_dict_depr(self): + + tsdf = pd.DataFrame(np.random.randn(10, 3), + columns=['A', 'B', 'C'], + 
index=pd.date_range('1/1/2000', periods=10)) + with tm.assert_produces_warning(FutureWarning): + tsdf.A.agg({'foo': ['sum', 'mean']}) + + +class TestSeriesAggregate(TestData, tm.TestCase): + + _multiprocess_can_split_ = True + + def test_transform(self): + # transforming functions + + with np.errstate(all='ignore'): + + f_sqrt = np.sqrt(self.series) + f_abs = np.abs(self.series) + + # ufunc + result = self.series.transform(np.sqrt) + expected = f_sqrt.copy() + assert_series_equal(result, expected) + + result = self.series.apply(np.sqrt) + assert_series_equal(result, expected) + + # list-like + result = self.series.transform([np.sqrt]) + expected = f_sqrt.to_frame().copy() + expected.columns = ['sqrt'] + assert_frame_equal(result, expected) + + result = self.series.transform([np.sqrt]) + assert_frame_equal(result, expected) + + result = self.series.transform(['sqrt']) + assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = pd.concat([f_sqrt, f_abs], axis=1) + expected.columns = ['sqrt', 'absolute'] + result = self.series.apply([np.sqrt, np.abs]) + assert_frame_equal(result, expected) + + result = self.series.transform(['sqrt', 'abs']) + expected.columns = ['sqrt', 'abs'] + assert_frame_equal(result, expected) + + # dict, provide renaming + expected = pd.concat([f_sqrt, f_abs], axis=1) + expected.columns = ['foo', 'bar'] + expected = expected.unstack().rename('series') + + result = self.series.apply({'foo': np.sqrt, 'bar': np.abs}) + assert_series_equal(result.reindex_like(expected), expected) + + def test_transform_and_agg_error(self): + # we are trying to transform with an aggregator + def f(): + self.series.transform(['min', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.series.agg(['sqrt', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + 
self.series.transform(['sqrt', 'max']) + self.assertRaises(ValueError, f) + + def f(): + with np.errstate(all='ignore'): + self.series.agg({'foo': np.sqrt, 'bar': 'sum'}) + self.assertRaises(ValueError, f) + + def test_demo(self): + # demonstration tests + s = Series(range(6), dtype='int64', name='series') + + result = s.agg(['min', 'max']) + expected = Series([0, 5], index=['min', 'max'], name='series') + tm.assert_series_equal(result, expected) + + result = s.agg({'foo': 'min'}) + expected = Series([0], index=['foo'], name='series') + tm.assert_series_equal(result, expected) + + # nested renaming + with tm.assert_produces_warning(FutureWarning): + result = s.agg({'foo': ['min', 'max']}) + + expected = DataFrame( + {'foo': [0, 5]}, + index=['min', 'max']).unstack().rename('series') + tm.assert_series_equal(result, expected) + + def test_multiple_aggregators_with_dict_api(self): + + s = Series(range(6), dtype='int64', name='series') + # nested renaming + with tm.assert_produces_warning(FutureWarning): + result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']}) + + expected = DataFrame( + {'foo': [5.0, np.nan, 0.0, np.nan], + 'bar': [np.nan, 2.5, np.nan, 15.0]}, + columns=['foo', 'bar'], + index=['max', 'mean', + 'min', 'sum']).unstack().rename('series') + tm.assert_series_equal(result.reindex_like(expected), expected) + + def test_agg_apply_evaluate_lambdas_the_same(self): + # test that we are evaluating row-by-row first + # before vectorized evaluation + result = self.series.apply(lambda x: str(x)) + expected = self.series.agg(lambda x: str(x)) + tm.assert_series_equal(result, expected) + + result = self.series.apply(str) + expected = self.series.agg(str) + tm.assert_series_equal(result, expected) + + def test_with_nested_series(self): + # GH 2316 + # .agg with a reducer and a transform, what to do + result = self.ts.apply(lambda x: Series( + [x, x ** 2], index=['x', 'x^2'])) + expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2}) + 
tm.assert_frame_equal(result, expected) + + result = self.ts.agg(lambda x: Series( + [x, x ** 2], index=['x', 'x^2'])) + tm.assert_frame_equal(result, expected) + + def test_replicate_describe(self): + # this also tests a result set that is all scalars + expected = self.series.describe() + result = self.series.apply(OrderedDict( + [('count', 'count'), + ('mean', 'mean'), + ('std', 'std'), + ('min', 'min'), + ('25%', lambda x: x.quantile(0.25)), + ('50%', 'median'), + ('75%', lambda x: x.quantile(0.75)), + ('max', 'max')])) + assert_series_equal(result, expected) + + def test_reduce(self): + # reductions with named functions + result = self.series.agg(['sum', 'mean']) + expected = Series([self.series.sum(), + self.series.mean()], + ['sum', 'mean'], + name=self.series.name) + assert_series_equal(result, expected) + class TestSeriesMap(TestData, tm.TestCase): From 614a48e3e6640b5694fd1ed6cbe56a760e89dd50 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Fri, 14 Apr 2017 11:37:19 -0400 Subject: [PATCH 23/56] DOC: whatsnew updates --- doc/source/basics.rst | 15 ++-- doc/source/computation.rst | 2 +- doc/source/timeseries.rst | 2 +- doc/source/whatsnew/v0.20.0.txt | 122 ++++++++++++++++++-------------- 4 files changed, 80 insertions(+), 61 deletions(-) diff --git a/doc/source/basics.rst b/doc/source/basics.rst index ab5d7e69c923b..5789f39266927 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -917,7 +917,8 @@ Aggregating with a dict +++++++++++++++++++++++ Passing a dictionary of column names to a scalar or a list of scalars, to ``DataFame.agg`` -allows you to customize which functions are applied to which columns. +allows you to customize which functions are applied to which columns. Note that the results +are not in any particular order, you can use an ``OrderedDict`` instead to guarantee ordering. .. ipython:: python @@ -977,9 +978,9 @@ Transform API .. 
versionadded:: 0.20.0 -The :method:`~DataFrame.transform` method returns an object that is indexed the same (same size) +The :meth:`~DataFrame.transform` method returns an object that is indexed the same (same size) as the original. This API allows you to provide *multiple* operations at the same -time rather than one-by-one. Its api is quite similar to the ``.agg`` API. +time rather than one-by-one. Its API is quite similar to the ``.agg`` API. Use a similar frame to the above sections. @@ -990,8 +991,8 @@ Use a similar frame to the above sections. tsdf.iloc[3:7] = np.nan tsdf -Transform the entire frame. Transform allows functions to input as a numpy function, string -function name and user defined function. +Transform the entire frame. ``.transform()`` allows input functions as: a numpy function, a string +function name or a user defined function. .. ipython:: python @@ -999,13 +1000,13 @@ function name and user defined function. tsdf.transform('abs') tsdf.transform(lambda x: x.abs()) -Since this is a single function, this is equivalent to a ufunc application +Here ``.transform()`` received a single function; this is equivalent to a ufunc application .. ipython:: python np.abs(tsdf) -Passing a single function to ``.transform()`` with a Series will yield a single Series in return. +Passing a single function to ``.transform()`` with a ``Series`` will yield a single ``Series`` in return. .. ipython:: python diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 8c75d4355c99a..76a030d355e33 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -618,7 +618,7 @@ Aggregation Once the ``Rolling``, ``Expanding`` or ``EWM`` objects have been created, several methods are available to perform multiple computations on the data. These operations are similar to the :ref:`aggregating API `, -:ref:`groupby aggregates `, and :ref:`resample API `. +:ref:`groupby API `, and :ref:`resample API `. .. 
ipython:: python diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 6a4ea2d5319ab..71d85f9b3995b 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1524,7 +1524,7 @@ We can instead only resample those groups where we have points as follows: Aggregation ~~~~~~~~~~~ -Similar to the :ref:`aggregating API `, :ref:`groupby aggregates API `, and the :ref:`window functions API `, +Similar to the :ref:`aggregating API `, :ref:`groupby API `, and the :ref:`window functions API `, a ``Resampler`` can be selectively resampled. Resampling a ``DataFrame``, the default will be to act on all columns with the same function. diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index da32de750e7de..133757b131312 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1,9 +1,9 @@ .. _whatsnew_0200: -v0.20.0 (April ??, 2017) +v0.20.0 (May 12, 2017) ------------------------ -This is a major release from 0.19 and includes a small number of API changes, several new features, +This is a major release from 0.19.2 and includes a small number of API changes, deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all users upgrade to this version. @@ -13,11 +13,11 @@ Highlights include: - Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here `. 
- The ``.ix`` indexer has been deprecated, see :ref:`here ` - ``Panel`` has been deprecated, see :ref:`here ` -- Improved user API when accessing levels in ``.groupby()``, see :ref:`here ` -- Improved support for UInt64 dtypes, see :ref:`here ` - Addition of an ``IntervalIndex`` and ``Interval`` scalar type, see :ref:`here ` -- A new orient for JSON serialization, ``orient='table'``, that uses the Table Schema spec, see :ref:`here ` -- Window Binary Corr/Cov operations return a MultiIndexed ``DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated, see :ref:`here ` +- Improved user API when accessing levels in ``.groupby()``, see :ref:`here ` +- Improved support for ``UInt64`` dtypes, see :ref:`here ` +- A new orient for JSON serialization, ``orient='table'``, that uses the :ref:`Table Schema spec ` +- Window Binary Corr/Cov operations now return a MultiIndexed ``DataFrame`` rather than a ``Panel``, as ``Panel`` is now deprecated, see :ref:`here ` - Support for S3 handling now uses ``s3fs``, see :ref:`here ` - Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here ` - Switched the test framework to use `pytest `__ (:issue:`13097`) @@ -42,7 +42,7 @@ New features Series & DataFrame have been enhanced to support the aggregation API. This is an already familiar API that is supported for groupby, window operations, and resampling. This allows one to express, possibly multiple aggregation operations, in a single concise way by using :meth:`~DataFrame.agg`, -and :meth:`~DataFrame.transform`. The full documentation is :ref:`here `` (:issue:`1623`) +and :meth:`~DataFrame.transform`. The full documentation is :ref:`here ` (:issue:`1623`) Here is a sample @@ -67,7 +67,7 @@ Multiple functions in lists. df.agg(['sum', 'min']) -Dictionaries to provide the ability to provide selective aggregation per column. +Using a dict provides the ability to have selective aggregation per column. You will get a matrix-like output of all of the aggregators. 
The output will consist of all unique functions. Those that are not noted for a particular column will be ``NaN``: @@ -129,7 +129,7 @@ fixed-width text files, and :func:`read_excel` for parsing Excel files. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :func:`to_datetime` has gained a new parameter, ``origin``, to define a reference date -from where to compute the resulting ``DatetimeIndex``. (:issue:`11276`, :issue:`11745`) +from where to compute the resulting ``DatetimeIndex`` when ``unit`` is specified. (:issue:`11276`, :issue:`11745`) Start with 1960-01-01 as the starting date @@ -138,7 +138,7 @@ Start with 1960-01-01 as the starting date pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``. -Commonly called 'unix epoch' or POSIX time. +Commonly called 'unix epoch' or POSIX time. This was the previous default, so this is a backward compatible change. .. ipython:: python @@ -149,7 +149,7 @@ Commonly called 'unix epoch' or POSIX time. pandas errors ^^^^^^^^^^^^^ -We are adding a standard public location for all pandas exceptions & warnings ``pandas.errors``. (:issue:`14800`). Previously +We are adding a standard public module for all pandas exceptions & warnings ``pandas.errors``. (:issue:`14800`). Previously these exceptions & warnings could be imported from ``pandas.core.common`` or ``pandas.io.common``. These exceptions and warnings will be removed from the ``*.common`` locations in a future release. (:issue:`15541`) @@ -243,7 +243,7 @@ Inferring compression type from the extension rt = pd.read_pickle("data.pkl.xz", compression="infer") rt -The default is to 'infer +The default is to ``infer``: .. ipython:: python @@ -348,7 +348,7 @@ protocol). This gives frontends like the Jupyter notebook and `nteract`_ more flexiblity in how they display pandas objects, since they have more information about the data. 
-You must enable this by setting the ``display.html.table_schema`` option to True. +You must enable this by setting the ``display.html.table_schema`` option to ``True``. .. _Table Schema: http://specs.frictionlessdata.io/json-table-schema/ .. _nteract: http://nteract.io/ @@ -385,21 +385,24 @@ IntervalIndex ^^^^^^^^^^^^^ pandas has gained an ``IntervalIndex`` with its own dtype, ``interval`` as well as the ``Interval`` scalar type. These allow first-class support for interval -notation, specifically as a return type for the categories in ``pd.cut`` and ``pd.qcut``. The ``IntervalIndex`` allows some unique indexing, see the +notation, specifically as a return type for the categories in :func:`cut` and :func:`qcut`. The ``IntervalIndex`` allows some unique indexing, see the :ref:`docs `. (:issue:`7640`, :issue:`8625`) Previous behavior: +The returned categories were strings, representing Intervals + .. code-block:: ipython - In [2]: pd.cut(range(3), 2) + In [1]: c = pd.cut(range(4), bins=2) + + In [2]: c Out[2]: - [(-0.002, 1], (-0.002, 1], (1, 2]] - Categories (2, object): [(-0.002, 1] < (1, 2]] + [(-0.003, 1.5], (-0.003, 1.5], (1.5, 3], (1.5, 3]] + Categories (2, object): [(-0.003, 1.5] < (1.5, 3]] - # the returned categories are strings, representing Intervals - In [3]: pd.cut(range(3), 2).categories - Out[3]: Index(['(-0.002, 1]', '(1, 2]'], dtype='object') + In [3]: c.categories + Out[3]: Index(['(-0.003, 1.5]', '(1.5, 3]'], dtype='object') New behavior: @@ -409,28 +412,29 @@ New behavior: c c.categories -Furthermore, this allows one to bin *other* data with these same bins. ``NaN`` represents a missing +Furthermore, this allows one to bin *other* data with these same bins, with ``NaN`` represents a missing value similar to other dtypes. .. ipython:: python - pd.cut([0, 3, 1, 1], bins=c.categories) + pd.cut([0, 3, 5, 1], bins=c.categories) -These can also used in ``Series`` and ``DataFrame``, and indexed. 
+An ``IntervalIndex`` can also be used in ``Series`` and ``DataFrame`` as the index. .. ipython:: python df = pd.DataFrame({'A': range(4), 'B': pd.cut([0, 3, 1, 1], bins=c.categories)} ).set_index('B') + df -Selecting a specific interval +Selecting via a specific interval: .. ipython:: python df.loc[pd.Interval(1.5, 3.0)] -Selecting via a scalar value that is contained in the intervals. +Selecting via a scalar value that is contained *in* the intervals. .. ipython:: python @@ -454,7 +458,7 @@ Other Enhancements - ``DataFrame.groupby()`` has gained a ``.nunique()`` method to count the distinct values for all columns within each group (:issue:`14336`, :issue:`15197`). - ``pd.read_excel()`` now preserves sheet order when using ``sheetname=None`` (:issue:`9930`) -- Multiple offset aliases with decimal points are now supported (e.g. '0.5min' is parsed as '30s') (:issue:`8419`) +- Multiple offset aliases with decimal points are now supported (e.g. ``0.5min`` is parsed as ``30s``) (:issue:`8419`) - ``.isnull()`` and ``.notnull()`` have been added to ``Index`` object to make them more consistent with the ``Series`` API (:issue:`15300`) - New ``UnsortedIndexError`` (subclass of ``KeyError``) raised when indexing/slicing into an @@ -467,11 +471,11 @@ Other Enhancements - The ``usecols`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`14154`) - The ``skiprows`` argument in ``pd.read_csv()`` now accepts a callable function as a value (:issue:`10882`) - The ``nrows`` and ``chunksize`` arguments in ``pd.read_csv()`` are supported if both are passed (:issue:`6774`, :issue:`15755`) -- ``pd.DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) -- ``pd.DataFrame.plot`` can pass `matplotlib 2.0 default color cycle as a single string as color parameter `__. 
(:issue:`15516`) -- ``pd.Series.interpolate`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) +- ``DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) +- ``DataFrame.plot`` can pass the matplotlib 2.0 default color cycle as a single string as color parameter, see `here `__. (:issue:`15516`) +- ``Series.interpolate()`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) - ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs ` (:issue:`15136`) -- ``.select_dtypes()`` now allows the string 'datetimetz' to generically select datetimes with tz (:issue:`14910`) +- ``.select_dtypes()`` now allows the string ``datetimetz`` to generically select datetimes with tz (:issue:`14910`) - The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements - ``pd.merge_asof()`` gained the option ``direction='backward'|'forward'|'nearest'`` (:issue:`14887`) @@ -483,16 +487,16 @@ Other Enhancements - ``pd.read_html()`` will parse multiple header rows, creating a multiindex header. (:issue:`13434`). - HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`) -- ``pd.TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) +- ``TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) - ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs ` for more information. -- ``pd.DataFrame.to_latex`` and ``pd.DataFrame.to_string`` now allow optional header aliases. 
(:issue:`15536`) -- Re-enable the ``parse_dates`` keyword of ``read_excel`` to parse string columns as dates (:issue:`14326`) +- ``DataFrame.to_latex()`` and ``DataFrame.to_string()`` now allow optional header aliases. (:issue:`15536`) +- Re-enable the ``parse_dates`` keyword of ``pd.read_excel()`` to parse string columns as dates (:issue:`14326`) - Added ``.empty`` property to subclasses of ``Index``. (:issue:`15270`) - Enabled floor division for ``Timedelta`` and ``TimedeltaIndex`` (:issue:`15828`) - ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) - ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`) - ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`) -- :func:`MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. (:issue:`15694`) +- :method:`~MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. (:issue:`15694`) - ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`) - ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`) - ``parallel_coordinates()`` has gained a ``sort_labels`` keyword arg that sorts class labels and the colours assigned to them (:issue:`15908`) @@ -592,10 +596,10 @@ list, and a dict of column names to scalars or lists. This provides a useful syn However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the result columns. This is a complicated and confusing syntax, as well as not consistent between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility. 
-1) We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed +- We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed one to ``rename`` the resulting aggregation, but this had a completely different meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations. -2) We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner. +- We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner. This is an illustrative example: @@ -607,14 +611,14 @@ This is an illustrative example: df Here is a typical useful syntax for computing different aggregations for different columns. This -is a natural (and useful) syntax. We aggregate from the dict-to-list by taking the specified +is a natural, and useful syntax. We aggregate from the dict-to-list by taking the specified columns and applying the list of functions. This returns a ``MultiIndex`` for the columns. .. ipython:: python df.groupby('A').agg({'B': 'sum', 'C': 'min'}) -Here's an example of the first deprecation (1), passing a dict to a grouped ``Series``. This +Here's an example of the first deprecation, passing a dict to a grouped ``Series``. This is a combination aggregation & renaming: .. code-block:: ipython @@ -633,17 +637,18 @@ You can accomplish the same operation, more idiomatically by: .. ipython:: python - df.groupby('A').B.agg(['count']).rename({'count': 'foo'}) + df.groupby('A').B.agg(['count']).rename(columns={'count': 'foo'}) -Here's an example of the second deprecation (2), passing a dict-of-dict to a grouped ``DataFrame``: +Here's an example of the second deprecation, passing a dict-of-dict to a grouped ``DataFrame``: .. 
code-block:: python In [23]: (df.groupby('A') .agg({'B': {'foo': 'sum'}, 'C': {'bar': 'min'}}) ) - FutureWarning: using a dict with renaming is deprecated and will be removed in a future version + FutureWarning: using a dict with renaming is deprecated and + will be removed in a future version Out[23]: B C @@ -805,7 +810,7 @@ ndarray, you can always convert explicitly using ``np.asarray(idx.hour)``. pd.unique will now be consistent with extension types ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In prior versions, using ``Series.unique()`` and ``pd.unique(Series)`` on ``Categorical`` and tz-aware +In prior versions, using ``Series.unique()`` and :func:`unique` on ``Categorical`` and tz-aware datatypes would yield different return types. These are now made consistent. (:issue:`15903`) - Datetime tz-aware @@ -884,7 +889,7 @@ in prior versions of pandas. (:issue:`11915`). Partial String Indexing Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:ref:`DatetimeIndex Partial String Indexing ` now works as exact match, provided that string resolution coincides with index resolution, including a case when both are seconds (:issue:`14826`). See :ref:`Slice vs. Exact Match ` for details. +:ref:`DatetimeIndex Partial String Indexing ` now works as an exact match, provided that string resolution coincides with index resolution, including a case when both are seconds (:issue:`14826`). See :ref:`Slice vs. Exact Match ` for details. .. ipython:: python @@ -1031,7 +1036,7 @@ DataFrame.sort_index changes In certain cases, calling ``.sort_index()`` on a MultiIndexed DataFrame would return the *same* DataFrame without seeming to sort. This would happen with a ``lexsorted``, but non-monotonic levels. (:issue:`15622`, :issue:`15687`, :issue:`14015`, :issue:`13431`) -This is UNCHANGED between versions, but showing for illustration purposes: +This is *unchanged* from prior versions, but shown for illustration purposes: .. 
ipython:: python @@ -1196,21 +1201,28 @@ HDFStore where string comparison ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In previous versions most types could be compared to string column in a ``HDFStore`` -usually resulting in an invalid comparsion. These comparisions will now raise a +usually resulting in an invalid comparsion, returning an empty result frame. These comparisions will now raise a ``TypeError`` (:issue:`15492`) -New Behavior: +.. ipython:: python + + df = pd.DataFrame({'unparsed_date': ['2014-01-01', '2014-01-01']}) + df.to_hdf('store.h5', 'key', format='table', data_columns=True) + df.dtypes + +Previous Behavior: .. code-block:: ipython - In [15]: df = pd.DataFrame({'unparsed_date': ['2014-01-01', '2014-01-01']}) + In [4]: pd.read_hdf('store.h5', 'key', where='unparsed_date > ts') + File "", line 1 + (unparsed_date > 1970-01-01 00:00:01.388552400) + ^ + SyntaxError: invalid token - In [16]: df.dtypes - Out[16]: - unparsed_date object - dtype: object +New Behavior: - In [17]: df.to_hdf('store.h5', 'key', format='table', data_columns=True) +.. code-block:: ipython In [18]: ts = pd.Timestamp('2014-01-01') @@ -1218,6 +1230,12 @@ New Behavior: TypeError: Cannot compare 2014-01-01 00:00:00 of type to string column +.. ipython:: python + :suppress: + + import os + os.remove('store.h5') + .. 
_whatsnew_0200.api_breaking.index_order: Index.intersection and inner join now preserve the order of the left Index From 2d9451ddc36c194f1d65dab7568901c9ea998002 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Fri, 14 Apr 2017 19:55:19 +0000 Subject: [PATCH 24/56] DEPR: more deprecation warnings (#16001) --- pandas/tests/io/test_pytables.py | 19 +++++++++++-------- pandas/tests/io/test_sql.py | 8 +++++--- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 9908a320a6646..2df8872e23616 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -150,20 +150,23 @@ class TestHDFStore(Base, tm.TestCase): def test_factory_fun(self): path = create_tempfile(self.path) try: - with get_store(path) as tbl: - raise ValueError('blah') + with catch_warnings(record=True): + with get_store(path) as tbl: + raise ValueError('blah') except ValueError: pass finally: safe_remove(path) try: - with get_store(path) as tbl: - tbl['a'] = tm.makeDataFrame() + with catch_warnings(record=True): + with get_store(path) as tbl: + tbl['a'] = tm.makeDataFrame() - with get_store(path) as tbl: - self.assertEqual(len(tbl), 1) - self.assertEqual(type(tbl['a']), DataFrame) + with catch_warnings(record=True): + with get_store(path) as tbl: + self.assertEqual(len(tbl), 1) + self.assertEqual(type(tbl['a']), DataFrame) finally: safe_remove(self.path) @@ -348,7 +351,7 @@ def test_api_default_format(self): pandas.set_option('io.hdf.default_format', 'fixed') df.to_hdf(path, 'df') - with get_store(path) as store: + with HDFStore(path) as store: self.assertFalse(store.get_storer('df').is_table) self.assertRaises(ValueError, df.to_hdf, path, 'df2', append=True) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 5318e8532c58e..b4e8d6a3b972c 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -18,6 +18,7 @@ """ from __future__ import print_function 
+from warnings import catch_warnings import pytest import unittest import sqlite3 @@ -586,9 +587,10 @@ def test_to_sql_series(self): tm.assert_frame_equal(s.to_frame(), s2) def test_to_sql_panel(self): - panel = tm.makePanel() - self.assertRaises(NotImplementedError, sql.to_sql, panel, - 'test_panel', self.conn) + with catch_warnings(record=True): + panel = tm.makePanel() + self.assertRaises(NotImplementedError, sql.to_sql, panel, + 'test_panel', self.conn) def test_roundtrip(self): sql.to_sql(self.test_frame1, 'test_frame_roundtrip', From 85a9f8c2d5218c92a042bc7fdafe619758bfe31c Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Fri, 14 Apr 2017 21:31:31 +0000 Subject: [PATCH 25/56] TST: test addl feather dtypes (#16004) --- pandas/tests/io/test_feather.py | 38 +++++++++++++++++---------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 6e2c28a0f68de..3fad2637ef057 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -8,23 +8,18 @@ from pandas.io.feather_format import to_feather, read_feather from feather import FeatherError -import pandas.util.testing as tm from pandas.util.testing import assert_frame_equal, ensure_clean -class TestFeather(tm.TestCase): - - def setUp(self): - pass +class TestFeather(object): def check_error_on_write(self, df, exc): # check that we are raising the exception # on writing - def f(): + with pytest.raises(exc): with ensure_clean() as path: to_feather(df, path) - self.assertRaises(exc, f) def check_round_trip(self, df): @@ -41,17 +36,21 @@ def test_error(self): def test_basic(self): - df = pd.DataFrame({'a': list('abc'), - 'b': list(range(1, 4)), - 'c': np.arange(3, 6).astype('u1'), - 'd': np.arange(4.0, 7.0, dtype='float64'), - 'e': [True, False, True], - 'f': pd.Categorical(list('abc')), - 'g': pd.date_range('20130101', periods=3), - 'h': pd.date_range('20130101', periods=3, - tz='US/Eastern'), - 'i': 
pd.date_range('20130101', periods=3, - freq='ns')}) + df = pd.DataFrame({'string': list('abc'), + 'int': list(range(1, 4)), + 'uint': np.arange(3, 6).astype('u1'), + 'float': np.arange(4.0, 7.0, dtype='float64'), + 'float_with_null': [1., np.nan, 3], + 'bool': [True, False, True], + 'bool_with_null': [True, np.nan, False], + 'cat': pd.Categorical(list('abc')), + 'dt': pd.date_range('20130101', periods=3), + 'dttz': pd.date_range('20130101', periods=3, + tz='US/Eastern'), + 'dt_with_null': [pd.Timestamp('20130101'), pd.NaT, + pd.Timestamp('20130103')], + 'dtns': pd.date_range('20130101', periods=3, + freq='ns')}) self.check_round_trip(df) @@ -80,6 +79,9 @@ def test_unsupported(self): df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)}) self.check_error_on_write(df, ValueError) + df = pd.DataFrame({'a': pd.timedelta_range('1 day', periods=3)}) + self.check_error_on_write(df, FeatherError) + # non-strings df = pd.DataFrame({'a': ['a', 1, 2.0]}) self.check_error_on_write(df, ValueError) From 9c56098a5fdf2ce1bec989671a077722e64d647f Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sat, 15 Apr 2017 02:02:51 +0000 Subject: [PATCH 26/56] TST: 32bit compat for interval get_indexer (#16006) --- pandas/tests/indexes/test_interval.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 25ca961895ca3..79b6ff2e7a2a7 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -413,24 +413,24 @@ def test_get_loc_interval(self): def test_get_indexer(self): actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) - expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='int64') + expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') self.assert_numpy_array_equal(actual, expected) actual = self.index.get_indexer(self.index) - expected = np.array([0, 1], dtype='int64') + expected = np.array([0, 1], 
dtype='intp') self.assert_numpy_array_equal(actual, expected) index = IntervalIndex.from_breaks([0, 1, 2], closed='left') actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3]) - expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='int64') + expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp') self.assert_numpy_array_equal(actual, expected) actual = self.index.get_indexer(index[:1]) - expected = np.array([0], dtype='int64') + expected = np.array([0], dtype='intp') self.assert_numpy_array_equal(actual, expected) actual = self.index.get_indexer(index) - expected = np.array([-1, 1], dtype='int64') + expected = np.array([-1, 1], dtype='intp') self.assert_numpy_array_equal(actual, expected) def test_get_indexer_subintervals(self): @@ -439,21 +439,21 @@ def test_get_indexer_subintervals(self): # return indexers for wholly contained subintervals target = IntervalIndex.from_breaks(np.linspace(0, 2, 5)) actual = self.index.get_indexer(target) - expected = np.array([0, 0, 1, 1], dtype='int64') + expected = np.array([0, 0, 1, 1], dtype='p') self.assert_numpy_array_equal(actual, expected) target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2]) actual = self.index.get_indexer(target) - expected = np.array([0, 0, 1, 1], dtype='int64') + expected = np.array([0, 0, 1, 1], dtype='intp') self.assert_numpy_array_equal(actual, expected) actual = self.index.get_indexer(target[[0, -1]]) - expected = np.array([0, 1], dtype='int64') + expected = np.array([0, 1], dtype='intp') self.assert_numpy_array_equal(actual, expected) target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left') actual = self.index.get_indexer(target) - expected = np.array([0, 0, 0], dtype='int64') + expected = np.array([0, 0, 0], dtype='intp') self.assert_numpy_array_equal(actual, expected) def test_contains(self): @@ -505,7 +505,7 @@ def test_non_contiguous(self): index = IntervalIndex.from_tuples([(0, 1), (2, 3)]) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) - expected = np.array([0, 
-1, 1], dtype='int64') + expected = np.array([0, -1, 1], dtype='intp') self.assert_numpy_array_equal(actual, expected) self.assertNotIn(1.5, index) @@ -655,7 +655,7 @@ def test_datetime(self): target = pd.date_range('1999-12-31T12:00', periods=7, freq='12H') actual = idx.get_indexer(target) - expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='int64') + expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') self.assert_numpy_array_equal(actual, expected) def test_append(self): @@ -779,9 +779,9 @@ def test_get_loc_closed(self): np.array([0], dtype='int64')) def test_get_indexer_closed(self): - x = np.arange(1000, dtype='int64') + x = np.arange(1000, dtype='intp') found = x - not_found = (-1 * np.ones(1000)).astype('int64') + not_found = (-1 * np.ones(1000)).astype('intp') for leaf_size in [1, 10, 100, 10000]: for closed in ['left', 'right', 'both', 'neither']: tree = IntervalTree(x, x + 0.5, closed=closed, From 61d84dbf161f72081165d170a36978d2d942e19d Mon Sep 17 00:00:00 2001 From: Daniel Himmelstein Date: Sat, 15 Apr 2017 09:46:43 -0400 Subject: [PATCH 27/56] Support dicts with default values in series.map (#16002) * series.map: support dicts with defaults closes #15999 --- doc/source/whatsnew/v0.20.0.txt | 4 +-- pandas/core/series.py | 41 ++++++++++++++++++++++++------ pandas/tests/series/test_apply.py | 42 ++++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 12 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 133757b131312..089c4f59445e3 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -449,10 +449,7 @@ Other Enhancements - Integration with the ``feather-format``, including a new top-level ``pd.read_feather()`` and ``DataFrame.to_feather()`` method, see :ref:`here `. 
- ``Series.str.replace()`` now accepts a callable, as replacement, which is passed to ``re.sub`` (:issue:`15055`) - ``Series.str.replace()`` now accepts a compiled regular expression as a pattern (:issue:`15446`) - - - ``Series.sort_index`` accepts parameters ``kind`` and ``na_position`` (:issue:`13589`, :issue:`14444`) - - ``DataFrame`` has gained a ``nunique()`` method to count the distinct values over an axis (:issue:`14336`). - ``DataFrame`` has gained a ``melt()`` method, equivalent to ``pd.melt()``, for unpivoting from a wide to long format (:issue:`12640`). - ``DataFrame.groupby()`` has gained a ``.nunique()`` method to count the distinct values for all columns within each group (:issue:`14336`, :issue:`15197`). @@ -1302,6 +1299,7 @@ Other API Changes - ``CParserError`` has been renamed to ``ParserError`` in ``pd.read_csv()`` and will be removed in the future (:issue:`12665`) - ``SparseArray.cumsum()`` and ``SparseSeries.cumsum()`` will now always return ``SparseArray`` and ``SparseSeries`` respectively (:issue:`12855`) - ``DataFrame.applymap()`` with an empty ``DataFrame`` will return a copy of the empty ``DataFrame`` instead of a ``Series`` (:issue:`8222`) +- ``Series.map()`` now respects default values of dictionary subclasses with a ``__missing__`` method, such as ``collections.Counter`` (:issue:`15999`) - ``.loc`` has compat with ``.ix`` for accepting iterators, and NamedTuples (:issue:`15120`) - ``interpolate()`` and ``fillna()`` will raise a ``ValueError`` if the ``limit`` keyword argument is not greater than 0. 
(:issue:`9217`) - ``pd.read_csv()`` will now issue a ``ParserWarning`` whenever there are conflicting values provided by the ``dialect`` parameter and the user (:issue:`14898`) diff --git a/pandas/core/series.py b/pandas/core/series.py index 3305f0b6c439e..7f8a97af99490 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2079,8 +2079,8 @@ def map(self, arg, na_action=None): two bar three baz - Mapping a dictionary keys on the index labels works similar as - with a `Series`: + If `arg` is a dictionary, return a new Series with values converted + according to the dictionary's mapping: >>> z = {1: 'A', 2: 'B', 3: 'C'} @@ -2094,16 +2094,14 @@ def map(self, arg, na_action=None): >>> s = pd.Series([1, 2, 3, np.nan]) - >>> s2 = s.map(lambda x: 'this is a string {}'.format(x), - na_action=None) + >>> s2 = s.map('this is a string {}'.format, na_action=None) 0 this is a string 1.0 1 this is a string 2.0 2 this is a string 3.0 3 this is a string nan dtype: object - >>> s3 = s.map(lambda x: 'this is a string {}'.format(x), - na_action='ignore') + >>> s3 = s.map('this is a string {}'.format, na_action='ignore') 0 this is a string 1.0 1 this is a string 2.0 2 this is a string 3.0 @@ -2115,6 +2113,23 @@ def map(self, arg, na_action=None): Series.apply: For applying more complex functions on a Series DataFrame.apply: Apply a function row-/column-wise DataFrame.applymap: Apply a function elementwise on a whole DataFrame + + Notes + ----- + When `arg` is a dictionary, values in Series that are not in the + dictionary (as keys) are converted to ``NaN``. However, if the + dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. 
+ provides a method for default values), then this default is used + rather than ``NaN``: + + >>> from collections import Counter + >>> counter = Counter() + >>> counter['bar'] += 1 + >>> y.map(counter) + 1 0 + 2 1 + 3 0 + dtype: int64 """ if is_extension_type(self.dtype): @@ -2132,13 +2147,23 @@ def map_f(values, f): else: map_f = lib.map_infer - if isinstance(arg, (dict, Series)): - if isinstance(arg, dict): + if isinstance(arg, dict): + if hasattr(arg, '__missing__'): + # If a dictionary subclass defines a default value method, + # convert arg to a lookup function (GH #15999). + dict_with_default = arg + arg = lambda x: dict_with_default[x] + else: + # Dictionary does not have a default. Thus it's safe to + # convert to an indexed series for efficiency. arg = self._constructor(arg, index=arg.keys()) + if isinstance(arg, Series): + # arg is a Series indexer = arg.index.get_indexer(values) new_values = algorithms.take_1d(arg._values, indexer) else: + # arg is a function new_values = map_f(values, arg) return self._constructor(new_values, diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 524167602c249..a4a49e3aeb826 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -1,7 +1,7 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 -from collections import OrderedDict +from collections import Counter, defaultdict, OrderedDict import numpy as np import pandas as pd @@ -411,6 +411,46 @@ def test_map_dict_with_tuple_keys(self): tm.assert_series_equal(df['labels'], df['expected_labels'], check_names=False) + def test_map_counter(self): + s = Series(['a', 'b', 'c'], index=[1, 2, 3]) + counter = Counter() + counter['b'] = 5 + counter['c'] += 1 + result = s.map(counter) + expected = Series([0, 5, 1], index=[1, 2, 3]) + assert_series_equal(result, expected) + + def test_map_defaultdict(self): + s = Series([1, 2, 3], index=['a', 'b', 'c']) + default_dict = defaultdict(lambda: 'blank') + default_dict[1] 
= 'stuff' + result = s.map(default_dict) + expected = Series(['stuff', 'blank', 'blank'], index=['a', 'b', 'c']) + assert_series_equal(result, expected) + + def test_map_dict_subclass_with_missing(self): + """ + Test Series.map with a dictionary subclass that defines __missing__, + i.e. sets a default value (GH #15999). + """ + class DictWithMissing(dict): + def __missing__(self, key): + return 'missing' + s = Series([1, 2, 3]) + dictionary = DictWithMissing({3: 'three'}) + result = s.map(dictionary) + expected = Series(['missing', 'missing', 'three']) + assert_series_equal(result, expected) + + def test_map_dict_subclass_without_missing(self): + class DictWithoutMissing(dict): + pass + s = Series([1, 2, 3]) + dictionary = DictWithoutMissing({3: 'three'}) + result = s.map(dictionary) + expected = Series([np.nan, np.nan, 'three']) + assert_series_equal(result, expected) + def test_map_box(self): vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')] s = pd.Series(vals) From 413e2c64d4cfa17c331052e8d0a2b78551cdb53e Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Sat, 15 Apr 2017 08:59:00 -0500 Subject: [PATCH 28/56] ENH: Style blocks (#15954) --- .gitignore | 1 + MANIFEST.in | 1 + ci/requirements-3.5_DOC.run | 1 + doc/source/style.ipynb | 286 ++++++++++++++++++----------- doc/source/template_structure.html | 60 ++++++ doc/source/whatsnew/v0.20.0.txt | 3 + pandas/formats/style.py | 122 ++++++------ pandas/formats/templates/html.tpl | 70 +++++++ pandas/io/api.py | 17 ++ pandas/tests/api/test_api.py | 3 +- pandas/tests/formats/test_style.py | 34 +++- pandas/util/importing.py | 10 + setup.py | 3 +- 13 files changed, 447 insertions(+), 164 deletions(-) create mode 100644 doc/source/template_structure.html create mode 100644 pandas/formats/templates/html.tpl create mode 100644 pandas/util/importing.py diff --git a/.gitignore b/.gitignore index a509fcf736ea8..c953020f59342 100644 --- a/.gitignore +++ b/.gitignore @@ -103,3 +103,4 @@ doc/source/index.rst 
doc/build/html/index.html # Windows specific leftover: doc/tmp.sv +doc/source/templates/ diff --git a/MANIFEST.in b/MANIFEST.in index b7a7e6039ac9a..31de3466cb357 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -25,3 +25,4 @@ global-exclude *.png # recursive-include LICENSES * include versioneer.py include pandas/_version.py +include pandas/formats/templates/*.tpl diff --git a/ci/requirements-3.5_DOC.run b/ci/requirements-3.5_DOC.run index 7ed60758612bb..9647ab53ab835 100644 --- a/ci/requirements-3.5_DOC.run +++ b/ci/requirements-3.5_DOC.run @@ -1,5 +1,6 @@ ipython ipykernel +ipywidgets sphinx nbconvert nbformat diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb index 2b8bf35a913c1..06763b2a5e741 100644 --- a/doc/source/style.ipynb +++ b/doc/source/style.ipynb @@ -54,7 +54,7 @@ }, "outputs": [], "source": [ - "import matplotlib\n", + "import matplotlib.pyplot\n", "# We have this here to trigger matplotlib's font cache stuff.\n", "# This cell is hidden from the output" ] @@ -87,9 +87,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style" @@ -107,9 +105,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.highlight_null().render().split('\\n')[:10]" @@ -160,9 +156,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "s = df.style.applymap(color_negative_red)\n", @@ -208,9 +202,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.apply(highlight_max)" @@ -234,9 +226,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.\\\n", @@ -290,9 +280,7 @@ { "cell_type": "code", "execution_count": null, - 
"metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.apply(highlight_max, color='darkorange', axis=None)" @@ -340,9 +328,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.apply(highlight_max, subset=['B', 'C', 'D'])" @@ -358,9 +344,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.applymap(color_negative_red,\n", @@ -393,9 +377,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.format(\"{:.2%}\")" @@ -411,9 +393,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.format({'B': \"{:0<4.0f}\", 'D': '{:+.2f}'})" @@ -429,9 +409,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.format({\"B\": lambda x: \"±{:.2f}\".format(abs(x))})" @@ -454,9 +432,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.highlight_null(null_color='red')" @@ -472,9 +448,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "import seaborn as sns\n", @@ -495,9 +469,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "# Uses the full color range\n", @@ -507,9 +479,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "# Compress the color range\n", @@ -529,9 +499,7 @@ { "cell_type": "code", "execution_count": null, - 
"metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.bar(subset=['A', 'B'], color='#d65f5f')" @@ -547,9 +515,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.highlight_max(axis=0)" @@ -558,9 +524,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.highlight_min(axis=0)" @@ -576,9 +540,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.set_properties(**{'background-color': 'black',\n", @@ -603,9 +565,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df2 = -df\n", @@ -616,9 +576,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "style2 = df2.style\n", @@ -671,9 +629,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "with pd.option_context('display.precision', 2):\n", @@ -693,9 +649,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style\\\n", @@ -728,9 +682,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "df.style.set_caption('Colormaps, with a caption.')\\\n", @@ -756,9 +708,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "from IPython.display import HTML\n", @@ -854,9 +804,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], 
"source": [ "from IPython.html import widgets\n", @@ -892,16 +840,14 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, + "metadata": {}, "outputs": [], "source": [ "np.random.seed(25)\n", "cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)\n", - "df = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n", + "bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()\n", "\n", - "df.style.background_gradient(cmap, axis=1)\\\n", + "bigdf.style.background_gradient(cmap, axis=1)\\\n", " .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\\\n", " .set_caption(\"Hover to magify\")\\\n", " .set_precision(2)\\\n", @@ -924,29 +870,157 @@ "\n", "### Subclassing\n", "\n", - "This section contains a bit of information about the implementation of `Styler`.\n", - "Since the feature is so new all of this is subject to change, even more so than the end-use API.\n", - "\n", - "As users apply styles (via `.apply`, `.applymap` or one of the builtins), we don't actually calculate anything.\n", - "Instead, we append functions and arguments to a list `self._todo`.\n", - "When asked (typically in `.render` we'll walk through the list and execute each function (this is in `self._compute()`.\n", - "These functions update an internal `defaultdict(list)`, `self.ctx` which maps DataFrame row / column positions to CSS attribute, value pairs.\n", - "\n", - "We take the extra step through `self._todo` so that we can export styles and set them on other `Styler`s.\n", - "\n", - "Rendering uses [Jinja](http://jinja.pocoo.org/) templates.\n", - "The `.translate` method takes `self.ctx` and builds another dictionary ready to be passed into `Styler.template.render`, the Jinja template.\n", - "\n", - "\n", - "### Alternate templates\n", - "\n", - "We've used [Jinja](http://jinja.pocoo.org/) templates to build up the HTML.\n", - "The template is stored as a class variable ``Styler.template.``. 
Subclasses can override that.\n", + "If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template.\n", + "We'll show an example of extending the default template to insert a custom header before each table." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from jinja2 import Environment, ChoiceLoader, FileSystemLoader\n", + "from IPython.display import HTML\n", + "from pandas.io.api import Styler" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%mkdir templates" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This next cell writes the custom template.\n", + "We extend the template `html.tpl`, which comes with pandas." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%file templates/myhtml.tpl\n", + "{% extends \"html.tpl\" %}\n", + "{% block table %}\n", + "

{{ table_title|default(\"My Table\") }}

\n", + "{{ super() }}\n", + "{% endblock table %}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we've created a template, we need to set up a subclass of ``pd.Styler`` that\n", + "knows about it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "class MyStyler(pd.Styler):\n", + " env = Environment(\n", + " loader=ChoiceLoader([\n", + " FileSystemLoader(\"templates\"), # contains ours\n", + " pd.Styler.loader, # the default\n", + " ])\n", + " )\n", + " template = env.get_template(\"myhtml.tpl\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that we include the original loader in our environment's loader.\n", + "That's because we extend the original template, so the Jinja environment needs\n", + "to be able to find it.\n", "\n", - "```python\n", - "class CustomStyle(Styler):\n", - " template = Template(\"\"\"...\"\"\")\n", - "```" + "Now we can use that custom styler. It's `__init__` takes a DataFrame." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "MyStyler(df)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our custom template accepts a `table_title` keyword. We can provide the value in the `.render` method." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "HTML(MyStyler(df).render(table_title=\"Extending Example\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "EasyStyler = pd.Styler.from_custom_template(\"templates\", \"myhtml.tpl\")\n", + "EasyStyler(df)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here's the template structure:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"template_structure.html\") as f:\n", + " structure = f.read()\n", + " \n", + "HTML(structure)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details." ] } ], diff --git a/doc/source/template_structure.html b/doc/source/template_structure.html new file mode 100644 index 0000000000000..81dbe2b7d0217 --- /dev/null +++ b/doc/source/template_structure.html @@ -0,0 +1,60 @@ + + + +
before_style
+
style +
<style type="text/css">
+
table_styles
+
before_cellstyle
+
cellstyle
+
</style>
+
+ +
before_table
+ +
table +
<table ...>
+
caption
+ +
thead +
before_head_rows
+
head_tr (loop over headers)
+
after_head_rows
+
+ +
tbody +
before_rows
+
tr (loop over data rows)
+
after_rows
+
+
</table>
+
+ +
after_table
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 089c4f59445e3..821b178c1cd17 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -483,6 +483,9 @@ Other Enhancements - ``DataFrame.to_excel()`` has a new ``freeze_panes`` parameter to turn on Freeze Panes when exporting to Excel (:issue:`15160`) - ``pd.read_html()`` will parse multiple header rows, creating a multiindex header. (:issue:`13434`). - HTML table output skips ``colspan`` or ``rowspan`` attribute if equal to 1. (:issue:`15403`) +- ``pd.io.api.Styler`` template now has blocks for easier extension, :ref:`see the example notebook ` (:issue:`15649`) +- ``pd.io.api.Styler.render`` now accepts ``**kwargs`` to allow user-defined variables in the template (:issue:`15649`) + - ``TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) - ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs ` for more information. diff --git a/pandas/formats/style.py b/pandas/formats/style.py index e712010a8b4f2..af02077bd5b41 100644 --- a/pandas/formats/style.py +++ b/pandas/formats/style.py @@ -10,7 +10,9 @@ from collections import defaultdict, MutableMapping try: - from jinja2 import Template + from jinja2 import ( + PackageLoader, Environment, ChoiceLoader, FileSystemLoader + ) except ImportError: msg = "pandas.Styler requires jinja2. 
"\ "Please install with `conda install Jinja2`\n"\ @@ -68,7 +70,9 @@ class Styler(object): Attributes ---------- - template: Jinja Template + env : Jinja2 Environment + template: Jinja2 Template + loader : Jinja2 Loader Notes ----- @@ -103,56 +107,12 @@ class Styler(object): -------- pandas.DataFrame.style """ - template = Template(""" - - - - {% if caption %} - - {% endif %} - - - {% for r in head %} - - {% for c in r %} - {% if c.is_visible != False %} - <{{c.type}} class="{{c.class}}" {{ c.attributes|join(" ") }}> - {{c.value}} - {% endif %} - {% endfor %} - - {% endfor %} - - - {% for r in body %} - - {% for c in r %} - {% if c.is_visible != False %} - <{{c.type}} id="T_{{uuid}}{{c.id}}" - class="{{c.class}}" {{ c.attributes|join(" ") }}> - {{ c.display_value }} - {% endif %} - {% endfor %} - - {% endfor %} - -
{{caption}}
- """) + loader = PackageLoader("pandas", "formats/templates") + env = Environment( + loader=loader, + trim_blocks=True, + ) + template = env.get_template("html.tpl") def __init__(self, data, precision=None, table_styles=None, uuid=None, caption=None, table_attributes=None): @@ -400,12 +360,22 @@ def format(self, formatter, subset=None): self._display_funcs[(i, j)] = formatter return self - def render(self): - """ + def render(self, **kwargs): + r""" Render the built up styles to HTML .. versionadded:: 0.17.1 + Parameters + ---------- + **kwargs: + Any additional keyword arguments are passed through + to ``self.template.render``. This is useful when you + need to provide additional variables for a custom + template. + + .. versionadded:: 0.20 + Returns ------- rendered: str @@ -418,8 +388,22 @@ def render(self): last item in a Notebook cell. When calling ``Styler.render()`` directly, wrap the result in ``IPython.display.HTML`` to view the rendered HTML in the notebook. + + Pandas uses the following keys in render. Arguments passed + in ``**kwargs`` take precedence, so think carefuly if you want + to override them: + + * head + * cellstyle + * body + * uuid + * precision + * table_styles + * caption + * table_attributes """ self._compute() + # TODO: namespace all the pandas keys d = self._translate() # filter out empty styles, every cell will have a class # but the list of props may just be [['', '']]. @@ -427,6 +411,7 @@ def render(self): trimmed = [x for x in d['cellstyle'] if any(any(y) for y in x['props'])] d['cellstyle'] = trimmed + d.update(kwargs) return self.template.render(**d) def _update_ctx(self, attrs): @@ -961,6 +946,35 @@ def _highlight_extrema(data, color='yellow', max_=True): return pd.DataFrame(np.where(extrema, attr, ''), index=data.index, columns=data.columns) + @classmethod + def from_custom_template(cls, searchpath, name): + """ + Factory function for creating a subclass of ``Styler`` + with a custom template and Jinja environment. 
+ + Parameters + ---------- + searchpath : str or list + Path or paths of directories containing the templates + name : str + Name of your custom template to use for rendering + + Returns + ------- + MyStyler : subclass of Styler + has the correct ``env`` and ``template`` class attributes set. + """ + loader = ChoiceLoader([ + FileSystemLoader(searchpath), + cls.loader, + ]) + + class MyStyler(cls): + env = Environment(loader=loader) + template = env.get_template(name) + + return MyStyler + def _is_visible(idx_row, idx_col, lengths): """ diff --git a/pandas/formats/templates/html.tpl b/pandas/formats/templates/html.tpl new file mode 100644 index 0000000000000..706db1ecdd961 --- /dev/null +++ b/pandas/formats/templates/html.tpl @@ -0,0 +1,70 @@ +{# Update the template_structure.html document too #} +{%- block before_style -%}{%- endblock before_style -%} +{% block style %} + +{%- endblock style %} +{%- block before_table %}{% endblock before_table %} +{%- block table %} + +{%- block caption %} +{%- if caption -%} + +{%- endif -%} +{%- endblock caption %} +{%- block thead %} + + {%- block before_head_rows %}{% endblock %} + {%- for r in head %} + {%- block head_tr scoped %} + + {%- for c in r %} + {%- if c.is_visible != False %} + <{{ c.type }} class="{{c.class}}" {{ c.attributes|join(" ") }}>{{c.value}} + {%- endif %} + {%- endfor %} + + {%- endblock head_tr %} + {%- endfor %} + {%- block after_head_rows %}{% endblock %} + +{%- endblock thead %} +{%- block tbody %} + + {%- block before_rows %}{%- endblock before_rows %} + {%- for r in body %} + {%- block tr scoped %} + + {%- for c in r %} + {%- if c.is_visible != False %} + <{{ c.type }} id="T_{{ uuid }}{{ c.id }}" class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }} + {%- endif %} + {%- endfor %} + + {%- endblock tr %} + {%- endfor %} + {%- block after_rows %}{%- endblock after_rows %} + +{%- endblock tbody %} +
{{caption}}
+{%- endblock table %} +{%- block after_table %}{% endblock after_table %} diff --git a/pandas/io/api.py b/pandas/io/api.py index e312e7bc2f300..4744d41472ff1 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -17,6 +17,23 @@ from pandas.io.pickle import read_pickle, to_pickle from pandas.io.packers import read_msgpack, to_msgpack from pandas.io.gbq import read_gbq +try: + from pandas.formats.style import Styler +except ImportError: + from pandas.compat import add_metaclass as _add_metaclass + from pandas.util.importing import _UnSubclassable + + # We want to *not* raise an ImportError upon importing this module + # We *do* want to raise an ImportError with a custom message + # when the class is instantiated or subclassed. + @_add_metaclass(_UnSubclassable) + class Styler(object): + msg = ("pandas.io.api.Styler requires jinja2. " + "Please install with `conda install jinja2` " + "or `pip install jinja2`") + def __init__(self, *args, **kargs): + raise ImportError(self.msg) + # deprecation, xref #13790 def Term(*args, **kwargs): diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index a15d7cf26cbea..6d92898042b23 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -49,7 +49,8 @@ class TestPDApi(Base, tm.TestCase): 'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index', 'Series', 'SparseArray', 'SparseDataFrame', 'SparseSeries', 'TimeGrouper', 'Timedelta', - 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex'] + 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex', + 'Styler'] # these are already deprecated; awaiting removal deprecated_classes = ['WidePanel', 'Panel4D', diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/formats/test_style.py index 44af0b8ebb085..08f8f2f32763d 100644 --- a/pandas/tests/formats/test_style.py +++ b/pandas/tests/formats/test_style.py @@ -1,6 +1,7 @@ -import pytest - import copy +import textwrap + +import pytest import numpy as np import pandas as pd from pandas 
import DataFrame @@ -717,3 +718,32 @@ def test_background_gradient(self): result = (df.style.background_gradient(subset=pd.IndexSlice[1, 'A']) ._compute().ctx) self.assertEqual(result[(1, 0)], ['background-color: #fff7fb']) + + +def test_block_names(): + # catch accidental removal of a block + expected = { + 'before_style', 'style', 'table_styles', 'before_cellstyle', + 'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody', + 'after_table', 'before_head_rows', 'head_tr', 'after_head_rows', + 'before_rows', 'tr', 'after_rows', + } + result = set(Styler.template.blocks) + assert result == expected + + +def test_from_custom_template(tmpdir): + p = tmpdir.mkdir("templates").join("myhtml.tpl") + p.write(textwrap.dedent("""\ + {% extends "html.tpl" %} + {% block table %} +

{{ table_title|default("My Table") }}

+ {{ super() }} + {% endblock table %}""")) + result = Styler.from_custom_template(str(tmpdir.join('templates')), + 'myhtml.tpl') + assert issubclass(result, Styler) + assert result.env is not Styler.env + assert result.template is not Styler.template + styler = result(pd.DataFrame({"A": [1, 2]})) + assert styler.render() diff --git a/pandas/util/importing.py b/pandas/util/importing.py new file mode 100644 index 0000000000000..9323fb97baac0 --- /dev/null +++ b/pandas/util/importing.py @@ -0,0 +1,10 @@ +class _UnSubclassable(type): + """ + Metaclass to raise an ImportError when subclassed + """ + msg = "" + + def __init__(cls, name, bases, clsdict): + if len(cls.mro()) > 2: + raise ImportError(cls.msg) + super(_UnSubclassable, cls).__init__(name, bases, clsdict) diff --git a/setup.py b/setup.py index 6707af7eb0908..d8ee52f9b4f43 100755 --- a/setup.py +++ b/setup.py @@ -704,7 +704,8 @@ def pxd(name): 'data/html_encoding/*.html', 'json/data/*.json'], 'pandas.tests.tools': ['data/*.csv'], - 'pandas.tests.tseries': ['data/*.pickle'] + 'pandas.tests.tseries': ['data/*.pickle'], + 'pandas.formats': ['templates/*.tpl'] }, ext_modules=extensions, maintainer_email=EMAIL, From 7993fc81098936a893ec0dc0d84d41cfe4eb4218 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sat, 15 Apr 2017 18:02:25 +0200 Subject: [PATCH 29/56] CLN/API: move plotting funcs to pandas.plotting (#16005) closes #12548 --- doc/source/visualization.rst | 28 +- doc/source/whatsnew/v0.20.0.txt | 26 + pandas/__init__.py | 10 +- pandas/core/config_init.py | 2 +- pandas/core/frame.py | 10 +- pandas/core/groupby.py | 2 +- pandas/core/series.py | 2 +- pandas/plotting/__init__.py | 19 + pandas/plotting/_compat.py | 67 + pandas/plotting/_converter.py | 1026 +++++ pandas/plotting/_core.py | 2828 ++++++++++++ pandas/plotting/_misc.py | 573 +++ pandas/plotting/_style.py | 246 + pandas/plotting/_timeseries.py | 339 ++ pandas/plotting/_tools.py | 383 ++ pandas/tests/api/test_api.py | 8 +- 
pandas/tests/plotting/common.py | 22 +- pandas/tests/plotting/test_boxplot_method.py | 5 +- .../{tseries => plotting}/test_converter.py | 2 +- pandas/tests/plotting/test_datetimelike.py | 6 +- pandas/tests/plotting/test_deprecated.py | 58 + pandas/tests/plotting/test_frame.py | 18 +- pandas/tests/plotting/test_hist_method.py | 15 +- pandas/tests/plotting/test_misc.py | 16 +- pandas/tests/plotting/test_series.py | 43 +- pandas/tools/plotting.py | 4040 +---------------- pandas/tseries/converter.py | 1043 +---- pandas/tseries/plotting.py | 345 +- pandas/util/doctools.py | 2 +- setup.py | 1 + 30 files changed, 5696 insertions(+), 5489 deletions(-) create mode 100644 pandas/plotting/__init__.py create mode 100644 pandas/plotting/_compat.py create mode 100644 pandas/plotting/_converter.py create mode 100644 pandas/plotting/_core.py create mode 100644 pandas/plotting/_misc.py create mode 100644 pandas/plotting/_style.py create mode 100644 pandas/plotting/_timeseries.py create mode 100644 pandas/plotting/_tools.py rename pandas/tests/{tseries => plotting}/test_converter.py (99%) create mode 100644 pandas/tests/plotting/test_deprecated.py diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index e8998bf6f6f5c..fb799c642131d 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -152,7 +152,7 @@ You can also create these other plots using the methods ``DataFrame.plot.` In addition to these ``kind`` s, there are the :ref:`DataFrame.hist() `, and :ref:`DataFrame.boxplot() ` methods, which use a separate interface. -Finally, there are several :ref:`plotting functions ` in ``pandas.tools.plotting`` +Finally, there are several :ref:`plotting functions ` in ``pandas.plotting`` that take a :class:`Series` or :class:`DataFrame` as an argument. These include @@ -823,7 +823,7 @@ before plotting. 
Plotting Tools -------------- -These functions can be imported from ``pandas.tools.plotting`` +These functions can be imported from ``pandas.plotting`` and take a :class:`Series` or :class:`DataFrame` as an argument. .. _visualization.scatter_matrix: @@ -834,7 +834,7 @@ Scatter Matrix Plot .. versionadded:: 0.7.3 You can create a scatter plot matrix using the -``scatter_matrix`` method in ``pandas.tools.plotting``: +``scatter_matrix`` method in ``pandas.plotting``: .. ipython:: python :suppress: @@ -843,7 +843,7 @@ You can create a scatter plot matrix using the .. ipython:: python - from pandas.tools.plotting import scatter_matrix + from pandas.plotting import scatter_matrix df = pd.DataFrame(np.random.randn(1000, 4), columns=['a', 'b', 'c', 'd']) @savefig scatter_matrix_kde.png @@ -896,7 +896,7 @@ of the same class will usually be closer together and form larger structures. .. ipython:: python - from pandas.tools.plotting import andrews_curves + from pandas.plotting import andrews_curves data = pd.read_csv('data/iris.data') @@ -918,7 +918,7 @@ represents one data point. Points that tend to cluster will appear closer togeth .. ipython:: python - from pandas.tools.plotting import parallel_coordinates + from pandas.plotting import parallel_coordinates data = pd.read_csv('data/iris.data') @@ -948,7 +948,7 @@ implies that the underlying data are not random. .. ipython:: python - from pandas.tools.plotting import lag_plot + from pandas.plotting import lag_plot plt.figure() @@ -983,7 +983,7 @@ confidence band. .. ipython:: python - from pandas.tools.plotting import autocorrelation_plot + from pandas.plotting import autocorrelation_plot plt.figure() @@ -1016,7 +1016,7 @@ are what constitutes the bootstrap plot. .. ipython:: python - from pandas.tools.plotting import bootstrap_plot + from pandas.plotting import bootstrap_plot data = pd.Series(np.random.rand(1000)) @@ -1048,7 +1048,7 @@ be colored differently. .. 
ipython:: python - from pandas.tools.plotting import radviz + from pandas.plotting import radviz data = pd.read_csv('data/iris.data') @@ -1228,14 +1228,14 @@ Using the ``x_compat`` parameter, you can suppress this behavior: plt.close('all') If you have more than one plot that needs to be suppressed, the ``use`` method -in ``pandas.plot_params`` can be used in a `with statement`: +in ``pandas.plotting.plot_params`` can be used in a `with statement`: .. ipython:: python plt.figure() @savefig ser_plot_suppress_context.png - with pd.plot_params.use('x_compat', True): + with pd.plotting.plot_params.use('x_compat', True): df.A.plot(color='r') df.B.plot(color='g') df.C.plot(color='b') @@ -1450,11 +1450,11 @@ Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table`` plt.close('all') -Finally, there is a helper function ``pandas.tools.plotting.table`` to create a table from :class:`DataFrame` and :class:`Series`, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. +Finally, there is a helper function ``pandas.plotting.table`` to create a table from :class:`DataFrame` and :class:`Series`, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. .. ipython:: python - from pandas.tools.plotting import table + from pandas.plotting import table fig, ax = plt.subplots(1, 1) table(ax, np.round(df.describe(), 2), diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 821b178c1cd17..914995244fe5f 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -21,6 +21,7 @@ Highlights include: - Support for S3 handling now uses ``s3fs``, see :ref:`here ` - Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here ` - Switched the test framework to use `pytest `__ (:issue:`13097`) +- The ``pandas.tools.plotting`` module has been deprecated, moved to ``pandas.plotting``. 
See :ref:`here ` Check the :ref:`API Changes ` and :ref:`deprecations ` before updating. @@ -557,6 +558,31 @@ Using ``.iloc``. Here we will get the location of the 'A' column, then use *posi df.iloc[[0, 2], df.columns.get_loc('A')] +.. _whatsnew_0200.api_breaking.deprecate_plotting + +Deprecate .plotting +^^^^^^^^^^^^^^^^^^^ + +The ``pandas.tools.plotting`` module has been deprecated, in favor of the top level ``pandas.plotting`` module. All the public plotting functions are now available +from ``pandas.plotting`` (:issue:`12548`). + +Furthermore, the top-level ``pandas.scatter_matrix`` and ``pandas.plot_params`` are deprecated. +Users can import these from ``pandas.plotting`` as well. + +Previous script: + +.. code-block:: python + + pd.tools.plotting.scatter_matrix(df) + pd.scatter_matrix(df) + +Should be changed to: + +.. code-block:: python + + pd.plotting.scatter_matrix(df) + + .. _whatsnew_0200.api_breaking.deprecate_panel: Deprecate Panel diff --git a/pandas/__init__.py b/pandas/__init__.py index 529750cd97076..bc38919f2c78c 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -49,7 +49,15 @@ from pandas.tools.merge import (merge, ordered_merge, merge_ordered, merge_asof) from pandas.tools.pivot import pivot_table, crosstab -from pandas.tools.plotting import scatter_matrix, plot_params + +# deprecate tools.plotting, plot_params and scatter_matrix on the top namespace +import pandas.tools.plotting +plot_params = pandas.plotting._style._Options(deprecated=True) +# do not import deprecate to top namespace +scatter_matrix = pandas.util.decorators.deprecate( + 'pandas.scatter_matrix', pandas.plotting.scatter_matrix, + 'pandas.plotting.scatter_matrix') + from pandas.tools.tile import cut, qcut from pandas.tools.util import to_numeric from pandas.core.reshape import melt diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 931fe0661818d..cf2a653638e90 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -285,7 
+285,7 @@ def mpl_style_cb(key): stacklevel=5) import sys - from pandas.tools.plotting import mpl_stylesheet + from pandas.plotting._style import mpl_stylesheet global style_backup val = cf.get_option(key) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4565250c78387..a5256868ce419 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -90,7 +90,7 @@ import pandas.core.ops as ops import pandas.formats.format as fmt from pandas.formats.printing import pprint_thing -import pandas.tools.plotting as gfx +import pandas.plotting._core as gfx from pandas._libs import lib, algos as libalgos @@ -5909,11 +5909,11 @@ def _put_str(s, space): @Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): - import pandas.tools.plotting as plots + from pandas.plotting._core import boxplot import matplotlib.pyplot as plt - ax = plots.boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, - grid=grid, rot=rot, figsize=figsize, layout=layout, - return_type=return_type, **kwds) + ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, + grid=grid, rot=rot, figsize=figsize, layout=layout, + return_type=return_type, **kwds) plt.draw_if_interactive() return ax diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 45a9577c8d8b2..27e256a8eb572 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -4159,7 +4159,7 @@ def groupby_series(obj, col=None): return results -from pandas.tools.plotting import boxplot_frame_groupby # noqa +from pandas.plotting._core import boxplot_frame_groupby # noqa DataFrameGroupBy.boxplot = boxplot_frame_groupby diff --git a/pandas/core/series.py b/pandas/core/series.py index 7f8a97af99490..1cf537cf3c315 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3026,7 +3026,7 @@ def create_from_value(value, index, dtype): # 
---------------------------------------------------------------------- # Add plotting methods to Series -import pandas.tools.plotting as _gfx # noqa +import pandas.plotting._core as _gfx # noqa Series.plot = base.AccessorProperty(_gfx.SeriesPlotMethods, _gfx.SeriesPlotMethods) diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py new file mode 100644 index 0000000000000..c3cbedb0fc28c --- /dev/null +++ b/pandas/plotting/__init__.py @@ -0,0 +1,19 @@ +""" +Plotting api +""" + +# flake8: noqa + +try: # mpl optional + from pandas.plotting import _converter + _converter.register() # needs to override so set_xlim works with str/number +except ImportError: + pass + +from pandas.plotting._misc import (scatter_matrix, radviz, + andrews_curves, bootstrap_plot, + parallel_coordinates, lag_plot, + autocorrelation_plot) +from pandas.plotting._core import boxplot +from pandas.plotting._style import plot_params +from pandas.plotting._tools import table diff --git a/pandas/plotting/_compat.py b/pandas/plotting/_compat.py new file mode 100644 index 0000000000000..7b04b9e1171ec --- /dev/null +++ b/pandas/plotting/_compat.py @@ -0,0 +1,67 @@ +# being a bit too dynamic +# pylint: disable=E1101 +from __future__ import division + +from distutils.version import LooseVersion + + +def _mpl_le_1_2_1(): + try: + import matplotlib as mpl + return (str(mpl.__version__) <= LooseVersion('1.2.1') and + str(mpl.__version__)[0] != '0') + except ImportError: + return False + + +def _mpl_ge_1_3_1(): + try: + import matplotlib + # The or v[0] == '0' is because their versioneer is + # messed up on dev + return (matplotlib.__version__ >= LooseVersion('1.3.1') or + matplotlib.__version__[0] == '0') + except ImportError: + return False + + +def _mpl_ge_1_4_0(): + try: + import matplotlib + return (matplotlib.__version__ >= LooseVersion('1.4') or + matplotlib.__version__[0] == '0') + except ImportError: + return False + + +def _mpl_ge_1_5_0(): + try: + import matplotlib + return 
(matplotlib.__version__ >= LooseVersion('1.5') or + matplotlib.__version__[0] == '0') + except ImportError: + return False + + +def _mpl_ge_2_0_0(): + try: + import matplotlib + return matplotlib.__version__ >= LooseVersion('2.0') + except ImportError: + return False + + +def _mpl_le_2_0_0(): + try: + import matplotlib + return matplotlib.compare_versions('2.0.0', matplotlib.__version__) + except ImportError: + return False + + +def _mpl_ge_2_0_1(): + try: + import matplotlib + return matplotlib.__version__ >= LooseVersion('2.0.1') + except ImportError: + return False diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py new file mode 100644 index 0000000000000..0aa8cc31646c5 --- /dev/null +++ b/pandas/plotting/_converter.py @@ -0,0 +1,1026 @@ +from datetime import datetime, timedelta +import datetime as pydt +import numpy as np + +from dateutil.relativedelta import relativedelta + +import matplotlib.units as units +import matplotlib.dates as dates + +from matplotlib.ticker import Formatter, AutoLocator, Locator +from matplotlib.transforms import nonsingular + + +from pandas.types.common import (is_float, is_integer, + is_integer_dtype, + is_float_dtype, + is_datetime64_ns_dtype, + is_period_arraylike, + ) + +from pandas.compat import lrange +import pandas.compat as compat +import pandas._libs.lib as lib +import pandas.core.common as com +from pandas.core.index import Index + +from pandas.core.series import Series +from pandas.tseries.index import date_range +import pandas.tseries.tools as tools +import pandas.tseries.frequencies as frequencies +from pandas.tseries.frequencies import FreqGroup +from pandas.tseries.period import Period, PeriodIndex + +from pandas.plotting._compat import _mpl_le_2_0_0 + +# constants +HOURS_PER_DAY = 24. +MIN_PER_HOUR = 60. +SEC_PER_MIN = 60. 
+ +SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR +SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY + +MUSEC_PER_DAY = 1e6 * SEC_PER_DAY + + +def register(): + units.registry[lib.Timestamp] = DatetimeConverter() + units.registry[Period] = PeriodConverter() + units.registry[pydt.datetime] = DatetimeConverter() + units.registry[pydt.date] = DatetimeConverter() + units.registry[pydt.time] = TimeConverter() + units.registry[np.datetime64] = DatetimeConverter() + + +def _to_ordinalf(tm): + tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second + + float(tm.microsecond / 1e6)) + return tot_sec + + +def time2num(d): + if isinstance(d, compat.string_types): + parsed = tools.to_datetime(d) + if not isinstance(parsed, datetime): + raise ValueError('Could not parse time %s' % d) + return _to_ordinalf(parsed.time()) + if isinstance(d, pydt.time): + return _to_ordinalf(d) + return d + + +class TimeConverter(units.ConversionInterface): + + @staticmethod + def convert(value, unit, axis): + valid_types = (str, pydt.time) + if (isinstance(value, valid_types) or is_integer(value) or + is_float(value)): + return time2num(value) + if isinstance(value, Index): + return value.map(time2num) + if isinstance(value, (list, tuple, np.ndarray, Index)): + return [time2num(x) for x in value] + return value + + @staticmethod + def axisinfo(unit, axis): + if unit != 'time': + return None + + majloc = AutoLocator() + majfmt = TimeFormatter(majloc) + return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time') + + @staticmethod + def default_units(x, axis): + return 'time' + + +# time formatter +class TimeFormatter(Formatter): + + def __init__(self, locs): + self.locs = locs + + def __call__(self, x, pos=0): + fmt = '%H:%M:%S' + s = int(x) + ms = int((x - s) * 1e3) + us = int((x - s) * 1e6 - ms) + m, s = divmod(s, 60) + h, m = divmod(m, 60) + _, h = divmod(h, 24) + if us != 0: + fmt += '.%6f' + elif ms != 0: + fmt += '.%3f' + + return pydt.time(h, m, s, us).strftime(fmt) + + +# Period Conversion + + 
+class PeriodConverter(dates.DateConverter): + + @staticmethod + def convert(values, units, axis): + if not hasattr(axis, 'freq'): + raise TypeError('Axis must have `freq` set to convert to Periods') + valid_types = (compat.string_types, datetime, + Period, pydt.date, pydt.time) + if (isinstance(values, valid_types) or is_integer(values) or + is_float(values)): + return get_datevalue(values, axis.freq) + if isinstance(values, PeriodIndex): + return values.asfreq(axis.freq)._values + if isinstance(values, Index): + return values.map(lambda x: get_datevalue(x, axis.freq)) + if is_period_arraylike(values): + return PeriodIndex(values, freq=axis.freq)._values + if isinstance(values, (list, tuple, np.ndarray, Index)): + return [get_datevalue(x, axis.freq) for x in values] + return values + + +def get_datevalue(date, freq): + if isinstance(date, Period): + return date.asfreq(freq).ordinal + elif isinstance(date, (compat.string_types, datetime, + pydt.date, pydt.time)): + return Period(date, freq).ordinal + elif (is_integer(date) or is_float(date) or + (isinstance(date, (np.ndarray, Index)) and (date.size == 1))): + return date + elif date is None: + return None + raise ValueError("Unrecognizable date '%s'" % date) + + +def _dt_to_float_ordinal(dt): + """ + Convert :mod:`datetime` to the Gregorian date as UTC float days, + preserving hours, minutes, seconds and microseconds. Return value + is a :func:`float`. 
+ """ + if (isinstance(dt, (np.ndarray, Index, Series) + ) and is_datetime64_ns_dtype(dt)): + base = dates.epoch2num(dt.asi8 / 1.0E9) + else: + base = dates.date2num(dt) + return base + + +# Datetime Conversion +class DatetimeConverter(dates.DateConverter): + + @staticmethod + def convert(values, unit, axis): + def try_parse(values): + try: + return _dt_to_float_ordinal(tools.to_datetime(values)) + except Exception: + return values + + if isinstance(values, (datetime, pydt.date)): + return _dt_to_float_ordinal(values) + elif isinstance(values, np.datetime64): + return _dt_to_float_ordinal(lib.Timestamp(values)) + elif isinstance(values, pydt.time): + return dates.date2num(values) + elif (is_integer(values) or is_float(values)): + return values + elif isinstance(values, compat.string_types): + return try_parse(values) + elif isinstance(values, (list, tuple, np.ndarray, Index)): + if isinstance(values, Index): + values = values.values + if not isinstance(values, np.ndarray): + values = com._asarray_tuplesafe(values) + + if is_integer_dtype(values) or is_float_dtype(values): + return values + + try: + values = tools.to_datetime(values) + if isinstance(values, Index): + values = _dt_to_float_ordinal(values) + else: + values = [_dt_to_float_ordinal(x) for x in values] + except Exception: + values = _dt_to_float_ordinal(values) + + return values + + @staticmethod + def axisinfo(unit, axis): + """ + Return the :class:`~matplotlib.units.AxisInfo` for *unit*. + + *unit* is a tzinfo instance or None. + The *axis* argument is required but not used. 
+ """ + tz = unit + + majloc = PandasAutoDateLocator(tz=tz) + majfmt = PandasAutoDateFormatter(majloc, tz=tz) + datemin = pydt.date(2000, 1, 1) + datemax = pydt.date(2010, 1, 1) + + return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='', + default_limits=(datemin, datemax)) + + +class PandasAutoDateFormatter(dates.AutoDateFormatter): + + def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'): + dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) + # matplotlib.dates._UTC has no _utcoffset called by pandas + if self._tz is dates.UTC: + self._tz._utcoffset = self._tz.utcoffset(None) + + # For mpl > 2.0 the format strings are controlled via rcparams + # so do not mess with them. For mpl < 2.0 change the second + # break point and add a musec break point + if _mpl_le_2_0_0(): + self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S' + self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f' + + +class PandasAutoDateLocator(dates.AutoDateLocator): + + def get_locator(self, dmin, dmax): + 'Pick the best locator based on a distance.' + delta = relativedelta(dmax, dmin) + + num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days + num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds + tot_sec = num_days * 86400. + num_sec + + if abs(tot_sec) < self.minticks: + self._freq = -1 + locator = MilliSecondLocator(self.tz) + locator.set_axis(self.axis) + + locator.set_view_interval(*self.axis.get_view_interval()) + locator.set_data_interval(*self.axis.get_data_interval()) + return locator + + return dates.AutoDateLocator.get_locator(self, dmin, dmax) + + def _get_unit(self): + return MilliSecondLocator.get_unit_generic(self._freq) + + +class MilliSecondLocator(dates.DateLocator): + + UNIT = 1. / (24 * 3600 * 1000) + + def __init__(self, tz): + dates.DateLocator.__init__(self, tz) + self._interval = 1. 
+ + def _get_unit(self): + return self.get_unit_generic(-1) + + @staticmethod + def get_unit_generic(freq): + unit = dates.RRuleLocator.get_unit_generic(freq) + if unit < 0: + return MilliSecondLocator.UNIT + return unit + + def __call__(self): + # if no data have been set, this will tank with a ValueError + try: + dmin, dmax = self.viewlim_to_dt() + except ValueError: + return [] + + if dmin > dmax: + dmax, dmin = dmin, dmax + # We need to cap at the endpoints of valid datetime + + # TODO(wesm) unused? + # delta = relativedelta(dmax, dmin) + # try: + # start = dmin - delta + # except ValueError: + # start = _from_ordinal(1.0) + + # try: + # stop = dmax + delta + # except ValueError: + # # The magic number! + # stop = _from_ordinal(3652059.9999999) + + nmax, nmin = dates.date2num((dmax, dmin)) + + num = (nmax - nmin) * 86400 * 1000 + max_millis_ticks = 6 + for interval in [1, 10, 50, 100, 200, 500]: + if num <= interval * (max_millis_ticks - 1): + self._interval = interval + break + else: + # We went through the whole loop without breaking, default to 1 + self._interval = 1000. + + estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) + + if estimate > self.MAXTICKS * 2: + raise RuntimeError(('MillisecondLocator estimated to generate %d ' + 'ticks from %s to %s: exceeds Locator.MAXTICKS' + '* 2 (%d) ') % + (estimate, dmin, dmax, self.MAXTICKS * 2)) + + freq = '%dL' % self._get_interval() + tz = self.tz.tzname(None) + st = _from_ordinal(dates.date2num(dmin)) # strip tz + ed = _from_ordinal(dates.date2num(dmax)) + all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject + + try: + if len(all_dates) > 0: + locs = self.raise_if_exceeds(dates.date2num(all_dates)) + return locs + except Exception: # pragma: no cover + pass + + lims = dates.date2num([dmin, dmax]) + return lims + + def _get_interval(self): + return self._interval + + def autoscale(self): + """ + Set the view limits to include the data range. 
+ """ + dmin, dmax = self.datalim_to_dt() + if dmin > dmax: + dmax, dmin = dmin, dmax + + # We need to cap at the endpoints of valid datetime + + # TODO(wesm): unused? + + # delta = relativedelta(dmax, dmin) + # try: + # start = dmin - delta + # except ValueError: + # start = _from_ordinal(1.0) + + # try: + # stop = dmax + delta + # except ValueError: + # # The magic number! + # stop = _from_ordinal(3652059.9999999) + + dmin, dmax = self.datalim_to_dt() + + vmin = dates.date2num(dmin) + vmax = dates.date2num(dmax) + + return self.nonsingular(vmin, vmax) + + +def _from_ordinal(x, tz=None): + ix = int(x) + dt = datetime.fromordinal(ix) + remainder = float(x) - ix + hour, remainder = divmod(24 * remainder, 1) + minute, remainder = divmod(60 * remainder, 1) + second, remainder = divmod(60 * remainder, 1) + microsecond = int(1e6 * remainder) + if microsecond < 10: + microsecond = 0 # compensate for rounding errors + dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute), + int(second), microsecond) + if tz is not None: + dt = dt.astimezone(tz) + + if microsecond > 999990: # compensate for rounding errors + dt += timedelta(microseconds=1e6 - microsecond) + + return dt + +# Fixed frequency dynamic tick locators and formatters + +# ------------------------------------------------------------------------- +# --- Locators --- +# ------------------------------------------------------------------------- + + +def _get_default_annual_spacing(nyears): + """ + Returns a default spacing between consecutive ticks for annual data. 
+ """ + if nyears < 11: + (min_spacing, maj_spacing) = (1, 1) + elif nyears < 20: + (min_spacing, maj_spacing) = (1, 2) + elif nyears < 50: + (min_spacing, maj_spacing) = (1, 5) + elif nyears < 100: + (min_spacing, maj_spacing) = (5, 10) + elif nyears < 200: + (min_spacing, maj_spacing) = (5, 25) + elif nyears < 600: + (min_spacing, maj_spacing) = (10, 50) + else: + factor = nyears // 1000 + 1 + (min_spacing, maj_spacing) = (factor * 20, factor * 100) + return (min_spacing, maj_spacing) + + +def period_break(dates, period): + """ + Returns the indices where the given period changes. + + Parameters + ---------- + dates : PeriodIndex + Array of intervals to monitor. + period : string + Name of the period to monitor. + """ + current = getattr(dates, period) + previous = getattr(dates - 1, period) + return np.nonzero(current - previous)[0] + + +def has_level_label(label_flags, vmin): + """ + Returns true if the ``label_flags`` indicate there is at least one label + for this level. + + if the minimum view limit is not an exact integer, then the first tick + label won't be shown, so we must adjust for that. 
+ """ + if label_flags.size == 0 or (label_flags.size == 1 and + label_flags[0] == 0 and + vmin % 1 > 0.0): + return False + else: + return True + + +def _daily_finder(vmin, vmax, freq): + periodsperday = -1 + + if freq >= FreqGroup.FR_HR: + if freq == FreqGroup.FR_NS: + periodsperday = 24 * 60 * 60 * 1000000000 + elif freq == FreqGroup.FR_US: + periodsperday = 24 * 60 * 60 * 1000000 + elif freq == FreqGroup.FR_MS: + periodsperday = 24 * 60 * 60 * 1000 + elif freq == FreqGroup.FR_SEC: + periodsperday = 24 * 60 * 60 + elif freq == FreqGroup.FR_MIN: + periodsperday = 24 * 60 + elif freq == FreqGroup.FR_HR: + periodsperday = 24 + else: # pragma: no cover + raise ValueError("unexpected frequency: %s" % freq) + periodsperyear = 365 * periodsperday + periodspermonth = 28 * periodsperday + + elif freq == FreqGroup.FR_BUS: + periodsperyear = 261 + periodspermonth = 19 + elif freq == FreqGroup.FR_DAY: + periodsperyear = 365 + periodspermonth = 28 + elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK: + periodsperyear = 52 + periodspermonth = 3 + else: # pragma: no cover + raise ValueError("unexpected frequency") + + # save this for later usage + vmin_orig = vmin + + (vmin, vmax) = (Period(ordinal=int(vmin), freq=freq), + Period(ordinal=int(vmax), freq=freq)) + span = vmax.ordinal - vmin.ordinal + 1 + dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq) + # Initialize the output + info = np.zeros(span, + dtype=[('val', np.int64), ('maj', bool), + ('min', bool), ('fmt', '|S20')]) + info['val'][:] = dates_._values + info['fmt'][:] = '' + info['maj'][[0, -1]] = True + # .. and set some shortcuts + info_maj = info['maj'] + info_min = info['min'] + info_fmt = info['fmt'] + + def first_label(label_flags): + if (label_flags[0] == 0) and (label_flags.size > 1) and \ + ((vmin_orig % 1) > 0.0): + return label_flags[1] + else: + return label_flags[0] + + # Case 1. 
Less than a month + if span <= periodspermonth: + day_start = period_break(dates_, 'day') + month_start = period_break(dates_, 'month') + + def _hour_finder(label_interval, force_year_start): + _hour = dates_.hour + _prev_hour = (dates_ - 1).hour + hour_start = (_hour - _prev_hour) != 0 + info_maj[day_start] = True + info_min[hour_start & (_hour % label_interval == 0)] = True + year_start = period_break(dates_, 'year') + info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M' + info_fmt[day_start] = '%H:%M\n%d-%b' + info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' + if force_year_start and not has_level_label(year_start, vmin_orig): + info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y' + + def _minute_finder(label_interval): + hour_start = period_break(dates_, 'hour') + _minute = dates_.minute + _prev_minute = (dates_ - 1).minute + minute_start = (_minute - _prev_minute) != 0 + info_maj[hour_start] = True + info_min[minute_start & (_minute % label_interval == 0)] = True + year_start = period_break(dates_, 'year') + info_fmt = info['fmt'] + info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M' + info_fmt[day_start] = '%H:%M\n%d-%b' + info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' + + def _second_finder(label_interval): + minute_start = period_break(dates_, 'minute') + _second = dates_.second + _prev_second = (dates_ - 1).second + second_start = (_second - _prev_second) != 0 + info['maj'][minute_start] = True + info['min'][second_start & (_second % label_interval == 0)] = True + year_start = period_break(dates_, 'year') + info_fmt = info['fmt'] + info_fmt[second_start & (_second % + label_interval == 0)] = '%H:%M:%S' + info_fmt[day_start] = '%H:%M:%S\n%d-%b' + info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y' + + if span < periodsperday / 12000.0: + _second_finder(1) + elif span < periodsperday / 6000.0: + _second_finder(2) + elif span < periodsperday / 2400.0: + _second_finder(5) + elif span < periodsperday / 1200.0: + _second_finder(10) + elif span < 
periodsperday / 800.0: + _second_finder(15) + elif span < periodsperday / 400.0: + _second_finder(30) + elif span < periodsperday / 150.0: + _minute_finder(1) + elif span < periodsperday / 70.0: + _minute_finder(2) + elif span < periodsperday / 24.0: + _minute_finder(5) + elif span < periodsperday / 12.0: + _minute_finder(15) + elif span < periodsperday / 6.0: + _minute_finder(30) + elif span < periodsperday / 2.5: + _hour_finder(1, False) + elif span < periodsperday / 1.5: + _hour_finder(2, False) + elif span < periodsperday * 1.25: + _hour_finder(3, False) + elif span < periodsperday * 2.5: + _hour_finder(6, True) + elif span < periodsperday * 4: + _hour_finder(12, True) + else: + info_maj[month_start] = True + info_min[day_start] = True + year_start = period_break(dates_, 'year') + info_fmt = info['fmt'] + info_fmt[day_start] = '%d' + info_fmt[month_start] = '%d\n%b' + info_fmt[year_start] = '%d\n%b\n%Y' + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(day_start)] = '%d\n%b\n%Y' + else: + info_fmt[first_label(month_start)] = '%d\n%b\n%Y' + + # Case 2. Less than three months + elif span <= periodsperyear // 4: + month_start = period_break(dates_, 'month') + info_maj[month_start] = True + if freq < FreqGroup.FR_HR: + info['min'] = True + else: + day_start = period_break(dates_, 'day') + info['min'][day_start] = True + week_start = period_break(dates_, 'week') + year_start = period_break(dates_, 'year') + info_fmt[week_start] = '%d' + info_fmt[month_start] = '\n\n%b' + info_fmt[year_start] = '\n\n%b\n%Y' + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(week_start)] = '\n\n%b\n%Y' + else: + info_fmt[first_label(month_start)] = '\n\n%b\n%Y' + # Case 3. Less than 14 months ............... 
+ elif span <= 1.15 * periodsperyear: + year_start = period_break(dates_, 'year') + month_start = period_break(dates_, 'month') + week_start = period_break(dates_, 'week') + info_maj[month_start] = True + info_min[week_start] = True + info_min[year_start] = False + info_min[month_start] = False + info_fmt[month_start] = '%b' + info_fmt[year_start] = '%b\n%Y' + if not has_level_label(year_start, vmin_orig): + info_fmt[first_label(month_start)] = '%b\n%Y' + # Case 4. Less than 2.5 years ............... + elif span <= 2.5 * periodsperyear: + year_start = period_break(dates_, 'year') + quarter_start = period_break(dates_, 'quarter') + month_start = period_break(dates_, 'month') + info_maj[quarter_start] = True + info_min[month_start] = True + info_fmt[quarter_start] = '%b' + info_fmt[year_start] = '%b\n%Y' + # Case 4. Less than 4 years ................. + elif span <= 4 * periodsperyear: + year_start = period_break(dates_, 'year') + month_start = period_break(dates_, 'month') + info_maj[year_start] = True + info_min[month_start] = True + info_min[year_start] = False + + month_break = dates_[month_start].month + jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] + info_fmt[jan_or_jul] = '%b' + info_fmt[year_start] = '%b\n%Y' + # Case 5. Less than 11 years ................ + elif span <= 11 * periodsperyear: + year_start = period_break(dates_, 'year') + quarter_start = period_break(dates_, 'quarter') + info_maj[year_start] = True + info_min[quarter_start] = True + info_min[year_start] = False + info_fmt[year_start] = '%Y' + # Case 6. More than 12 years ................ 
+ else: + year_start = period_break(dates_, 'year') + year_break = dates_[year_start].year + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(year_break % maj_anndef == 0)] + info_maj[major_idx] = True + minor_idx = year_start[(year_break % min_anndef == 0)] + info_min[minor_idx] = True + info_fmt[major_idx] = '%Y' + + return info + + +def _monthly_finder(vmin, vmax, freq): + periodsperyear = 12 + + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + # Initialize the output + info = np.zeros(span, + dtype=[('val', int), ('maj', bool), ('min', bool), + ('fmt', '|S8')]) + info['val'] = np.arange(vmin, vmax + 1) + dates_ = info['val'] + info['fmt'] = '' + year_start = (dates_ % 12 == 0).nonzero()[0] + info_maj = info['maj'] + info_fmt = info['fmt'] + + if span <= 1.15 * periodsperyear: + info_maj[year_start] = True + info['min'] = True + + info_fmt[:] = '%b' + info_fmt[year_start] = '%b\n%Y' + + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = '%b\n%Y' + + elif span <= 2.5 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + # TODO: Check the following : is it really info['fmt'] ? 
+ info['fmt'][quarter_start] = True + info['min'] = True + + info_fmt[quarter_start] = '%b' + info_fmt[year_start] = '%b\n%Y' + + elif span <= 4 * periodsperyear: + info_maj[year_start] = True + info['min'] = True + + jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) + info_fmt[jan_or_jul] = '%b' + info_fmt[year_start] = '%b\n%Y' + + elif span <= 11 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + info['min'][quarter_start] = True + + info_fmt[year_start] = '%Y' + + else: + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + years = dates_[year_start] // 12 + 1 + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info['min'][year_start[(years % min_anndef == 0)]] = True + + info_fmt[major_idx] = '%Y' + + return info + + +def _quarterly_finder(vmin, vmax, freq): + periodsperyear = 4 + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + info = np.zeros(span, + dtype=[('val', int), ('maj', bool), ('min', bool), + ('fmt', '|S8')]) + info['val'] = np.arange(vmin, vmax + 1) + info['fmt'] = '' + dates_ = info['val'] + info_maj = info['maj'] + info_fmt = info['fmt'] + year_start = (dates_ % 4 == 0).nonzero()[0] + + if span <= 3.5 * periodsperyear: + info_maj[year_start] = True + info['min'] = True + + info_fmt[:] = 'Q%q' + info_fmt[year_start] = 'Q%q\n%F' + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = 'Q%q\n%F' + + elif span <= 11 * periodsperyear: + info_maj[year_start] = True + info['min'] = True + info_fmt[year_start] = '%F' + + else: + years = dates_[year_start] // 4 + 1 + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info['min'][year_start[(years % min_anndef == 0)]] = True + info_fmt[major_idx] = '%F' + + 
return info + + +def _annual_finder(vmin, vmax, freq): + (vmin, vmax) = (int(vmin), int(vmax + 1)) + span = vmax - vmin + 1 + + info = np.zeros(span, + dtype=[('val', int), ('maj', bool), ('min', bool), + ('fmt', '|S8')]) + info['val'] = np.arange(vmin, vmax + 1) + info['fmt'] = '' + dates_ = info['val'] + + (min_anndef, maj_anndef) = _get_default_annual_spacing(span) + major_idx = dates_ % maj_anndef == 0 + info['maj'][major_idx] = True + info['min'][(dates_ % min_anndef == 0)] = True + info['fmt'][major_idx] = '%Y' + + return info + + +def get_finder(freq): + if isinstance(freq, compat.string_types): + freq = frequencies.get_freq(freq) + fgroup = frequencies.get_freq_group(freq) + + if fgroup == FreqGroup.FR_ANN: + return _annual_finder + elif fgroup == FreqGroup.FR_QTR: + return _quarterly_finder + elif freq == FreqGroup.FR_MTH: + return _monthly_finder + elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK): + return _daily_finder + else: # pragma: no cover + errmsg = "Unsupported frequency: %s" % (freq) + raise NotImplementedError(errmsg) + + +class TimeSeries_DateLocator(Locator): + """ + Locates the ticks along an axis controlled by a :class:`Series`. + + Parameters + ---------- + freq : {var} + Valid frequency specifier. + minor_locator : {False, True}, optional + Whether the locator is for minor ticks (True) or not. + dynamic_mode : {True, False}, optional + Whether the locator should work in dynamic mode. 
+ base : {int}, optional + quarter : {int}, optional + month : {int}, optional + day : {int}, optional + """ + + def __init__(self, freq, minor_locator=False, dynamic_mode=True, + base=1, quarter=1, month=1, day=1, plot_obj=None): + if isinstance(freq, compat.string_types): + freq = frequencies.get_freq(freq) + self.freq = freq + self.base = base + (self.quarter, self.month, self.day) = (quarter, month, day) + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _get_default_locs(self, vmin, vmax): + "Returns the default locations of ticks." + + if self.plot_obj.date_axis_info is None: + self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) + + locator = self.plot_obj.date_axis_info + + if self.isminor: + return np.compress(locator['min'], locator['val']) + return np.compress(locator['maj'], locator['val']) + + def __call__(self): + 'Return the locations of the ticks.' + # axis calls Locator.set_axis inside set_m_formatter + vi = tuple(self.axis.get_view_interval()) + if vi != self.plot_obj.view_interval: + self.plot_obj.date_axis_info = None + self.plot_obj.view_interval = vi + vmin, vmax = vi + if vmax < vmin: + vmin, vmax = vmax, vmin + if self.isdynamic: + locs = self._get_default_locs(vmin, vmax) + else: # pragma: no cover + base = self.base + (d, m) = divmod(vmin, base) + vmin = (d + 1) * base + locs = lrange(vmin, vmax + 1, base) + return locs + + def autoscale(self): + """ + Sets the view limits to the nearest multiples of base that contain the + data. 
+ """ + # requires matplotlib >= 0.98.0 + (vmin, vmax) = self.axis.get_data_interval() + + locs = self._get_default_locs(vmin, vmax) + (vmin, vmax) = locs[[0, -1]] + if vmin == vmax: + vmin -= 1 + vmax += 1 + return nonsingular(vmin, vmax) + +# ------------------------------------------------------------------------- +# --- Formatter --- +# ------------------------------------------------------------------------- + + +class TimeSeries_DateFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`PeriodIndex`. + + Parameters + ---------- + freq : {int, string} + Valid frequency specifier. + minor_locator : {False, True} + Whether the current formatter should apply to minor ticks (True) or + major ticks (False). + dynamic_mode : {True, False} + Whether the formatter works in dynamic mode or not. + """ + + def __init__(self, freq, minor_locator=False, dynamic_mode=True, + plot_obj=None): + if isinstance(freq, compat.string_types): + freq = frequencies.get_freq(freq) + self.format = None + self.freq = freq + self.locs = [] + self.formatdict = None + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _set_default_format(self, vmin, vmax): + "Returns the default ticks spacing." + + if self.plot_obj.date_axis_info is None: + self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) + info = self.plot_obj.date_axis_info + + if self.isminor: + format = np.compress(info['min'] & np.logical_not(info['maj']), + info) + else: + format = np.compress(info['maj'], info) + self.formatdict = dict([(x, f) for (x, _, _, f) in format]) + return self.formatdict + + def set_locs(self, locs): + 'Sets the locations of the ticks' + # don't actually use the locs. This is just needed to work with + # matplotlib. 
Force to use vmin, vmax + self.locs = locs + + (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) + if vi != self.plot_obj.view_interval: + self.plot_obj.date_axis_info = None + self.plot_obj.view_interval = vi + if vmax < vmin: + (vmin, vmax) = (vmax, vmin) + self._set_default_format(vmin, vmax) + + def __call__(self, x, pos=0): + if self.formatdict is None: + return '' + else: + fmt = self.formatdict.pop(x, '') + return Period(ordinal=int(x), freq=self.freq).strftime(fmt) + + +class TimeSeries_TimedeltaFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. + """ + + @staticmethod + def format_timedelta_ticks(x, pos, n_decimals): + """ + Convert seconds to 'D days HH:MM:SS.F' + """ + s, ns = divmod(x, 1e9) + m, s = divmod(s, 60) + h, m = divmod(m, 60) + d, h = divmod(h, 24) + decimals = int(ns * 10**(n_decimals - 9)) + s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) + if n_decimals > 0: + s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) + if d != 0: + s = '{:d} days '.format(int(d)) + s + return s + + def __call__(self, x, pos=0): + (vmin, vmax) = tuple(self.axis.get_view_interval()) + n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) + if n_decimals > 9: + n_decimals = 9 + return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py new file mode 100644 index 0000000000000..3980f5e7f2f61 --- /dev/null +++ b/pandas/plotting/_core.py @@ -0,0 +1,2828 @@ +# being a bit too dynamic +# pylint: disable=E1101 +from __future__ import division + +import warnings +import re +from collections import namedtuple +from distutils.version import LooseVersion + +import numpy as np + +from pandas.util.decorators import cache_readonly +from pandas.core.base import PandasObject +from pandas.types.common import (is_list_like, + is_integer, + is_number, + is_hashable, + is_iterator) +from pandas.core.common import AbstractMethodError, isnull, 
_try_sort
+from pandas.core.generic import _shared_docs, _shared_doc_kwargs
+from pandas.core.index import Index, MultiIndex
+from pandas.core.series import Series, remove_na
+from pandas.tseries.period import PeriodIndex
+from pandas.compat import range, lrange, map, zip, string_types
+import pandas.compat as compat
+from pandas.formats.printing import pprint_thing
+from pandas.util.decorators import Appender
+
+from pandas.plotting._compat import (_mpl_ge_1_3_1,
+                                     _mpl_ge_1_5_0)
+from pandas.plotting._style import (mpl_stylesheet, plot_params,
+                                    _get_standard_colors)
+from pandas.plotting._tools import (_subplots, _flatten, table,
+                                    _handle_shared_axes, _get_all_lines,
+                                    _get_xlim, _set_ticks_props,
+                                    format_date_labels)
+
+
+if _mpl_ge_1_5_0():
+    # Compat with mpl 1.5, which uses cycler.
+    import cycler
+    colors = mpl_stylesheet.pop('axes.color_cycle')
+    mpl_stylesheet['axes.prop_cycle'] = cycler.cycler('color', colors)
+
+
+def _get_standard_kind(kind):
+    return {'density': 'kde'}.get(kind, kind)
+
+
+def _gca():
+    import matplotlib.pyplot as plt
+    return plt.gca()
+
+
+def _gcf():
+    import matplotlib.pyplot as plt
+    return plt.gcf()
+
+
+class MPLPlot(object):
+    """
+    Base class for assembling a pandas plot using matplotlib
+
+    Parameters
+    ----------
+    data :
+
+    """
+
+    @property
+    def _kind(self):
+        """Specify kind str. 
Must be overridden in child class""" + raise NotImplementedError + + _layout_type = 'vertical' + _default_rot = 0 + orientation = None + _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog', + 'mark_right', 'stacked'] + _attr_defaults = {'logy': False, 'logx': False, 'loglog': False, + 'mark_right': True, 'stacked': False} + + def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, + sharey=False, use_index=True, + figsize=None, grid=None, legend=True, rot=None, + ax=None, fig=None, title=None, xlim=None, ylim=None, + xticks=None, yticks=None, + sort_columns=False, fontsize=None, + secondary_y=False, colormap=None, + table=False, layout=None, **kwds): + + self.data = data + self.by = by + + self.kind = kind + + self.sort_columns = sort_columns + + self.subplots = subplots + + if sharex is None: + if ax is None: + self.sharex = True + else: + # if we get an axis, the users should do the visibility + # setting... + self.sharex = False + else: + self.sharex = sharex + + self.sharey = sharey + self.figsize = figsize + self.layout = layout + + self.xticks = xticks + self.yticks = yticks + self.xlim = xlim + self.ylim = ylim + self.title = title + self.use_index = use_index + + self.fontsize = fontsize + + if rot is not None: + self.rot = rot + # need to know for format_date_labels since it's rotated to 30 by + # default + self._rot_set = True + else: + self._rot_set = False + self.rot = self._default_rot + + if grid is None: + grid = False if secondary_y else self.plt.rcParams['axes.grid'] + + self.grid = grid + self.legend = legend + self.legend_handles = [] + self.legend_labels = [] + + for attr in self._pop_attributes: + value = kwds.pop(attr, self._attr_defaults.get(attr, None)) + setattr(self, attr, value) + + self.ax = ax + self.fig = fig + self.axes = None + + # parse errorbar input if given + xerr = kwds.pop('xerr', None) + yerr = kwds.pop('yerr', None) + self.errors = {} + for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]): + 
self.errors[kw] = self._parse_errorbars(kw, err) + + if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)): + secondary_y = [secondary_y] + self.secondary_y = secondary_y + + # ugly TypeError if user passes matplotlib's `cmap` name. + # Probably better to accept either. + if 'cmap' in kwds and colormap: + raise TypeError("Only specify one of `cmap` and `colormap`.") + elif 'cmap' in kwds: + self.colormap = kwds.pop('cmap') + else: + self.colormap = colormap + + self.table = table + + self.kwds = kwds + + self._validate_color_args() + + def _validate_color_args(self): + if 'color' not in self.kwds and 'colors' in self.kwds: + warnings.warn(("'colors' is being deprecated. Please use 'color'" + "instead of 'colors'")) + colors = self.kwds.pop('colors') + self.kwds['color'] = colors + + if ('color' in self.kwds and self.nseries == 1): + # support series.plot(color='green') + self.kwds['color'] = [self.kwds['color']] + + if ('color' in self.kwds or 'colors' in self.kwds) and \ + self.colormap is not None: + warnings.warn("'color' and 'colormap' cannot be used " + "simultaneously. Using 'color'") + + if 'color' in self.kwds and self.style is not None: + if is_list_like(self.style): + styles = self.style + else: + styles = [self.style] + # need only a single match + for s in styles: + if re.match('^[a-z]+?', s) is not None: + raise ValueError( + "Cannot pass 'style' string with a color " + "symbol and 'color' keyword argument. Please" + " use one or the other or pass 'style' " + "without a color symbol") + + def _iter_data(self, data=None, keep_index=False, fillna=None): + if data is None: + data = self.data + if fillna is not None: + data = data.fillna(fillna) + + # TODO: unused? 
+        # if self.sort_columns:
+        #     columns = _try_sort(data.columns)
+        # else:
+        #     columns = data.columns
+
+        for col, values in data.iteritems():
+            if keep_index is True:
+                yield col, values
+            else:
+                yield col, values.values
+
+    @property
+    def nseries(self):
+        if self.data.ndim == 1:
+            return 1
+        else:
+            return self.data.shape[1]
+
+    def draw(self):
+        self.plt.draw_if_interactive()
+
+    def generate(self):
+        self._args_adjust()
+        self._compute_plot_data()
+        self._setup_subplots()
+        self._make_plot()
+        self._add_table()
+        self._make_legend()
+        self._adorn_subplots()
+
+        for ax in self.axes:
+            self._post_plot_logic_common(ax, self.data)
+            self._post_plot_logic(ax, self.data)
+
+    def _args_adjust(self):
+        pass
+
+    def _has_plotted_object(self, ax):
+        """check whether ax has data"""
+        return (len(ax.lines) != 0 or
+                len(ax.artists) != 0 or
+                len(ax.containers) != 0)
+
+    def _maybe_right_yaxis(self, ax, axes_num):
+        if not self.on_right(axes_num):
+            # secondary axes may be passed via ax kw
+            return self._get_ax_layer(ax)
+
+        if hasattr(ax, 'right_ax'):
+            # if it has right_ax property, ``ax`` must be left axes
+            return ax.right_ax
+        elif hasattr(ax, 'left_ax'):
+            # if it has left_ax property, ``ax`` must be right axes
+            return ax
+        else:
+            # otherwise, create twin axes
+            orig_ax, new_ax = ax, ax.twinx()
+            # TODO: use Matplotlib public API when available
+            new_ax._get_lines = orig_ax._get_lines
+            new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
+            orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
+
+            if not self._has_plotted_object(orig_ax):  # no data on left y
+                orig_ax.get_yaxis().set_visible(False)
+            return new_ax
+
+    def _setup_subplots(self):
+        if self.subplots:
+            fig, axes = _subplots(naxes=self.nseries,
+                                  sharex=self.sharex, sharey=self.sharey,
+                                  figsize=self.figsize, ax=self.ax,
+                                  layout=self.layout,
+                                  layout_type=self._layout_type)
+        else:
+            if self.ax is None:
+                fig = self.plt.figure(figsize=self.figsize)
+                axes = fig.add_subplot(111)
+            else:
+                fig = 
self.ax.get_figure() + if self.figsize is not None: + fig.set_size_inches(self.figsize) + axes = self.ax + + axes = _flatten(axes) + + if self.logx or self.loglog: + [a.set_xscale('log') for a in axes] + if self.logy or self.loglog: + [a.set_yscale('log') for a in axes] + + self.fig = fig + self.axes = axes + + @property + def result(self): + """ + Return result axes + """ + if self.subplots: + if self.layout is not None and not is_list_like(self.ax): + return self.axes.reshape(*self.layout) + else: + return self.axes + else: + sec_true = isinstance(self.secondary_y, bool) and self.secondary_y + all_sec = (is_list_like(self.secondary_y) and + len(self.secondary_y) == self.nseries) + if (sec_true or all_sec): + # if all data is plotted on secondary, return right axes + return self._get_ax_layer(self.axes[0], primary=False) + else: + return self.axes[0] + + def _compute_plot_data(self): + data = self.data + + if isinstance(data, Series): + label = self.label + if label is None and data.name is None: + label = 'None' + data = data.to_frame(name=label) + + numeric_data = data._convert(datetime=True)._get_numeric_data() + + try: + is_empty = numeric_data.empty + except AttributeError: + is_empty = not len(numeric_data) + + # no empty frames or series allowed + if is_empty: + raise TypeError('Empty {0!r}: no numeric data to ' + 'plot'.format(numeric_data.__class__.__name__)) + + self.data = numeric_data + + def _make_plot(self): + raise AbstractMethodError(self) + + def _add_table(self): + if self.table is False: + return + elif self.table is True: + data = self.data.transpose() + else: + data = self.table + ax = self._get_ax(0) + table(ax, data) + + def _post_plot_logic_common(self, ax, data): + """Common post process for each axes""" + labels = [pprint_thing(key) for key in data.index] + labels = dict(zip(range(len(data.index)), labels)) + + if self.orientation == 'vertical' or self.orientation is None: + if self._need_to_set_index: + xticklabels = [labels.get(x, '') 
for x in ax.get_xticks()] + ax.set_xticklabels(xticklabels) + self._apply_axis_properties(ax.xaxis, rot=self.rot, + fontsize=self.fontsize) + self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize) + elif self.orientation == 'horizontal': + if self._need_to_set_index: + yticklabels = [labels.get(y, '') for y in ax.get_yticks()] + ax.set_yticklabels(yticklabels) + self._apply_axis_properties(ax.yaxis, rot=self.rot, + fontsize=self.fontsize) + self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize) + else: # pragma no cover + raise ValueError + + def _post_plot_logic(self, ax, data): + """Post process for each axes. Overridden in child classes""" + pass + + def _adorn_subplots(self): + """Common post process unrelated to data""" + if len(self.axes) > 0: + all_axes = self._get_subplots() + nrows, ncols = self._get_axes_layout() + _handle_shared_axes(axarr=all_axes, nplots=len(all_axes), + naxes=nrows * ncols, nrows=nrows, + ncols=ncols, sharex=self.sharex, + sharey=self.sharey) + + for ax in self.axes: + if self.yticks is not None: + ax.set_yticks(self.yticks) + + if self.xticks is not None: + ax.set_xticks(self.xticks) + + if self.ylim is not None: + ax.set_ylim(self.ylim) + + if self.xlim is not None: + ax.set_xlim(self.xlim) + + ax.grid(self.grid) + + if self.title: + if self.subplots: + if is_list_like(self.title): + if len(self.title) != self.nseries: + msg = ('The length of `title` must equal the number ' + 'of columns if using `title` of type `list` ' + 'and `subplots=True`.\n' + 'length of title = {}\n' + 'number of columns = {}').format( + len(self.title), self.nseries) + raise ValueError(msg) + + for (ax, title) in zip(self.axes, self.title): + ax.set_title(title) + else: + self.fig.suptitle(self.title) + else: + if is_list_like(self.title): + msg = ('Using `title` of type `list` is not supported ' + 'unless `subplots=True` is passed') + raise ValueError(msg) + self.axes[0].set_title(self.title) + + def _apply_axis_properties(self, axis, 
rot=None, fontsize=None): + labels = axis.get_majorticklabels() + axis.get_minorticklabels() + for label in labels: + if rot is not None: + label.set_rotation(rot) + if fontsize is not None: + label.set_fontsize(fontsize) + + @property + def legend_title(self): + if not isinstance(self.data.columns, MultiIndex): + name = self.data.columns.name + if name is not None: + name = pprint_thing(name) + return name + else: + stringified = map(pprint_thing, + self.data.columns.names) + return ','.join(stringified) + + def _add_legend_handle(self, handle, label, index=None): + if label is not None: + if self.mark_right and index is not None: + if self.on_right(index): + label = label + ' (right)' + self.legend_handles.append(handle) + self.legend_labels.append(label) + + def _make_legend(self): + ax, leg = self._get_ax_legend(self.axes[0]) + + handles = [] + labels = [] + title = '' + + if not self.subplots: + if leg is not None: + title = leg.get_title().get_text() + handles = leg.legendHandles + labels = [x.get_text() for x in leg.get_texts()] + + if self.legend: + if self.legend == 'reverse': + self.legend_handles = reversed(self.legend_handles) + self.legend_labels = reversed(self.legend_labels) + + handles += self.legend_handles + labels += self.legend_labels + if self.legend_title is not None: + title = self.legend_title + + if len(handles) > 0: + ax.legend(handles, labels, loc='best', title=title) + + elif self.subplots and self.legend: + for ax in self.axes: + if ax.get_visible(): + ax.legend(loc='best') + + def _get_ax_legend(self, ax): + leg = ax.get_legend() + other_ax = (getattr(ax, 'left_ax', None) or + getattr(ax, 'right_ax', None)) + other_leg = None + if other_ax is not None: + other_leg = other_ax.get_legend() + if leg is None and other_leg is not None: + leg = other_leg + ax = other_ax + return ax, leg + + @cache_readonly + def plt(self): + import matplotlib.pyplot as plt + return plt + + @staticmethod + def mpl_ge_1_3_1(): + return _mpl_ge_1_3_1() + + 
@staticmethod + def mpl_ge_1_5_0(): + return _mpl_ge_1_5_0() + + _need_to_set_index = False + + def _get_xticks(self, convert_period=False): + index = self.data.index + is_datetype = index.inferred_type in ('datetime', 'date', + 'datetime64', 'time') + + if self.use_index: + if convert_period and isinstance(index, PeriodIndex): + self.data = self.data.reindex(index=index.sort_values()) + x = self.data.index.to_timestamp()._mpl_repr() + elif index.is_numeric(): + """ + Matplotlib supports numeric values or datetime objects as + xaxis values. Taking LBYL approach here, by the time + matplotlib raises exception when using non numeric/datetime + values for xaxis, several actions are already taken by plt. + """ + x = index._mpl_repr() + elif is_datetype: + self.data = self.data.sort_index() + x = self.data.index._mpl_repr() + else: + self._need_to_set_index = True + x = lrange(len(index)) + else: + x = lrange(len(index)) + + return x + + @classmethod + def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): + mask = isnull(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if isinstance(x, Index): + x = x._mpl_repr() + + if is_errorbar: + if 'xerr' in kwds: + kwds['xerr'] = np.array(kwds.get('xerr')) + if 'yerr' in kwds: + kwds['yerr'] = np.array(kwds.get('yerr')) + return ax.errorbar(x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is + # unsupported + if style is not None: + args = (x, y, style) + else: + args = (x, y) + return ax.plot(*args, **kwds) + + def _get_index_name(self): + if isinstance(self.data.index, MultiIndex): + name = self.data.index.names + if any(x is not None for x in name): + name = ','.join([pprint_thing(x) for x in name]) + else: + name = None + else: + name = self.data.index.name + if name is not None: + name = pprint_thing(name) + + return name + + @classmethod + def _get_ax_layer(cls, ax, primary=True): + """get left (primary) or right (secondary) axes""" + if primary: + 
return getattr(ax, 'left_ax', ax) + else: + return getattr(ax, 'right_ax', ax) + + def _get_ax(self, i): + # get the twinx ax if appropriate + if self.subplots: + ax = self.axes[i] + ax = self._maybe_right_yaxis(ax, i) + self.axes[i] = ax + else: + ax = self.axes[0] + ax = self._maybe_right_yaxis(ax, i) + + ax.get_yaxis().set_visible(True) + return ax + + def on_right(self, i): + if isinstance(self.secondary_y, bool): + return self.secondary_y + + if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)): + return self.data.columns[i] in self.secondary_y + + def _apply_style_colors(self, colors, kwds, col_num, label): + """ + Manage style and color based on column number and its label. + Returns tuple of appropriate style and kwds which "color" may be added. + """ + style = None + if self.style is not None: + if isinstance(self.style, list): + try: + style = self.style[col_num] + except IndexError: + pass + elif isinstance(self.style, dict): + style = self.style.get(label, style) + else: + style = self.style + + has_color = 'color' in kwds or self.colormap is not None + nocolor_style = style is None or re.match('[a-z]+', style) is None + if (has_color or self.subplots) and nocolor_style: + kwds['color'] = colors[col_num % len(colors)] + return style, kwds + + def _get_colors(self, num_colors=None, color_kwds='color'): + if num_colors is None: + num_colors = self.nseries + + return _get_standard_colors(num_colors=num_colors, + colormap=self.colormap, + color=self.kwds.get(color_kwds)) + + def _parse_errorbars(self, label, err): + """ + Look for error keyword arguments and return the actual errorbar data + or return the error DataFrame/dict + + Error bars can be specified in several ways: + Series: the user provides a pandas.Series object of the same + length as the data + ndarray: provides a np.ndarray of the same length as the data + DataFrame/dict: error values are paired with keys matching the + key in the plotted DataFrame + str: the name of the column 
within the plotted DataFrame + """ + + if err is None: + return None + + from pandas import DataFrame, Series + + def match_labels(data, e): + e = e.reindex_axis(data.index) + return e + + # key-matched DataFrame + if isinstance(err, DataFrame): + + err = match_labels(self.data, err) + # key-matched dict + elif isinstance(err, dict): + pass + + # Series of error values + elif isinstance(err, Series): + # broadcast error series across data + err = match_labels(self.data, err) + err = np.atleast_2d(err) + err = np.tile(err, (self.nseries, 1)) + + # errors are a column in the dataframe + elif isinstance(err, string_types): + evalues = self.data[err].values + self.data = self.data[self.data.columns.drop(err)] + err = np.atleast_2d(evalues) + err = np.tile(err, (self.nseries, 1)) + + elif is_list_like(err): + if is_iterator(err): + err = np.atleast_2d(list(err)) + else: + # raw error values + err = np.atleast_2d(err) + + err_shape = err.shape + + # asymmetrical error bars + if err.ndim == 3: + if (err_shape[0] != self.nseries) or \ + (err_shape[1] != 2) or \ + (err_shape[2] != len(self.data)): + msg = "Asymmetrical error bars should be provided " + \ + "with the shape (%u, 2, %u)" % \ + (self.nseries, len(self.data)) + raise ValueError(msg) + + # broadcast errors to each data series + if len(err) == 1: + err = np.tile(err, (self.nseries, 1)) + + elif is_number(err): + err = np.tile([err], (self.nseries, len(self.data))) + + else: + msg = "No valid %s detected" % label + raise ValueError(msg) + + return err + + def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): + from pandas import DataFrame + errors = {} + + for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]): + if flag: + err = self.errors[kw] + # user provided label-matched dataframe of errors + if isinstance(err, (DataFrame, dict)): + if label is not None and label in err.keys(): + err = err[label] + else: + err = None + elif index is not None and err is not None: + err = err[index] + + if err is 
not None: + errors[kw] = err + return errors + + def _get_subplots(self): + from matplotlib.axes import Subplot + return [ax for ax in self.axes[0].get_figure().get_axes() + if isinstance(ax, Subplot)] + + def _get_axes_layout(self): + axes = self._get_subplots() + x_set = set() + y_set = set() + for ax in axes: + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + return (len(y_set), len(x_set)) + + +class PlanePlot(MPLPlot): + """ + Abstract class for plotting on plane, currently scatter and hexbin. + """ + + _layout_type = 'single' + + def __init__(self, data, x, y, **kwargs): + MPLPlot.__init__(self, data, **kwargs) + if x is None or y is None: + raise ValueError(self._kind + ' requires and x and y column') + if is_integer(x) and not self.data.columns.holds_integer(): + x = self.data.columns[x] + if is_integer(y) and not self.data.columns.holds_integer(): + y = self.data.columns[y] + self.x = x + self.y = y + + @property + def nseries(self): + return 1 + + def _post_plot_logic(self, ax, data): + x, y = self.x, self.y + ax.set_ylabel(pprint_thing(y)) + ax.set_xlabel(pprint_thing(x)) + + +class ScatterPlot(PlanePlot): + _kind = 'scatter' + + def __init__(self, data, x, y, s=None, c=None, **kwargs): + if s is None: + # hide the matplotlib default for size, in case we want to change + # the handling of this argument later + s = 20 + super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs) + if is_integer(c) and not self.data.columns.holds_integer(): + c = self.data.columns[c] + self.c = c + + def _make_plot(self): + x, y, c, data = self.x, self.y, self.c, self.data + ax = self.axes[0] + + c_is_column = is_hashable(c) and c in self.data.columns + + # plot a colorbar only if a colormap is provided or necessary + cb = self.kwds.pop('colorbar', self.colormap or c_is_column) + + # pandas uses colormap, matplotlib uses cmap. 
+ cmap = self.colormap or 'Greys' + cmap = self.plt.cm.get_cmap(cmap) + color = self.kwds.pop("color", None) + if c is not None and color is not None: + raise TypeError('Specify exactly one of `c` and `color`') + elif c is None and color is None: + c_values = self.plt.rcParams['patch.facecolor'] + elif color is not None: + c_values = color + elif c_is_column: + c_values = self.data[c].values + else: + c_values = c + + if self.legend and hasattr(self, 'label'): + label = self.label + else: + label = None + scatter = ax.scatter(data[x].values, data[y].values, c=c_values, + label=label, cmap=cmap, **self.kwds) + if cb: + img = ax.collections[0] + kws = dict(ax=ax) + if self.mpl_ge_1_3_1(): + kws['label'] = c if c_is_column else '' + self.fig.colorbar(img, **kws) + + if label is not None: + self._add_legend_handle(scatter, label) + else: + self.legend = False + + errors_x = self._get_errorbars(label=x, index=0, yerr=False) + errors_y = self._get_errorbars(label=y, index=0, xerr=False) + if len(errors_x) > 0 or len(errors_y) > 0: + err_kwds = dict(errors_x, **errors_y) + err_kwds['ecolor'] = scatter.get_facecolor()[0] + ax.errorbar(data[x].values, data[y].values, + linestyle='none', **err_kwds) + + +class HexBinPlot(PlanePlot): + _kind = 'hexbin' + + def __init__(self, data, x, y, C=None, **kwargs): + super(HexBinPlot, self).__init__(data, x, y, **kwargs) + if is_integer(C) and not self.data.columns.holds_integer(): + C = self.data.columns[C] + self.C = C + + def _make_plot(self): + x, y, data, C = self.x, self.y, self.data, self.C + ax = self.axes[0] + # pandas uses colormap, matplotlib uses cmap. 
+ cmap = self.colormap or 'BuGn' + cmap = self.plt.cm.get_cmap(cmap) + cb = self.kwds.pop('colorbar', True) + + if C is None: + c_values = None + else: + c_values = data[C].values + + ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, + **self.kwds) + if cb: + img = ax.collections[0] + self.fig.colorbar(img, ax=ax) + + def _make_legend(self): + pass + + +class LinePlot(MPLPlot): + _kind = 'line' + _default_rot = 0 + orientation = 'vertical' + + def __init__(self, data, **kwargs): + MPLPlot.__init__(self, data, **kwargs) + if self.stacked: + self.data = self.data.fillna(value=0) + self.x_compat = plot_params['x_compat'] + if 'x_compat' in self.kwds: + self.x_compat = bool(self.kwds.pop('x_compat')) + + def _is_ts_plot(self): + # this is slightly deceptive + return not self.x_compat and self.use_index and self._use_dynamic_x() + + def _use_dynamic_x(self): + from pandas.plotting._timeseries import _use_dynamic_x + return _use_dynamic_x(self._get_ax(0), self.data) + + def _make_plot(self): + if self._is_ts_plot(): + from pandas.plotting._timeseries import _maybe_convert_index + data = _maybe_convert_index(self._get_ax(0), self.data) + + x = data.index # dummy, not used + plotf = self._ts_plot + it = self._iter_data(data=data, keep_index=True) + else: + x = self._get_xticks(convert_period=True) + plotf = self._plot + it = self._iter_data() + + stacking_id = self._get_stacking_id() + is_errorbar = any(e is not None for e in self.errors.values()) + + colors = self._get_colors() + for i, (label, y) in enumerate(it): + ax = self._get_ax(i) + kwds = self.kwds.copy() + style, kwds = self._apply_style_colors(colors, kwds, i, label) + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) # .encode('utf-8') + kwds['label'] = label + + newlines = plotf(ax, x, y, style=style, column_num=i, + stacking_id=stacking_id, + is_errorbar=is_errorbar, + **kwds) + self._add_legend_handle(newlines[0], label, index=i) 
+ + lines = _get_all_lines(ax) + left, right = _get_xlim(lines) + ax.set_xlim(left, right) + + @classmethod + def _plot(cls, ax, x, y, style=None, column_num=None, + stacking_id=None, **kwds): + # column_num is used to get the target column from protf in line and + # area plots + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label']) + lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds) + cls._update_stacker(ax, stacking_id, y) + return lines + + @classmethod + def _ts_plot(cls, ax, x, data, style=None, **kwds): + from pandas.plotting._timeseries import (_maybe_resample, + _decorate_axes, + format_dateaxis) + # accept x to be consistent with normal plot func, + # x is not passed to tsplot as it uses data.index as x coordinate + # column_num must be in kwds for stacking purpose + freq, data = _maybe_resample(data, ax, kwds) + + # Set ax with freq info + _decorate_axes(ax, freq, kwds) + # digging deeper + if hasattr(ax, 'left_ax'): + _decorate_axes(ax.left_ax, freq, kwds) + if hasattr(ax, 'right_ax'): + _decorate_axes(ax.right_ax, freq, kwds) + ax._plot_data.append((data, cls._kind, kwds)) + + lines = cls._plot(ax, data.index, data.values, style=style, **kwds) + # set date formatter, locators and rescale limits + format_dateaxis(ax, ax.freq, data.index) + return lines + + def _get_stacking_id(self): + if self.stacked: + return id(self.data) + else: + return None + + @classmethod + def _initialize_stacker(cls, ax, stacking_id, n): + if stacking_id is None: + return + if not hasattr(ax, '_stacker_pos_prior'): + ax._stacker_pos_prior = {} + if not hasattr(ax, '_stacker_neg_prior'): + ax._stacker_neg_prior = {} + ax._stacker_pos_prior[stacking_id] = np.zeros(n) + ax._stacker_neg_prior[stacking_id] = np.zeros(n) + + @classmethod + def _get_stacked_values(cls, ax, stacking_id, values, label): + if stacking_id is None: + return values + if not hasattr(ax, '_stacker_pos_prior'): + 
# stacker may not be initialized for subplots + cls._initialize_stacker(ax, stacking_id, len(values)) + + if (values >= 0).all(): + return ax._stacker_pos_prior[stacking_id] + values + elif (values <= 0).all(): + return ax._stacker_neg_prior[stacking_id] + values + + raise ValueError('When stacked is True, each column must be either ' + 'all positive or negative.' + '{0} contains both positive and negative values' + .format(label)) + + @classmethod + def _update_stacker(cls, ax, stacking_id, values): + if stacking_id is None: + return + if (values >= 0).all(): + ax._stacker_pos_prior[stacking_id] += values + elif (values <= 0).all(): + ax._stacker_neg_prior[stacking_id] += values + + def _post_plot_logic(self, ax, data): + condition = (not self._use_dynamic_x() and + data.index.is_all_dates and + not self.subplots or + (self.subplots and self.sharex)) + + index_name = self._get_index_name() + + if condition: + # irregular TS rotated 30 deg. by default + # probably a better place to check / set this. 
+ if not self._rot_set: + self.rot = 30 + format_date_labels(ax, rot=self.rot) + + if index_name is not None and self.use_index: + ax.set_xlabel(index_name) + + +class AreaPlot(LinePlot): + _kind = 'area' + + def __init__(self, data, **kwargs): + kwargs.setdefault('stacked', True) + data = data.fillna(value=0) + LinePlot.__init__(self, data, **kwargs) + + if not self.stacked: + # use smaller alpha to distinguish overlap + self.kwds.setdefault('alpha', 0.5) + + if self.logy or self.loglog: + raise ValueError("Log-y scales are not supported in area plot") + + @classmethod + def _plot(cls, ax, x, y, style=None, column_num=None, + stacking_id=None, is_errorbar=False, **kwds): + + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label']) + + # need to remove label, because subplots uses mpl legend as it is + line_kwds = kwds.copy() + if cls.mpl_ge_1_5_0(): + line_kwds.pop('label') + lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) + + # get data from the line to get coordinates for fill_between + xdata, y_values = lines[0].get_data(orig=False) + + # unable to use ``_get_stacked_values`` here to get starting point + if stacking_id is None: + start = np.zeros(len(y)) + elif (y >= 0).all(): + start = ax._stacker_pos_prior[stacking_id] + elif (y <= 0).all(): + start = ax._stacker_neg_prior[stacking_id] + else: + start = np.zeros(len(y)) + + if 'color' not in kwds: + kwds['color'] = lines[0].get_color() + + rect = ax.fill_between(xdata, start, y_values, **kwds) + cls._update_stacker(ax, stacking_id, y) + + # LinePlot expects list of artists + res = [rect] if cls.mpl_ge_1_5_0() else lines + return res + + def _add_legend_handle(self, handle, label, index=None): + if not self.mpl_ge_1_5_0(): + from matplotlib.patches import Rectangle + # Because fill_between isn't supported in legend, + # specifically add Rectangle handle here + alpha = self.kwds.get('alpha', None) + handle = 
Rectangle((0, 0), 1, 1, fc=handle.get_color(), + alpha=alpha) + LinePlot._add_legend_handle(self, handle, label, index=index) + + def _post_plot_logic(self, ax, data): + LinePlot._post_plot_logic(self, ax, data) + + if self.ylim is None: + if (data >= 0).all().all(): + ax.set_ylim(0, None) + elif (data <= 0).all().all(): + ax.set_ylim(None, 0) + + +class BarPlot(MPLPlot): + _kind = 'bar' + _default_rot = 90 + orientation = 'vertical' + + def __init__(self, data, **kwargs): + self.bar_width = kwargs.pop('width', 0.5) + pos = kwargs.pop('position', 0.5) + kwargs.setdefault('align', 'center') + self.tick_pos = np.arange(len(data)) + + self.bottom = kwargs.pop('bottom', 0) + self.left = kwargs.pop('left', 0) + + self.log = kwargs.pop('log', False) + MPLPlot.__init__(self, data, **kwargs) + + if self.stacked or self.subplots: + self.tickoffset = self.bar_width * pos + if kwargs['align'] == 'edge': + self.lim_offset = self.bar_width / 2 + else: + self.lim_offset = 0 + else: + if kwargs['align'] == 'edge': + w = self.bar_width / self.nseries + self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5 + self.lim_offset = w * 0.5 + else: + self.tickoffset = self.bar_width * pos + self.lim_offset = 0 + + self.ax_pos = self.tick_pos - self.tickoffset + + def _args_adjust(self): + if is_list_like(self.bottom): + self.bottom = np.array(self.bottom) + if is_list_like(self.left): + self.left = np.array(self.left) + + @classmethod + def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + return ax.bar(x, y, w, bottom=start, log=log, **kwds) + + @property + def _start_base(self): + return self.bottom + + def _make_plot(self): + import matplotlib as mpl + + colors = self._get_colors() + ncolors = len(colors) + + pos_prior = neg_prior = np.zeros(len(self.data)) + K = self.nseries + + for i, (label, y) in enumerate(self._iter_data(fillna=0)): + ax = self._get_ax(i) + kwds = self.kwds.copy() + kwds['color'] = colors[i % ncolors] + + errors = self._get_errorbars(label=label, index=i) 
+ kwds = dict(kwds, **errors) + + label = pprint_thing(label) + + if (('yerr' in kwds) or ('xerr' in kwds)) \ + and (kwds.get('ecolor') is None): + kwds['ecolor'] = mpl.rcParams['xtick.color'] + + start = 0 + if self.log and (y >= 1).all(): + start = 1 + start = start + self._start_base + + if self.subplots: + w = self.bar_width / 2 + rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, + start=start, label=label, + log=self.log, **kwds) + ax.set_title(label) + elif self.stacked: + mask = y > 0 + start = np.where(mask, pos_prior, neg_prior) + self._start_base + w = self.bar_width / 2 + rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, + start=start, label=label, + log=self.log, **kwds) + pos_prior = pos_prior + np.where(mask, y, 0) + neg_prior = neg_prior + np.where(mask, 0, y) + else: + w = self.bar_width / K + rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w, + start=start, label=label, + log=self.log, **kwds) + self._add_legend_handle(rect, label, index=i) + + def _post_plot_logic(self, ax, data): + if self.use_index: + str_index = [pprint_thing(key) for key in data.index] + else: + str_index = [pprint_thing(key) for key in range(data.shape[0])] + name = self._get_index_name() + + s_edge = self.ax_pos[0] - 0.25 + self.lim_offset + e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset + + self._decorate_ticks(ax, name, str_index, s_edge, e_edge) + + def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): + ax.set_xlim((start_edge, end_edge)) + ax.set_xticks(self.tick_pos) + ax.set_xticklabels(ticklabels) + if name is not None and self.use_index: + ax.set_xlabel(name) + + +class BarhPlot(BarPlot): + _kind = 'barh' + _default_rot = 0 + orientation = 'horizontal' + + @property + def _start_base(self): + return self.left + + @classmethod + def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + return ax.barh(x, y, w, left=start, log=log, **kwds) + + def _decorate_ticks(self, ax, name, ticklabels, start_edge, 
end_edge): + # horizontal bars + ax.set_ylim((start_edge, end_edge)) + ax.set_yticks(self.tick_pos) + ax.set_yticklabels(ticklabels) + if name is not None and self.use_index: + ax.set_ylabel(name) + + +class HistPlot(LinePlot): + _kind = 'hist' + + def __init__(self, data, bins=10, bottom=0, **kwargs): + self.bins = bins # use mpl default + self.bottom = bottom + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) + + def _args_adjust(self): + if is_integer(self.bins): + # create common bin edge + values = (self.data._convert(datetime=True)._get_numeric_data()) + values = np.ravel(values) + values = values[~isnull(values)] + + hist, self.bins = np.histogram( + values, bins=self.bins, + range=self.kwds.get('range', None), + weights=self.kwds.get('weights', None)) + + if is_list_like(self.bottom): + self.bottom = np.array(self.bottom) + + @classmethod + def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, + stacking_id=None, **kwds): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(bins) - 1) + y = y[~isnull(y)] + + base = np.zeros(len(bins) - 1) + bottom = bottom + \ + cls._get_stacked_values(ax, stacking_id, base, kwds['label']) + # ignore style + n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds) + cls._update_stacker(ax, stacking_id, n) + return patches + + def _make_plot(self): + colors = self._get_colors() + stacking_id = self._get_stacking_id() + + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + + kwds = self.kwds.copy() + + label = pprint_thing(label) + kwds['label'] = label + + style, kwds = self._apply_style_colors(colors, kwds, i, label) + if style is not None: + kwds['style'] = style + + kwds = self._make_plot_keywords(kwds, y) + artists = self._plot(ax, y, column_num=i, + stacking_id=stacking_id, **kwds) + self._add_legend_handle(artists[0], label, index=i) + + def _make_plot_keywords(self, kwds, y): + """merge BoxPlot/KdePlot properties 
to passed kwds""" + # y is required for KdePlot + kwds['bottom'] = self.bottom + kwds['bins'] = self.bins + return kwds + + def _post_plot_logic(self, ax, data): + if self.orientation == 'horizontal': + ax.set_xlabel('Frequency') + else: + ax.set_ylabel('Frequency') + + @property + def orientation(self): + if self.kwds.get('orientation', None) == 'horizontal': + return 'horizontal' + else: + return 'vertical' + + +class KdePlot(HistPlot): + _kind = 'kde' + orientation = 'vertical' + + def __init__(self, data, bw_method=None, ind=None, **kwargs): + MPLPlot.__init__(self, data, **kwargs) + self.bw_method = bw_method + self.ind = ind + + def _args_adjust(self): + pass + + def _get_ind(self, y): + if self.ind is None: + # np.nanmax() and np.nanmin() ignores the missing values + sample_range = np.nanmax(y) - np.nanmin(y) + ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, + np.nanmax(y) + 0.5 * sample_range, 1000) + else: + ind = self.ind + return ind + + @classmethod + def _plot(cls, ax, y, style=None, bw_method=None, ind=None, + column_num=None, stacking_id=None, **kwds): + from scipy.stats import gaussian_kde + from scipy import __version__ as spv + + y = remove_na(y) + + if LooseVersion(spv) >= '0.11.0': + gkde = gaussian_kde(y, bw_method=bw_method) + else: + gkde = gaussian_kde(y) + if bw_method is not None: + msg = ('bw_method was added in Scipy 0.11.0.' + + ' Scipy version in use is %s.' 
% spv) + warnings.warn(msg) + + y = gkde.evaluate(ind) + lines = MPLPlot._plot(ax, ind, y, style=style, **kwds) + return lines + + def _make_plot_keywords(self, kwds, y): + kwds['bw_method'] = self.bw_method + kwds['ind'] = self._get_ind(y) + return kwds + + def _post_plot_logic(self, ax, data): + ax.set_ylabel('Density') + + +class PiePlot(MPLPlot): + _kind = 'pie' + _layout_type = 'horizontal' + + def __init__(self, data, kind=None, **kwargs): + data = data.fillna(value=0) + if (data < 0).any().any(): + raise ValueError("{0} doesn't allow negative values".format(kind)) + MPLPlot.__init__(self, data, kind=kind, **kwargs) + + def _args_adjust(self): + self.grid = False + self.logy = False + self.logx = False + self.loglog = False + + def _validate_color_args(self): + pass + + def _make_plot(self): + colors = self._get_colors( + num_colors=len(self.data), color_kwds='colors') + self.kwds.setdefault('colors', colors) + + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + if label is not None: + label = pprint_thing(label) + ax.set_ylabel(label) + + kwds = self.kwds.copy() + + def blank_labeler(label, value): + if value == 0: + return '' + else: + return label + + idx = [pprint_thing(v) for v in self.data.index] + labels = kwds.pop('labels', idx) + # labels is used for each wedge's labels + # Blank out labels for values of 0 so they don't overlap + # with nonzero wedges + if labels is not None: + blabels = [blank_labeler(l, value) for + l, value in zip(labels, y)] + else: + blabels = None + results = ax.pie(y, labels=blabels, **kwds) + + if kwds.get('autopct', None) is not None: + patches, texts, autotexts = results + else: + patches, texts = results + autotexts = [] + + if self.fontsize is not None: + for t in texts + autotexts: + t.set_fontsize(self.fontsize) + + # leglabels is used for legend labels + leglabels = labels if labels is not None else idx + for p, l in zip(patches, leglabels): + self._add_legend_handle(p, l) + + +class 
BoxPlot(LinePlot): + _kind = 'box' + _layout_type = 'horizontal' + + _valid_return_types = (None, 'axes', 'dict', 'both') + # namedtuple to hold results + BP = namedtuple("Boxplot", ['ax', 'lines']) + + def __init__(self, data, return_type='axes', **kwargs): + # Do not call LinePlot.__init__ which may fill nan + if return_type not in self._valid_return_types: + raise ValueError( + "return_type must be {None, 'axes', 'dict', 'both'}") + + self.return_type = return_type + MPLPlot.__init__(self, data, **kwargs) + + def _args_adjust(self): + if self.subplots: + # Disable label ax sharing. Otherwise, all subplots shows last + # column label + if self.orientation == 'vertical': + self.sharex = False + else: + self.sharey = False + + @classmethod + def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds): + if y.ndim == 2: + y = [remove_na(v) for v in y] + # Boxplot fails with empty arrays, so need to add a NaN + # if any cols are empty + # GH 8181 + y = [v if v.size > 0 else np.array([np.nan]) for v in y] + else: + y = remove_na(y) + bp = ax.boxplot(y, **kwds) + + if return_type == 'dict': + return bp, bp + elif return_type == 'both': + return cls.BP(ax=ax, lines=bp), bp + else: + return ax, bp + + def _validate_color_args(self): + if 'color' in self.kwds: + if self.colormap is not None: + warnings.warn("'color' and 'colormap' cannot be used " + "simultaneously. 
Using 'color'") + self.color = self.kwds.pop('color') + + if isinstance(self.color, dict): + valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] + for key, values in compat.iteritems(self.color): + if key not in valid_keys: + raise ValueError("color dict contains invalid " + "key '{0}' " + "The key must be either {1}" + .format(key, valid_keys)) + else: + self.color = None + + # get standard colors for default + colors = _get_standard_colors(num_colors=3, + colormap=self.colormap, + color=None) + # use 2 colors by default, for box/whisker and median + # flier colors isn't needed here + # because it can be specified by ``sym`` kw + self._boxes_c = colors[0] + self._whiskers_c = colors[0] + self._medians_c = colors[2] + self._caps_c = 'k' # mpl default + + def _get_colors(self, num_colors=None, color_kwds='color'): + pass + + def maybe_color_bp(self, bp): + if isinstance(self.color, dict): + boxes = self.color.get('boxes', self._boxes_c) + whiskers = self.color.get('whiskers', self._whiskers_c) + medians = self.color.get('medians', self._medians_c) + caps = self.color.get('caps', self._caps_c) + else: + # Other types are forwarded to matplotlib + # If None, use default colors + boxes = self.color or self._boxes_c + whiskers = self.color or self._whiskers_c + medians = self.color or self._medians_c + caps = self.color or self._caps_c + + from matplotlib.artist import setp + setp(bp['boxes'], color=boxes, alpha=1) + setp(bp['whiskers'], color=whiskers, alpha=1) + setp(bp['medians'], color=medians, alpha=1) + setp(bp['caps'], color=caps, alpha=1) + + def _make_plot(self): + if self.subplots: + self._return_obj = Series() + + for i, (label, y) in enumerate(self._iter_data()): + ax = self._get_ax(i) + kwds = self.kwds.copy() + + ret, bp = self._plot(ax, y, column_num=i, + return_type=self.return_type, **kwds) + self.maybe_color_bp(bp) + self._return_obj[label] = ret + + label = [pprint_thing(label)] + self._set_ticklabels(ax, label) + else: + y = self.data.values.T + ax 
= self._get_ax(0) + kwds = self.kwds.copy() + + ret, bp = self._plot(ax, y, column_num=0, + return_type=self.return_type, **kwds) + self.maybe_color_bp(bp) + self._return_obj = ret + + labels = [l for l, _ in self._iter_data()] + labels = [pprint_thing(l) for l in labels] + if not self.use_index: + labels = [pprint_thing(key) for key in range(len(labels))] + self._set_ticklabels(ax, labels) + + def _set_ticklabels(self, ax, labels): + if self.orientation == 'vertical': + ax.set_xticklabels(labels) + else: + ax.set_yticklabels(labels) + + def _make_legend(self): + pass + + def _post_plot_logic(self, ax, data): + pass + + @property + def orientation(self): + if self.kwds.get('vert', True): + return 'vertical' + else: + return 'horizontal' + + @property + def result(self): + if self.return_type is None: + return super(BoxPlot, self).result + else: + return self._return_obj + + +# kinds supported by both dataframe and series +_common_kinds = ['line', 'bar', 'barh', + 'kde', 'density', 'area', 'hist', 'box'] +# kinds supported by dataframe +_dataframe_kinds = ['scatter', 'hexbin'] +# kinds supported only by series or dataframe single column +_series_kinds = ['pie'] +_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds + +_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot, + ScatterPlot, HexBinPlot, AreaPlot, PiePlot] + +_plot_klass = {} +for klass in _klasses: + _plot_klass[klass._kind] = klass + + +def _plot(data, x=None, y=None, subplots=False, + ax=None, kind='line', **kwds): + kind = _get_standard_kind(kind.lower().strip()) + if kind in _all_kinds: + klass = _plot_klass[kind] + else: + raise ValueError("%r is not a valid plot kind" % kind) + + from pandas import DataFrame + if kind in _dataframe_kinds: + if isinstance(data, DataFrame): + plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax, + kind=kind, **kwds) + else: + raise ValueError("plot kind %r can only be used for data frames" + % kind) + + elif kind in _series_kinds: + if 
isinstance(data, DataFrame): + if y is None and subplots is False: + msg = "{0} requires either y column or 'subplots=True'" + raise ValueError(msg.format(kind)) + elif y is not None: + if is_integer(y) and not data.columns.holds_integer(): + y = data.columns[y] + # converted to series actually. copy to not modify + data = data[y].copy() + data.index.name = y + plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) + else: + if isinstance(data, DataFrame): + if x is not None: + if is_integer(x) and not data.columns.holds_integer(): + x = data.columns[x] + data = data.set_index(x) + + if y is not None: + if is_integer(y) and not data.columns.holds_integer(): + y = data.columns[y] + label = kwds['label'] if 'label' in kwds else y + series = data[y].copy() # Don't modify + series.name = label + + for kw in ['xerr', 'yerr']: + if (kw in kwds) and \ + (isinstance(kwds[kw], string_types) or + is_integer(kwds[kw])): + try: + kwds[kw] = data[kwds[kw]] + except (IndexError, KeyError, TypeError): + pass + data = series + plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) + + plot_obj.generate() + plot_obj.draw() + return plot_obj.result + + +df_kind = """- 'scatter' : scatter plot + - 'hexbin' : hexbin plot""" +series_kind = "" + +df_coord = """x : label or position, default None + y : label or position, default None + Allows plotting of one column versus another""" +series_coord = "" + +df_unique = """stacked : boolean, default False in line and + bar plots, and True in area plot. If True, create stacked plot. 
+ sort_columns : boolean, default False + Sort column names to determine plot ordering + secondary_y : boolean or sequence, default False + Whether to plot on the secondary y-axis + If a list/tuple, which columns to plot on secondary y-axis""" +series_unique = """label : label argument to provide to plot + secondary_y : boolean or sequence of ints, default False + If True then y-axis will be on the right""" + +df_ax = """ax : matplotlib axes object, default None + subplots : boolean, default False + Make separate subplots for each column + sharex : boolean, default True if ax is None else False + In case subplots=True, share x axis and set some x axis labels to + invisible; defaults to True if ax is None otherwise False if an ax + is passed in; Be aware, that passing in both an ax and sharex=True + will alter all x axis labels for all axis in a figure! + sharey : boolean, default False + In case subplots=True, share y axis and set some y axis labels to + invisible + layout : tuple (optional) + (rows, columns) for the layout of subplots""" +series_ax = """ax : matplotlib axes object + If not passed, uses gca()""" + +df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe + column, the values of that column are used to color each point. + - If `kind` = 'hexbin', you can control the size of the bins with the + `gridsize` argument. By default, a histogram of the counts around each + `(x, y)` point is computed. You can specify alternative aggregations + by passing values to the `C` and `reduce_C_function` arguments. + `C` specifies the value at each `(x, y)` point and `reduce_C_function` + is a function of one argument that reduces all the values in a bin to + a single number (e.g. 
`mean`, `max`, `sum`, `std`).""" +series_note = "" + +_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df', + klass_kind=df_kind, klass_coord=df_coord, + klass_ax=df_ax, klass_unique=df_unique, + klass_note=df_note) +_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s', + klass_kind=series_kind, + klass_coord=series_coord, klass_ax=series_ax, + klass_unique=series_unique, + klass_note=series_note) + +_shared_docs['plot'] = """ + Make plots of %(klass)s using matplotlib / pylab. + + *New in version 0.17.0:* Each plot kind has a corresponding method on the + ``%(klass)s.plot`` accessor: + ``%(klass_obj)s.plot(kind='line')`` is equivalent to + ``%(klass_obj)s.plot.line()``. + + Parameters + ---------- + data : %(klass)s + %(klass_coord)s + kind : str + - 'line' : line plot (default) + - 'bar' : vertical bar plot + - 'barh' : horizontal bar plot + - 'hist' : histogram + - 'box' : boxplot + - 'kde' : Kernel Density Estimation plot + - 'density' : same as 'kde' + - 'area' : area plot + - 'pie' : pie plot + %(klass_kind)s + %(klass_ax)s + figsize : a tuple (width, height) in inches + use_index : boolean, default True + Use index as ticks for x axis + title : string or list + Title to use for the plot. If a string is passed, print the string at + the top of the figure. If a list is passed and `subplots` is True, + print each item in the list above the corresponding subplot. 
+ grid : boolean, default None (matlab style default) + Axis grid lines + legend : False/True/'reverse' + Place legend on axis subplots + style : list or dict + matplotlib line style per column + logx : boolean, default False + Use log scaling on x axis + logy : boolean, default False + Use log scaling on y axis + loglog : boolean, default False + Use log scaling on both x and y axes + xticks : sequence + Values to use for the xticks + yticks : sequence + Values to use for the yticks + xlim : 2-tuple/list + ylim : 2-tuple/list + rot : int, default None + Rotation for ticks (xticks for vertical, yticks for horizontal plots) + fontsize : int, default None + Font size for xticks and yticks + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If string, load colormap with that name + from matplotlib. + colorbar : boolean, optional + If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) + position : float + Specify relative alignments for bar plot layout. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) + layout : tuple (optional) + (rows, columns) for the layout of the plot + table : boolean, Series or DataFrame, default False + If True, draw a table using the data in the DataFrame and the data will + be transposed to meet matplotlib's default layout. + If a Series or DataFrame is passed, use passed data to draw a table. + yerr : DataFrame, Series, array-like, dict and str + See :ref:`Plotting with Error Bars ` for + detail. + xerr : same types as yerr. 
+ %(klass_unique)s + mark_right : boolean, default True + When using a secondary_y axis, automatically mark the column + labels with "(right)" in the legend + kwds : keywords + Options to pass to matplotlib plotting method + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + + Notes + ----- + + - See matplotlib documentation online for more on this subject + - If `kind` = 'bar' or 'barh', you can specify relative alignments + for bar plot layout by `position` keyword. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) + %(klass_note)s + + """ + + +@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs) +def plot_frame(data, x=None, y=None, kind='line', ax=None, + subplots=False, sharex=None, sharey=False, layout=None, + figsize=None, use_index=True, title=None, grid=None, + legend=True, style=None, logx=False, logy=False, loglog=False, + xticks=None, yticks=None, xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + secondary_y=False, sort_columns=False, + **kwds): + return _plot(data, kind=kind, x=x, y=y, ax=ax, + subplots=subplots, sharex=sharex, sharey=sharey, + layout=layout, figsize=figsize, use_index=use_index, + title=title, grid=grid, legend=legend, + style=style, logx=logx, logy=logy, loglog=loglog, + xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, + rot=rot, fontsize=fontsize, colormap=colormap, table=table, + yerr=yerr, xerr=xerr, + secondary_y=secondary_y, sort_columns=sort_columns, + **kwds) + + +@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs) +def plot_series(data, kind='line', ax=None, # Series unique + figsize=None, use_index=True, title=None, grid=None, + legend=False, style=None, logx=False, logy=False, loglog=False, + xticks=None, yticks=None, xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + label=None, secondary_y=False, # Series unique + **kwds): + + import matplotlib.pyplot as plt + 
""" + If no axes is specified, check whether there are existing figures + If there is no existing figures, _gca() will + create a figure with the default figsize, causing the figsize=parameter to + be ignored. + """ + if ax is None and len(plt.get_fignums()) > 0: + ax = _gca() + ax = MPLPlot._get_ax_layer(ax) + return _plot(data, kind=kind, ax=ax, + figsize=figsize, use_index=use_index, title=title, + grid=grid, legend=legend, + style=style, logx=logx, logy=logy, loglog=loglog, + xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, + rot=rot, fontsize=fontsize, colormap=colormap, table=table, + yerr=yerr, xerr=xerr, + label=label, secondary_y=secondary_y, + **kwds) + + +_shared_docs['boxplot'] = """ + Make a box plot from DataFrame column optionally grouped by some columns or + other inputs + + Parameters + ---------- + data : the pandas object holding the data + column : column name or list of names, or vector + Can be any valid input to groupby + by : string or sequence + Column in the DataFrame to group by + ax : Matplotlib axes object, optional + fontsize : int or string + rot : label rotation angle + figsize : A tuple (width, height) in inches + grid : Setting this to True will show the grid + layout : tuple (optional) + (rows, columns) for the layout of the plot + return_type : {None, 'axes', 'dict', 'both'}, default None + The kind of object to return. The default is ``axes`` + 'axes' returns the matplotlib axes the boxplot is drawn on; + 'dict' returns a dictionary whose values are the matplotlib + Lines of the boxplot; + 'both' returns a namedtuple with the axes and dict. + + When grouping with ``by``, a Series mapping columns to ``return_type`` + is returned, unless ``return_type`` is None, in which case a NumPy + array of axes is returned with the same shape as ``layout``. + See the prose documentation for more. 
+ + kwds : other plotting keyword arguments to be passed to matplotlib boxplot + function + + Returns + ------- + lines : dict + ax : matplotlib Axes + (ax, lines): namedtuple + + Notes + ----- + Use ``return_type='dict'`` when you want to tweak the appearance + of the lines after plotting. In this case a dict containing the Lines + making up the boxes, caps, fliers, medians, and whiskers is returned. + """ + + +@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) +def boxplot(data, column=None, by=None, ax=None, fontsize=None, + rot=0, grid=True, figsize=None, layout=None, return_type=None, + **kwds): + + # validate return_type: + if return_type not in BoxPlot._valid_return_types: + raise ValueError("return_type must be {'axes', 'dict', 'both'}") + + from pandas import Series, DataFrame + if isinstance(data, Series): + data = DataFrame({'x': data}) + column = 'x' + + def _get_colors(): + return _get_standard_colors(color=kwds.get('color'), num_colors=1) + + def maybe_color_bp(bp): + if 'color' not in kwds: + from matplotlib.artist import setp + setp(bp['boxes'], color=colors[0], alpha=1) + setp(bp['whiskers'], color=colors[0], alpha=1) + setp(bp['medians'], color=colors[2], alpha=1) + + def plot_group(keys, values, ax): + keys = [pprint_thing(x) for x in keys] + values = [remove_na(v) for v in values] + bp = ax.boxplot(values, **kwds) + if fontsize is not None: + ax.tick_params(axis='both', labelsize=fontsize) + if kwds.get('vert', 1): + ax.set_xticklabels(keys, rotation=rot) + else: + ax.set_yticklabels(keys, rotation=rot) + maybe_color_bp(bp) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type == 'dict': + return bp + elif return_type == 'both': + return BoxPlot.BP(ax=ax, lines=bp) + else: + return ax + + colors = _get_colors() + if column is None: + columns = None + else: + if isinstance(column, (list, tuple)): + columns = column + else: + columns = [column] + + if by is not None: + # Prefer array return type for 2-D plots to 
match the subplot layout + # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 + result = _grouped_plot_by_column(plot_group, data, columns=columns, + by=by, grid=grid, figsize=figsize, + ax=ax, layout=layout, + return_type=return_type) + else: + if return_type is None: + return_type = 'axes' + if layout is not None: + raise ValueError("The 'layout' keyword is not supported when " + "'by' is None") + + if ax is None: + ax = _gca() + data = data._get_numeric_data() + if columns is None: + columns = data.columns + else: + data = data[columns] + + result = plot_group(columns, data.values.T, ax) + ax.grid(grid) + + return result + + +def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, + **kwargs): + """ + Make a scatter plot from two DataFrame columns + + Parameters + ---------- + data : DataFrame + x : Column name for the x-axis values + y : Column name for the y-axis values + ax : Matplotlib axis object + figsize : A tuple (width, height) in inches + grid : Setting this to True will show the grid + kwargs : other plotting keyword arguments + To be passed to scatter function + + Returns + ------- + fig : matplotlib.Figure + """ + import matplotlib.pyplot as plt + + kwargs.setdefault('edgecolors', 'none') + + def plot_group(group, ax): + xvals = group[x].values + yvals = group[y].values + ax.scatter(xvals, yvals, **kwargs) + ax.grid(grid) + + if by is not None: + fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax) + else: + if ax is None: + fig = plt.figure() + ax = fig.add_subplot(111) + else: + fig = ax.get_figure() + plot_group(data, ax) + ax.set_ylabel(pprint_thing(y)) + ax.set_xlabel(pprint_thing(x)) + + ax.grid(grid) + + return fig + + +def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, + xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, + sharey=False, figsize=None, layout=None, bins=10, **kwds): + """ + Draw histogram of the DataFrame's series using matplotlib / pylab. 
+ + Parameters + ---------- + data : DataFrame + column : string or sequence + If passed, will be used to limit data to a subset of columns + by : object, optional + If passed, then used to form histograms for separate groups + grid : boolean, default True + Whether to show axis grid lines + xlabelsize : int, default None + If specified changes the x-axis label size + xrot : float, default None + rotation of x axis labels + ylabelsize : int, default None + If specified changes the y-axis label size + yrot : float, default None + rotation of y axis labels + ax : matplotlib axes object, default None + sharex : boolean, default True if ax is None else False + In case subplots=True, share x axis and set some x axis labels to + invisible; defaults to True if ax is None otherwise False if an ax + is passed in; Be aware, that passing in both an ax and sharex=True + will alter all x axis labels for all subplots in a figure! + sharey : boolean, default False + In case subplots=True, share y axis and set some y axis labels to + invisible + figsize : tuple + The size of the figure to create in inches by default + layout : tuple, optional + Tuple of (rows, columns) for the layout of the histograms + bins : integer, default 10 + Number of histogram bins to be used + kwds : other plotting keyword arguments + To be passed to hist function + """ + + if by is not None: + axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, + figsize=figsize, sharex=sharex, sharey=sharey, + layout=layout, bins=bins, xlabelsize=xlabelsize, + xrot=xrot, ylabelsize=ylabelsize, + yrot=yrot, **kwds) + return axes + + if column is not None: + if not isinstance(column, (list, np.ndarray, Index)): + column = [column] + data = data[column] + data = data._get_numeric_data() + naxes = len(data.columns) + + fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False, + sharex=sharex, sharey=sharey, figsize=figsize, + layout=layout) + _axes = _flatten(axes) + + for i, col in 
enumerate(_try_sort(data.columns)): + ax = _axes[i] + ax.hist(data[col].dropna().values, bins=bins, **kwds) + ax.set_title(col) + ax.grid(grid) + + _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot) + fig.subplots_adjust(wspace=0.3, hspace=0.3) + + return axes + + +def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, + xrot=None, ylabelsize=None, yrot=None, figsize=None, + bins=10, **kwds): + """ + Draw histogram of the input series using matplotlib + + Parameters + ---------- + by : object, optional + If passed, then used to form histograms for separate groups + ax : matplotlib axis object + If not passed, uses gca() + grid : boolean, default True + Whether to show axis grid lines + xlabelsize : int, default None + If specified changes the x-axis label size + xrot : float, default None + rotation of x axis labels + ylabelsize : int, default None + If specified changes the y-axis label size + yrot : float, default None + rotation of y axis labels + figsize : tuple, default None + figure size in inches by default + bins: integer, default 10 + Number of histogram bins to be used + kwds : keywords + To be passed to the actual plotting function + + Notes + ----- + See matplotlib documentation online for more on this + + """ + import matplotlib.pyplot as plt + + if by is None: + if kwds.get('layout', None) is not None: + raise ValueError("The 'layout' keyword is not supported when " + "'by' is None") + # hack until the plotting interface is a bit more unified + fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else + plt.figure(figsize=figsize)) + if (figsize is not None and tuple(figsize) != + tuple(fig.get_size_inches())): + fig.set_size_inches(*figsize, forward=True) + if ax is None: + ax = fig.gca() + elif ax.get_figure() != fig: + raise AssertionError('passed axis not bound to passed figure') + values = self.dropna().values + + ax.hist(values, bins=bins, **kwds) + ax.grid(grid) + axes = np.array([ax]) + 
+ _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot) + + else: + if 'figure' in kwds: + raise ValueError("Cannot pass 'figure' when using the " + "'by' argument, since a new 'Figure' instance " + "will be created") + axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, + bins=bins, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot, **kwds) + + if hasattr(axes, 'ndim'): + if axes.ndim == 1 and len(axes) == 1: + return axes[0] + return axes + + +def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, + layout=None, sharex=False, sharey=False, rot=90, grid=True, + xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, + **kwargs): + """ + Grouped histogram + + Parameters + ---------- + data: Series/DataFrame + column: object, optional + by: object, optional + ax: axes, optional + bins: int, default 50 + figsize: tuple, optional + layout: optional + sharex: boolean, default False + sharey: boolean, default False + rot: int, default 90 + grid: bool, default True + kwargs: dict, keyword arguments passed to matplotlib.Axes.hist + + Returns + ------- + axes: collection of Matplotlib Axes + """ + def plot_group(group, ax): + ax.hist(group.dropna().values, bins=bins, **kwargs) + + xrot = xrot or rot + + fig, axes = _grouped_plot(plot_group, data, column=column, + by=by, sharex=sharex, sharey=sharey, ax=ax, + figsize=figsize, layout=layout, rot=rot) + + _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, + ylabelsize=ylabelsize, yrot=yrot) + + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, + hspace=0.5, wspace=0.3) + return axes + + +def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, + rot=0, grid=True, ax=None, figsize=None, + layout=None, **kwds): + """ + Make box plots from DataFrameGroupBy data. 
+ + Parameters + ---------- + grouped : Grouped DataFrame + subplots : + * ``False`` - no subplots will be used + * ``True`` - create a subplot for each group + column : column name or list of names, or vector + Can be any valid input to groupby + fontsize : int or string + rot : label rotation angle + grid : Setting this to True will show the grid + ax : Matplotlib axis object, default None + figsize : A tuple (width, height) in inches + layout : tuple (optional) + (rows, columns) for the layout of the plot + kwds : other plotting keyword arguments to be passed to matplotlib boxplot + function + + Returns + ------- + dict of key/value = group key/DataFrame.boxplot return value + or DataFrame.boxplot return value in case subplots=figures=False + + Examples + -------- + >>> import pandas + >>> import numpy as np + >>> import itertools + >>> + >>> tuples = [t for t in itertools.product(range(1000), range(4))] + >>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) + >>> data = np.random.randn(len(index),4) + >>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index) + >>> + >>> grouped = df.groupby(level='lvl1') + >>> boxplot_frame_groupby(grouped) + >>> + >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) + >>> boxplot_frame_groupby(grouped, subplots=False) + """ + if subplots is True: + naxes = len(grouped) + fig, axes = _subplots(naxes=naxes, squeeze=False, + ax=ax, sharex=False, sharey=True, + figsize=figsize, layout=layout) + axes = _flatten(axes) + + ret = Series() + for (key, group), ax in zip(grouped, axes): + d = group.boxplot(ax=ax, column=column, fontsize=fontsize, + rot=rot, grid=grid, **kwds) + ax.set_title(pprint_thing(key)) + ret.loc[key] = d + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, + right=0.9, wspace=0.2) + else: + from pandas.tools.concat import concat + keys, frames = zip(*grouped) + if grouped.axis == 0: + df = concat(frames, keys=keys, axis=1) + else: + if len(frames) > 1: + df = 
frames[0].join(frames[1::]) + else: + df = frames[0] + ret = df.boxplot(column=column, fontsize=fontsize, rot=rot, + grid=grid, ax=ax, figsize=figsize, + layout=layout, **kwds) + return ret + + +def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, + figsize=None, sharex=True, sharey=True, layout=None, + rot=0, ax=None, **kwargs): + from pandas import DataFrame + + if figsize == 'default': + # allowed to specify mpl default with 'default' + warnings.warn("figsize='default' is deprecated. Specify figure" + "size by tuple instead", FutureWarning, stacklevel=4) + figsize = None + + grouped = data.groupby(by) + if column is not None: + grouped = grouped[column] + + naxes = len(grouped) + fig, axes = _subplots(naxes=naxes, figsize=figsize, + sharex=sharex, sharey=sharey, ax=ax, + layout=layout) + + _axes = _flatten(axes) + + for i, (key, group) in enumerate(grouped): + ax = _axes[i] + if numeric_only and isinstance(group, DataFrame): + group = group._get_numeric_data() + plotf(group, ax, **kwargs) + ax.set_title(pprint_thing(key)) + + return fig, axes + + +def _grouped_plot_by_column(plotf, data, columns=None, by=None, + numeric_only=True, grid=False, + figsize=None, ax=None, layout=None, + return_type=None, **kwargs): + grouped = data.groupby(by) + if columns is None: + if not isinstance(by, (list, tuple)): + by = [by] + columns = data._get_numeric_data().columns.difference(by) + naxes = len(columns) + fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True, + figsize=figsize, ax=ax, layout=layout) + + _axes = _flatten(axes) + + result = Series() + ax_values = [] + + for i, col in enumerate(columns): + ax = _axes[i] + gp_col = grouped[col] + keys, values = zip(*gp_col) + re_plotf = plotf(keys, values, ax, **kwargs) + ax.set_title(col) + ax.set_xlabel(pprint_thing(by)) + ax_values.append(re_plotf) + ax.grid(grid) + + result = Series(ax_values, index=columns) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type is None: + 
result = axes + + byline = by[0] if len(by) == 1 else by + fig.suptitle('Boxplot grouped by %s' % byline) + fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + + return result + + +class BasePlotMethods(PandasObject): + + def __init__(self, data): + self._data = data + + def __call__(self, *args, **kwargs): + raise NotImplementedError + + +class SeriesPlotMethods(BasePlotMethods): + """Series plotting accessor and method + + Examples + -------- + >>> s.plot.line() + >>> s.plot.bar() + >>> s.plot.hist() + + Plotting methods can also be accessed by calling the accessor as a method + with the ``kind`` argument: + ``s.plot(kind='line')`` is equivalent to ``s.plot.line()`` + """ + + def __call__(self, kind='line', ax=None, + figsize=None, use_index=True, title=None, grid=None, + legend=False, style=None, logx=False, logy=False, + loglog=False, xticks=None, yticks=None, + xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + label=None, secondary_y=False, **kwds): + return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, + use_index=use_index, title=title, grid=grid, + legend=legend, style=style, logx=logx, logy=logy, + loglog=loglog, xticks=xticks, yticks=yticks, + xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, + colormap=colormap, table=table, yerr=yerr, + xerr=xerr, label=label, secondary_y=secondary_y, + **kwds) + __call__.__doc__ = plot_series.__doc__ + + def line(self, **kwds): + """ + Line plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='line', **kwds) + + def bar(self, **kwds): + """ + Vertical bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
+ + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='bar', **kwds) + + def barh(self, **kwds): + """ + Horizontal bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='barh', **kwds) + + def box(self, **kwds): + """ + Boxplot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='box', **kwds) + + def hist(self, bins=10, **kwds): + """ + Histogram + + .. versionadded:: 0.17.0 + + Parameters + ---------- + bins: integer, default 10 + Number of histogram bins to be used + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='hist', bins=bins, **kwds) + + def kde(self, **kwds): + """ + Kernel Density Estimate plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='kde', **kwds) + + density = kde + + def area(self, **kwds): + """ + Area plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='area', **kwds) + + def pie(self, **kwds): + """ + Pie chart + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
+ + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='pie', **kwds) + + +class FramePlotMethods(BasePlotMethods): + """DataFrame plotting accessor and method + + Examples + -------- + >>> df.plot.line() + >>> df.plot.scatter('x', 'y') + >>> df.plot.hexbin() + + These plotting methods can also be accessed by calling the accessor as a + method with the ``kind`` argument: + ``df.plot(kind='line')`` is equivalent to ``df.plot.line()`` + """ + + def __call__(self, x=None, y=None, kind='line', ax=None, + subplots=False, sharex=None, sharey=False, layout=None, + figsize=None, use_index=True, title=None, grid=None, + legend=True, style=None, logx=False, logy=False, loglog=False, + xticks=None, yticks=None, xlim=None, ylim=None, + rot=None, fontsize=None, colormap=None, table=False, + yerr=None, xerr=None, + secondary_y=False, sort_columns=False, **kwds): + return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, + subplots=subplots, sharex=sharex, sharey=sharey, + layout=layout, figsize=figsize, use_index=use_index, + title=title, grid=grid, legend=legend, style=style, + logx=logx, logy=logy, loglog=loglog, xticks=xticks, + yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, + fontsize=fontsize, colormap=colormap, table=table, + yerr=yerr, xerr=xerr, secondary_y=secondary_y, + sort_columns=sort_columns, **kwds) + __call__.__doc__ = plot_frame.__doc__ + + def line(self, x=None, y=None, **kwds): + """ + Line plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='line', x=x, y=y, **kwds) + + def bar(self, x=None, y=None, **kwds): + """ + Vertical bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. 
+ **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='bar', x=x, y=y, **kwds) + + def barh(self, x=None, y=None, **kwds): + """ + Horizontal bar plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='barh', x=x, y=y, **kwds) + + def box(self, by=None, **kwds): + """ + Boxplot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + by : string or sequence + Column in the DataFrame to group by. + \*\*kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='box', by=by, **kwds) + + def hist(self, by=None, bins=10, **kwds): + """ + Histogram + + .. versionadded:: 0.17.0 + + Parameters + ---------- + by : string or sequence + Column in the DataFrame to group by. + bins: integer, default 10 + Number of histogram bins to be used + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='hist', by=by, bins=bins, **kwds) + + def kde(self, **kwds): + """ + Kernel Density Estimate plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='kde', **kwds) + + density = kde + + def area(self, x=None, y=None, **kwds): + """ + Area plot + + .. 
versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='area', x=x, y=y, **kwds) + + def pie(self, y=None, **kwds): + """ + Pie chart + + .. versionadded:: 0.17.0 + + Parameters + ---------- + y : label or position, optional + Column to plot. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='pie', y=y, **kwds) + + def scatter(self, x, y, s=None, c=None, **kwds): + """ + Scatter plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + s : scalar or array_like, optional + Size of each point. + c : label or position, optional + Color of each point. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds) + + def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, + **kwds): + """ + Hexbin plot + + .. versionadded:: 0.17.0 + + Parameters + ---------- + x, y : label or position, optional + Coordinates for each point. + C : label or position, optional + The value at each `(x, y)` point. + reduce_C_function : callable, optional + Function of one argument that reduces all the values in a bin to + a single number (e.g. `mean`, `max`, `sum`, `std`). + gridsize : int, optional + Number of bins. + **kwds : optional + Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. 
+ + Returns + ------- + axes : matplotlib.AxesSubplot or np.array of them + """ + if reduce_C_function is not None: + kwds['reduce_C_function'] = reduce_C_function + if gridsize is not None: + kwds['gridsize'] = gridsize + return self(kind='hexbin', x=x, y=y, C=C, **kwds) diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py new file mode 100644 index 0000000000000..2c32a532dd2e2 --- /dev/null +++ b/pandas/plotting/_misc.py @@ -0,0 +1,573 @@ +# being a bit too dynamic +# pylint: disable=E1101 +from __future__ import division + +import numpy as np + +from pandas.util.decorators import deprecate_kwarg +from pandas.types.missing import notnull +from pandas.compat import range, lrange, lmap, zip +from pandas.formats.printing import pprint_thing + + +from pandas.plotting._style import _get_standard_colors +from pandas.plotting._tools import _subplots, _set_ticks_props + + +def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, + diagonal='hist', marker='.', density_kwds=None, + hist_kwds=None, range_padding=0.05, **kwds): + """ + Draw a matrix of scatter plots. + + Parameters + ---------- + frame : DataFrame + alpha : float, optional + amount of transparency applied + figsize : (float,float), optional + a tuple (width, height) in inches + ax : Matplotlib axis object, optional + grid : bool, optional + setting this to True will show the grid + diagonal : {'hist', 'kde'} + pick between 'kde' and 'hist' for + either Kernel Density Estimation or Histogram + plot in the diagonal + marker : str, optional + Matplotlib marker type, default '.' 
+ hist_kwds : other plotting keyword arguments + To be passed to hist function + density_kwds : other plotting keyword arguments + To be passed to kernel density estimate plot + range_padding : float, optional + relative extension of axis range in x and y + with respect to (x_max - x_min) or (y_max - y_min), + default 0.05 + kwds : other plotting keyword arguments + To be passed to scatter function + + Examples + -------- + >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) + >>> scatter_matrix(df, alpha=0.2) + """ + + df = frame._get_numeric_data() + n = df.columns.size + naxes = n * n + fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, + squeeze=False) + + # no gaps between subplots + fig.subplots_adjust(wspace=0, hspace=0) + + mask = notnull(df) + + marker = _get_marker_compat(marker) + + hist_kwds = hist_kwds or {} + density_kwds = density_kwds or {} + + # GH 14855 + kwds.setdefault('edgecolors', 'none') + + boundaries_list = [] + for a in df.columns: + values = df[a].values[mask[a].values] + rmin_, rmax_ = np.min(values), np.max(values) + rdelta_ext = (rmax_ - rmin_) * range_padding / 2. + boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) + + for i, a in zip(lrange(n), df.columns): + for j, b in zip(lrange(n), df.columns): + ax = axes[i, j] + + if i == j: + values = df[a].values[mask[a].values] + + # Deal with the diagonal by drawing a histogram there. 
+ if diagonal == 'hist': + ax.hist(values, **hist_kwds) + + elif diagonal in ('kde', 'density'): + from scipy.stats import gaussian_kde + y = values + gkde = gaussian_kde(y) + ind = np.linspace(y.min(), y.max(), 1000) + ax.plot(ind, gkde.evaluate(ind), **density_kwds) + + ax.set_xlim(boundaries_list[i]) + + else: + common = (mask[a] & mask[b]).values + + ax.scatter(df[b][common], df[a][common], + marker=marker, alpha=alpha, **kwds) + + ax.set_xlim(boundaries_list[j]) + ax.set_ylim(boundaries_list[i]) + + ax.set_xlabel(b) + ax.set_ylabel(a) + + if j != 0: + ax.yaxis.set_visible(False) + if i != n - 1: + ax.xaxis.set_visible(False) + + if len(df.columns) > 1: + lim1 = boundaries_list[0] + locs = axes[0][1].yaxis.get_majorticklocs() + locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] + adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) + + lim0 = axes[0][0].get_ylim() + adj = adj * (lim0[1] - lim0[0]) + lim0[0] + axes[0][0].yaxis.set_ticks(adj) + + if np.all(locs == locs.astype(int)): + # if all ticks are int + locs = locs.astype(int) + axes[0][0].yaxis.set_ticklabels(locs) + + _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + return axes + + +def _get_marker_compat(marker): + import matplotlib.lines as mlines + import matplotlib as mpl + if mpl.__version__ < '1.1.0' and marker == '.': + return 'o' + if marker not in mlines.lineMarkers: + return 'o' + return marker + + +def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): + """RadViz - a multivariate data visualization algorithm + + Parameters: + ----------- + frame: DataFrame + class_column: str + Column name containing class names + ax: Matplotlib axis object, optional + color: list or tuple, optional + Colors to use for the different classes + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If string, load colormap with that name + from matplotlib. 
+ kwds: keywords + Options to pass to matplotlib scatter plotting method + + Returns: + -------- + ax: Matplotlib axis object + """ + import matplotlib.pyplot as plt + import matplotlib.patches as patches + + def normalize(series): + a = min(series) + b = max(series) + return (series - a) / (b - a) + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + df = frame.drop(class_column, axis=1).apply(normalize) + + if ax is None: + ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) + + to_plot = {} + colors = _get_standard_colors(num_colors=len(classes), colormap=colormap, + color_type='random', color=color) + + for kls in classes: + to_plot[kls] = [[], []] + + m = len(frame.columns) - 1 + s = np.array([(np.cos(t), np.sin(t)) + for t in [2.0 * np.pi * (i / float(m)) + for i in range(m)]]) + + for i in range(n): + row = df.iloc[i].values + row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) + y = (s * row_).sum(axis=0) / row.sum() + kls = class_col.iat[i] + to_plot[kls][0].append(y[0]) + to_plot[kls][1].append(y[1]) + + for i, kls in enumerate(classes): + ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i], + label=pprint_thing(kls), **kwds) + ax.legend() + + ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none')) + + for xy, name in zip(s, df.columns): + + ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray')) + + if xy[0] < 0.0 and xy[1] < 0.0: + ax.text(xy[0] - 0.025, xy[1] - 0.025, name, + ha='right', va='top', size='small') + elif xy[0] < 0.0 and xy[1] >= 0.0: + ax.text(xy[0] - 0.025, xy[1] + 0.025, name, + ha='right', va='bottom', size='small') + elif xy[0] >= 0.0 and xy[1] < 0.0: + ax.text(xy[0] + 0.025, xy[1] - 0.025, name, + ha='left', va='top', size='small') + elif xy[0] >= 0.0 and xy[1] >= 0.0: + ax.text(xy[0] + 0.025, xy[1] + 0.025, name, + ha='left', va='bottom', size='small') + + ax.axis('equal') + return ax + + +@deprecate_kwarg(old_arg_name='data', new_arg_name='frame') +def 
andrews_curves(frame, class_column, ax=None, samples=200, color=None, + colormap=None, **kwds): + """ + Generates a matplotlib plot of Andrews curves, for visualising clusters of + multivariate data. + + Andrews curves have the functional form: + + f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + + x_4 sin(2t) + x_5 cos(2t) + ... + + Where x coefficients correspond to the values of each dimension and t is + linearly spaced between -pi and +pi. Each row of frame then corresponds to + a single curve. + + Parameters: + ----------- + frame : DataFrame + Data to be plotted, preferably normalized to (0.0, 1.0) + class_column : Name of the column containing class names + ax : matplotlib axes object, default None + samples : Number of points to plot in each curve + color: list or tuple, optional + Colors to use for the different classes + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If string, load colormap with that name + from matplotlib. + kwds: keywords + Options to pass to matplotlib plotting method + + Returns: + -------- + ax: Matplotlib axis object + + """ + from math import sqrt, pi + import matplotlib.pyplot as plt + + def function(amplitudes): + def f(t): + x1 = amplitudes[0] + result = x1 / sqrt(2.0) + + # Take the rest of the coefficients and resize them + # appropriately. Take a copy of amplitudes as otherwise numpy + # deletes the element from amplitudes itself. + coeffs = np.delete(np.copy(amplitudes), 0) + coeffs.resize(int((coeffs.size + 1) / 2), 2) + + # Generate the harmonics and arguments for the sin and cos + # functions. 
+ harmonics = np.arange(0, coeffs.shape[0]) + 1 + trig_args = np.outer(harmonics, t) + + result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + + coeffs[:, 1, np.newaxis] * np.cos(trig_args), + axis=0) + return result + return f + + n = len(frame) + class_col = frame[class_column] + classes = frame[class_column].drop_duplicates() + df = frame.drop(class_column, axis=1) + t = np.linspace(-pi, pi, samples) + used_legends = set([]) + + color_values = _get_standard_colors(num_colors=len(classes), + colormap=colormap, color_type='random', + color=color) + colors = dict(zip(classes, color_values)) + if ax is None: + ax = plt.gca(xlim=(-pi, pi)) + for i in range(n): + row = df.iloc[i].values + f = function(row) + y = f(t) + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(t, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(t, y, color=colors[kls], **kwds) + + ax.legend(loc='upper right') + ax.grid() + return ax + + +def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): + """Bootstrap plot. 
    Parameters:
    -----------
    series: Time series
    fig: matplotlib figure object, optional
    size: number of data points to consider during each sampling
    samples: number of times the bootstrap procedure is performed
    kwds: optional keyword arguments for plotting commands, must be accepted
        by both hist and plot

    Returns:
    --------
    fig: matplotlib figure
    """
    import random
    import matplotlib.pyplot as plt

    # random.sample(ndarray, int) fails on python 3.3, sigh
    data = list(series.values)
    samplings = [random.sample(data, size) for _ in range(samples)]

    # one point per bootstrap sample for each of the three statistics
    means = np.array([np.mean(sampling) for sampling in samplings])
    medians = np.array([np.median(sampling) for sampling in samplings])
    midranges = np.array([(min(sampling) + max(sampling)) * 0.5
                          for sampling in samplings])
    if fig is None:
        fig = plt.figure()
    x = lrange(samples)
    axes = []
    # top row: the statistic per sample; bottom row: its histogram
    ax1 = fig.add_subplot(2, 3, 1)
    ax1.set_xlabel("Sample")
    axes.append(ax1)
    ax1.plot(x, means, **kwds)
    ax2 = fig.add_subplot(2, 3, 2)
    ax2.set_xlabel("Sample")
    axes.append(ax2)
    ax2.plot(x, medians, **kwds)
    ax3 = fig.add_subplot(2, 3, 3)
    ax3.set_xlabel("Sample")
    axes.append(ax3)
    ax3.plot(x, midranges, **kwds)
    ax4 = fig.add_subplot(2, 3, 4)
    ax4.set_xlabel("Mean")
    axes.append(ax4)
    ax4.hist(means, **kwds)
    ax5 = fig.add_subplot(2, 3, 5)
    ax5.set_xlabel("Median")
    axes.append(ax5)
    ax5.hist(medians, **kwds)
    ax6 = fig.add_subplot(2, 3, 6)
    ax6.set_xlabel("Midrange")
    axes.append(ax6)
    ax6.hist(midranges, **kwds)
    for axis in axes:
        plt.setp(axis.get_xticklabels(), fontsize=8)
        plt.setp(axis.get_yticklabels(), fontsize=8)
    return fig


@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
                         use_columns=False, xticks=None, colormap=None,
                         axvlines=True, axvlines_kwds=None, sort_labels=False,
                         **kwds):
    """Parallel coordinates plotting.

    Parameters
    ----------
    frame: DataFrame
    class_column: str
        Column name containing class names
    cols: list, optional
        A list of column names to use
    ax: matplotlib.axis, optional
        matplotlib axis object
    color: list or tuple, optional
        Colors to use for the different classes
    use_columns: bool, optional
        If true, columns will be used as xticks
    xticks: list or tuple, optional
        A list of values to use for xticks
    colormap: str or matplotlib colormap, default None
        Colormap to use for line colors.
    axvlines: bool, optional
        If true, vertical lines will be added at each xtick
    axvlines_kwds: keywords, optional
        Options to be passed to axvline method for vertical lines
    sort_labels: bool, False
        Sort class_column labels, useful when assigning colours

        .. versionadded:: 0.20.0

    kwds: keywords
        Options to pass to matplotlib plotting method

    Returns
    -------
    ax: matplotlib axis object

    Examples
    --------
    >>> from pandas import read_csv
    >>> from pandas.tools.plotting import parallel_coordinates
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
                      '/pandas/tests/data/iris.csv')
    >>> parallel_coordinates(df, 'Name', color=('#556270',
                             '#4ECDC4', '#C7F464'))
    >>> plt.show()
    """
    # mutable default workaround: dict default is created per call
    if axvlines_kwds is None:
        axvlines_kwds = {'linewidth': 1, 'color': 'black'}
    import matplotlib.pyplot as plt

    n = len(frame)
    classes = frame[class_column].drop_duplicates()
    class_col = frame[class_column]

    if cols is None:
        df = frame.drop(class_column, axis=1)
    else:
        df = frame[cols]

    used_legends = set([])

    ncols = len(df.columns)

    # determine values to use for xticks
    if use_columns is True:
        if not np.all(np.isreal(list(df.columns))):
            raise ValueError('Columns must be numeric to be used as xticks')
        x = df.columns
    elif xticks is not None:
        if not np.all(np.isreal(xticks)):
            raise ValueError('xticks specified must be numeric')
        elif len(xticks) != ncols:
            raise ValueError('Length of xticks must match number of columns')
        x = xticks
    else:
        x = lrange(ncols)

    if ax is None:
        ax = plt.gca()

    color_values = _get_standard_colors(num_colors=len(classes),
                                        colormap=colormap, color_type='random',
                                        color=color)

    if sort_labels:
        # NOTE: classes and color_values are sorted independently; the
        # class -> color pairing therefore depends on both sort orders
        classes = sorted(classes)
        color_values = sorted(color_values)
    colors = dict(zip(classes, color_values))

    for i in range(n):
        y = df.iloc[i].values
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        # only add each class label to the legend once
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(x, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(x, y, color=colors[kls], **kwds)

    if axvlines:
        for i in x:
            ax.axvline(i, **axvlines_kwds)

    ax.set_xticks(x)
    ax.set_xticklabels(df.columns)
    ax.set_xlim(x[0], x[-1])
    ax.legend(loc='upper right')
    ax.grid()
    return ax


def lag_plot(series, lag=1, ax=None, **kwds):
    """Lag plot for time series.

    Parameters:
    -----------
    series: Time series
    lag: lag of the scatter plot, default 1
    ax: Matplotlib axis object, optional
    kwds: Matplotlib scatter method keyword arguments, optional

    Returns:
    --------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt

    # workaround because `c='b'` is hardcoded in matplotlibs scatter method
    kwds.setdefault('c', plt.rcParams['patch.facecolor'])

    # scatter y(t) against y(t + lag)
    data = series.values
    y1 = data[:-lag]
    y2 = data[lag:]
    if ax is None:
        ax = plt.gca()
    ax.set_xlabel("y(t)")
    ax.set_ylabel("y(t + %s)" % lag)
    ax.scatter(y1, y2, **kwds)
    return ax


def autocorrelation_plot(series, ax=None, **kwds):
    """Autocorrelation plot for time series.

    Parameters:
    -----------
    series: Time series
    ax: Matplotlib axis object, optional
    kwds : keywords
        Options to pass to matplotlib plotting method

    Returns:
    --------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt
    n = len(series)
    data = np.asarray(series)
    if ax is None:
        ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
    mean = np.mean(data)
    # c0 is the sample variance (autocovariance at lag 0)
    c0 = np.sum((data - mean) ** 2) / float(n)

    def r(h):
        # sample autocorrelation at lag h
        return ((data[:n - h] - mean) *
                (data[h:] - mean)).sum() / float(n) / c0
    x = np.arange(n) + 1
    y = lmap(r, x)
    # two-sided normal quantiles for the 95% / 99% confidence bands
    z95 = 1.959963984540054
    z99 = 2.5758293035489004
    ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
    ax.axhline(y=z95 / np.sqrt(n), color='grey')
    ax.axhline(y=0.0, color='black')
    ax.axhline(y=-z95 / np.sqrt(n), color='grey')
    ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
    ax.set_xlabel("Lag")
    ax.set_ylabel("Autocorrelation")
    ax.plot(x, y, **kwds)
    if 'label' in kwds:
        ax.legend()
    ax.grid()
    return ax


# --- patch continues: pandas/plotting/_style.py (new file) ---
# being a bit too dynamic
# pylint: disable=E1101
from __future__ import division

import warnings
from contextlib import contextmanager
import re

import numpy as np

from pandas.types.common import is_list_like
from pandas.compat import range, lrange, lmap
import pandas.compat as compat
from pandas.plotting._compat import _mpl_ge_2_0_0


# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
    'axes.axisbelow': True,
    'axes.color_cycle': ['#348ABD',
                         '#7A68A6',
                         '#A60628',
                         '#467821',
                         '#CF4457',
                         '#188487',
                         '#E24A33'],
    'axes.edgecolor': '#bcbcbc',
    'axes.facecolor': '#eeeeee',
    'axes.grid': True,
    'axes.labelcolor': '#555555',
    'axes.labelsize': 'large',
    'axes.linewidth': 1.0,
    'axes.titlesize': 'x-large',
    'figure.edgecolor': 'white',
    'figure.facecolor': 'white',
    'figure.figsize': (6.0, 4.0),
    'figure.subplot.hspace': 0.5,
    'font.family': 'monospace',
    'font.monospace': ['Andale Mono',
                       'Nimbus Mono L',
                       'Courier New',
                       'Courier',
                       'Fixed',
                       'Terminal',
                       'monospace'],
    'font.size': 10,
    'interactive': True,
    'keymap.all_axes': ['a'],
    'keymap.back': ['left', 'c', 'backspace'],
    'keymap.forward': ['right', 'v'],
    'keymap.fullscreen': ['f'],
    'keymap.grid': ['g'],
    'keymap.home': ['h', 'r', 'home'],
    'keymap.pan': ['p'],
    'keymap.save': ['s'],
    'keymap.xscale': ['L', 'k'],
    'keymap.yscale': ['l'],
    'keymap.zoom': ['o'],
    'legend.fancybox': True,
    'lines.antialiased': True,
    'lines.linewidth': 1.0,
    'patch.antialiased': True,
    'patch.edgecolor': '#EEEEEE',
    'patch.facecolor': '#348ABD',
    'patch.linewidth': 0.5,
    'toolbar': 'toolbar2',
    'xtick.color': '#555555',
    'xtick.direction': 'in',
    'xtick.major.pad': 6.0,
    'xtick.major.size': 0.0,
    'xtick.minor.pad': 6.0,
    'xtick.minor.size': 0.0,
    'ytick.color': '#555555',
    'ytick.direction': 'in',
    'ytick.major.pad': 6.0,
    'ytick.major.size': 0.0,
    'ytick.minor.pad': 6.0,
    'ytick.minor.size': 0.0
}


def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
                         color=None):
    # Resolve `color` / `colormap` / `color_type` into a list of colors of
    # length `num_colors`.  Precedence: colormap (unless color given), then
    # explicit color, then the color_type policy.
    import matplotlib.pyplot as plt

    if color is None and colormap is not None:
        if isinstance(colormap, compat.string_types):
            import matplotlib.cm as cm
            cmap = colormap
            colormap = cm.get_cmap(colormap)
            if colormap is None:
                raise ValueError("Colormap {0} is not recognized".format(cmap))
        # sample the colormap evenly over [0, 1]
        colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
    elif color is not None:
        if colormap is not None:
            warnings.warn("'color' and 'colormap' cannot be used "
                          "simultaneously. Using 'color'")
        colors = list(color) if is_list_like(color) else color
    else:
        if color_type == 'default':
            # need to call list() on the result to copy so we don't
            # modify the global rcParams below
            try:
                colors = [c['color']
                          for c in list(plt.rcParams['axes.prop_cycle'])]
            except KeyError:
                colors = list(plt.rcParams.get('axes.color_cycle',
                                               list('bgrcmyk')))
            if isinstance(colors, compat.string_types):
                colors = list(colors)
        elif color_type == 'random':
            import random

            def random_color(column):
                # deterministic per column index: seed with the index
                random.seed(column)
                return [random.random() for _ in range(3)]

            colors = lmap(random_color, lrange(num_colors))
        else:
            raise ValueError("color_type must be either 'default' or 'random'")

    if isinstance(colors, compat.string_types):
        import matplotlib.colors
        conv = matplotlib.colors.ColorConverter()

        def _maybe_valid_colors(colors):
            try:
                [conv.to_rgba(c) for c in colors]
                return True
            except ValueError:
                return False

        # check whether the string can be converted to a single color
        maybe_single_color = _maybe_valid_colors([colors])
        # check whether each character can be converted to a color
        maybe_color_cycle = _maybe_valid_colors(list(colors))
        if maybe_single_color and maybe_color_cycle and len(colors) > 1:
            # Special case for single str 'CN' match and convert to hex
            # for supporting matplotlib < 2.0.0
            if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0():
                hex_color = [c['color']
                             for c in list(plt.rcParams['axes.prop_cycle'])]
                colors = [hex_color[int(colors[1])]]
            else:
                # this may no longer be required
                msg = ("'{0}' can be parsed as both single color and "
                       "color cycle. Specify each color using a list "
                       "like ['{0}'] or {1}")
                raise ValueError(msg.format(colors, list(colors)))
        elif maybe_single_color:
            colors = [colors]
        else:
            # ``colors`` is regarded as color cycle.
            # mpl will raise error any of them is invalid
            pass

    if len(colors) != num_colors:
        # repeat/truncate the cycle so exactly num_colors are returned
        try:
            multiple = num_colors // len(colors) - 1
        except ZeroDivisionError:
            raise ValueError("Invalid color argument: ''")
        mod = num_colors % len(colors)

        colors += multiple * colors
        colors += colors[:mod]

    return colors


class _Options(dict):
    """
    Stores pandas plotting options.
    Allows for parameter aliasing so you can just use parameter names that are
    the same as the plot function parameters, but is stored in a canonical
    format that makes it easy to breakdown into groups later
    """

    # alias so the names are same as plotting method parameter names
    _ALIASES = {'x_compat': 'xaxis.compat'}
    _DEFAULT_KEYS = ['xaxis.compat']

    def __init__(self, deprecated=False):
        # `deprecated` marks the legacy `pandas.plot_params` entry point
        self._deprecated = deprecated
        # self['xaxis.compat'] = False
        super(_Options, self).__setitem__('xaxis.compat', False)

    def _warn_if_deprecated(self):
        # warn when accessed via the deprecated pandas.plot_params alias
        if self._deprecated:
            warnings.warn("'pandas.plot_params' is deprecated. Use "
                          "'pandas.plotting.plot_params' instead",
                          FutureWarning, stacklevel=3)

    def __getitem__(self, key):
        self._warn_if_deprecated()
        key = self._get_canonical_key(key)
        if key not in self:
            raise ValueError('%s is not a valid pandas plotting option' % key)
        return super(_Options, self).__getitem__(key)

    def __setitem__(self, key, value):
        self._warn_if_deprecated()
        key = self._get_canonical_key(key)
        return super(_Options, self).__setitem__(key, value)

    def __delitem__(self, key):
        key = self._get_canonical_key(key)
        if key in self._DEFAULT_KEYS:
            raise ValueError('Cannot remove default parameter %s' % key)
        return super(_Options, self).__delitem__(key)

    def __contains__(self, key):
        key = self._get_canonical_key(key)
        return super(_Options, self).__contains__(key)

    def reset(self):
        """
        Reset the option store to its initial state

        Returns
        -------
        None
        """
        self._warn_if_deprecated()
        self.__init__()

    def _get_canonical_key(self, key):
        # translate parameter-name aliases to the stored canonical key
        return self._ALIASES.get(key, key)

    @contextmanager
    def use(self, key, value):
        """
        Temporarily set a parameter value using the with statement.
        Aliasing allowed.
        """
        self._warn_if_deprecated()
        old_value = self[key]
        try:
            self[key] = value
            yield self
        finally:
            # always restore the previous value, even on error
            self[key] = old_value


plot_params = _Options()


# --- patch continues: pandas/plotting/_timeseries.py (new file) ---
# TODO: Use the fact that axis can have units to simplify the process

import numpy as np

from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.formats.printing import pprint_thing
import pandas.compat as compat

from pandas.plotting._converter import (TimeSeries_DateLocator,
                                        TimeSeries_DateFormatter,
                                        TimeSeries_TimedeltaFormatter)

# ---------------------------------------------------------------------
# Plotting functions and monkey patches


def tsplot(series, plotf, ax=None, **kwargs):
    """
    Plots a Series on the given Matplotlib axes or the current axes

    Parameters
    ----------
    axes : Axes
    series : Series

    Notes
    -----
    Supports same kwargs as Axes.plot

    """
    # Use inferred freq if possible, need a test case for inferred
    if ax is None:
        import matplotlib.pyplot as plt
        ax = plt.gca()

    freq, series = _maybe_resample(series, ax, kwargs)

    # Set ax with freq info
    _decorate_axes(ax, freq, kwargs)
    ax._plot_data.append((series, plotf, kwargs))
    lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)

    # set date formatter, locators and rescale limits
    format_dateaxis(ax, ax.freq, series.index)
    return lines


def _maybe_resample(series, ax, kwargs):
    # Resample `series` against the axes' existing frequency if necessary,
    # so newly plotted data shares the axes' period frequency.
    freq, ax_freq = _get_freq(ax, series)

    if freq is None:  # pragma: no cover
        raise ValueError('Cannot use dynamic axis without frequency info')

    # Convert DatetimeIndex to PeriodIndex
    if isinstance(series.index, DatetimeIndex):
        series = series.to_period(freq=freq)

    if ax_freq is not None and freq != ax_freq:
        if frequencies.is_superperiod(freq, ax_freq):  # upsample input
            series = series.copy()
            series.index = series.index.asfreq(ax_freq, how='s')
            freq = ax_freq
        elif _is_sup(freq, ax_freq):  # one is weekly
            how = kwargs.pop('how', 'last')
            series = getattr(series.resample('D'), how)().dropna()
            series = getattr(series.resample(ax_freq), how)().dropna()
            freq = ax_freq
        elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
            # new data is finer-grained: re-plot existing data at new freq
            _upsample_others(ax, freq, kwargs)
            ax_freq = freq
        else:  # pragma: no cover
            raise ValueError('Incompatible frequency conversion')
    return freq, series


def _is_sub(f1, f2):
    # True if one freq is weekly and the other is a sub-period of daily
    return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
            (f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))


def _is_sup(f1, f2):
    # True if one freq is weekly and the other is a super-period of daily
    return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
            (f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))


def _upsample_others(ax, freq, kwargs):
    # Re-plot everything already on `ax` (and any twinned axes) at `freq`,
    # preserving the legend if one was shown.
    legend = ax.get_legend()
    lines, labels = _replot_ax(ax, freq, kwargs)
    # NOTE(review): _replot_ax is invoked a second time here, which re-plots
    # the same data again — looks redundant; confirm before relying on it
    _replot_ax(ax, freq, kwargs)

    other_ax = None
    if hasattr(ax, 'left_ax'):
        other_ax = ax.left_ax
    if hasattr(ax, 'right_ax'):
        other_ax = ax.right_ax

    if other_ax is not None:
        rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
        lines.extend(rlines)
        labels.extend(rlabels)

    if (legend is not None and kwargs.get('legend', True) and
            len(lines) > 0):
        title = legend.get_title().get_text()
        if title == 'None':
            title = None
        ax.legend(lines, labels, loc='best', title=title)


def _replot_ax(ax, freq, kwargs):
    # Clear `ax` and re-plot its recorded data at the new `freq`;
    # returns the new lines and labels for legend reconstruction.
    data = getattr(ax, '_plot_data', None)

    # clear current axes and data
    ax._plot_data = []
    ax.clear()

    _decorate_axes(ax, freq, kwargs)

    lines = []
    labels = []
    if data is not None:
        for series, plotf, kwds in data:
            series = series.copy()
            idx = series.index.asfreq(freq, how='S')
            series.index = idx
            ax._plot_data.append((series, plotf, kwds))

            # for tsplot
            if isinstance(plotf, compat.string_types):
                from pandas.plotting._core import _plot_klass
                plotf = _plot_klass[plotf]._plot

            lines.append(plotf(ax, series.index._mpl_repr(),
                               series.values, **kwds)[0])
            labels.append(pprint_thing(series.name))

    return lines, labels


def _decorate_axes(ax, freq, kwargs):
    """Initialize axes for time-series plotting"""
    if not hasattr(ax, '_plot_data'):
        ax._plot_data = []

    # stash freq info on the axes and its x-axis for later re-plotting
    ax.freq = freq
    xaxis = ax.get_xaxis()
    xaxis.freq = freq
    if not hasattr(ax, 'legendlabels'):
        ax.legendlabels = [kwargs.get('label', None)]
    else:
        ax.legendlabels.append(kwargs.get('label', None))
    ax.view_interval = None
    ax.date_axis_info = None


def _get_ax_freq(ax):
    """
    Get the freq attribute of the ax object if set.
    Also checks shared axes (eg when using secondary yaxis, sharex=True
    or twinx)
    """
    ax_freq = getattr(ax, 'freq', None)
    if ax_freq is None:
        # check for left/right ax in case of secondary yaxis
        if hasattr(ax, 'left_ax'):
            ax_freq = getattr(ax.left_ax, 'freq', None)
        elif hasattr(ax, 'right_ax'):
            ax_freq = getattr(ax.right_ax, 'freq', None)
    if ax_freq is None:
        # check if a shared ax (sharex/twinx) has already freq set
        shared_axes = ax.get_shared_x_axes().get_siblings(ax)
        if len(shared_axes) > 1:
            for shared_ax in shared_axes:
                ax_freq = getattr(shared_ax, 'freq', None)
                if ax_freq is not None:
                    break
    return ax_freq


def _get_freq(ax, series):
    # Resolve the plotting frequency from the series index, falling back
    # to the axes' frequency; returns (period alias, axes freq).
    # get frequency from data
    freq = getattr(series.index, 'freq', None)
    if freq is None:
        freq = getattr(series.index, 'inferred_freq', None)

    ax_freq = _get_ax_freq(ax)

    # use axes freq if no data freq
    if freq is None:
        freq = ax_freq

    # get the period frequency
    if isinstance(freq, DateOffset):
        freq = freq.rule_code
    else:
        freq = frequencies.get_base_alias(freq)

    freq = frequencies.get_period_alias(freq)
    return freq, ax_freq


def _use_dynamic_x(ax, data):
    # Decide whether the dynamic (period-based) x axis can be used for
    # `data` on `ax`.
    freq = _get_index_freq(data)
    ax_freq = _get_ax_freq(ax)

    if freq is None:  # convert irregular if axes has freq info
        freq = ax_freq
    else:  # do not use tsplot if irregular was plotted first
        if (ax_freq is None) and (len(ax.get_lines()) > 0):
            return False

    if freq is None:
        return False

    if isinstance(freq, DateOffset):
        freq = freq.rule_code
    else:
        freq = frequencies.get_base_alias(freq)
    freq = frequencies.get_period_alias(freq)

    if freq is None:
        return False

    # hack this for 0.10.1, creating more technical debt...sigh
    if isinstance(data.index, DatetimeIndex):
        base = frequencies.get_freq(freq)
        x = data.index
        if (base <= frequencies.FreqGroup.FR_DAY):
            return x[:1].is_normalized
        return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
    return True


def _get_index_freq(data):
    # Frequency of the data's index; treats business-day data containing
    # weekend observations as having no usable frequency.
    freq = getattr(data.index, 'freq', None)
    if freq is None:
        freq = getattr(data.index, 'inferred_freq', None)
        if freq == 'B':
            weekdays = np.unique(data.index.dayofweek)
            if (5 in weekdays) or (6 in weekdays):
                freq = None
    return freq


def _maybe_convert_index(ax, data):
    # tsplot converts automatically, but don't want to convert index
    # over and over for DataFrames
    if isinstance(data.index, DatetimeIndex):
        freq = getattr(data.index, 'freq', None)

        if freq is None:
            freq = getattr(data.index, 'inferred_freq', None)
        if isinstance(freq, DateOffset):
            freq = freq.rule_code

        if freq is None:
            freq = _get_ax_freq(ax)

        if freq is None:
            raise ValueError('Could not get frequency alias for plotting')

        freq = frequencies.get_base_alias(freq)
        freq = frequencies.get_period_alias(freq)

        data = data.to_period(freq=freq)
    return data


# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_timedelta_ticks(x, pos, n_decimals):
    """
    Convert a tick value in nanoseconds to a 'D days HH:MM:SS.F' label.

    Parameters
    ----------
    x : float
        Tick position, in nanoseconds.
    pos : int
        Tick index (unused; part of the matplotlib formatter signature).
    n_decimals : int
        Number of fractional-second digits to append; 0 omits the fraction.

    Returns
    -------
    str
    """
    whole_secs, frac_ns = divmod(x, 1e9)
    whole_mins, secs = divmod(whole_secs, 60)
    whole_hrs, mins = divmod(whole_mins, 60)
    days, hrs = divmod(whole_hrs, 24)
    # scale the leftover nanoseconds down to the requested precision
    frac_digits = int(frac_ns * 10**(n_decimals - 9))
    label = r'{:02d}:{:02d}:{:02d}'.format(int(hrs), int(mins), int(secs))
    if n_decimals > 0:
        label += '.{{:0{:0d}d}}'.format(n_decimals).format(frac_digits)
    if days != 0:
        label = '{:d} days '.format(int(days)) + label
    return label


def format_dateaxis(subplot, freq, index):
    """
    Pretty-formats the date axis (x-axis).

    Major and minor ticks are automatically set for the frequency of the
    current underlying series. As the dynamic mode is activated by
    default, changing the limits of the x axis will intelligently change
    the positions of the ticks.
    """
    # handle index specific formatting
    # Note: DatetimeIndex does not use this
    # interface. DatetimeIndex uses matplotlib.date directly
    if isinstance(index, PeriodIndex):
        major_locator = TimeSeries_DateLocator(freq, dynamic_mode=True,
                                               minor_locator=False,
                                               plot_obj=subplot)
        minor_locator = TimeSeries_DateLocator(freq, dynamic_mode=True,
                                               minor_locator=True,
                                               plot_obj=subplot)
        subplot.xaxis.set_major_locator(major_locator)
        subplot.xaxis.set_minor_locator(minor_locator)

        major_formatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
                                                   minor_locator=False,
                                                   plot_obj=subplot)
        minor_formatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
                                                   minor_locator=True,
                                                   plot_obj=subplot)
        subplot.xaxis.set_major_formatter(major_formatter)
        subplot.xaxis.set_minor_formatter(minor_formatter)

        # status-bar readout: period under the cursor plus the y value
        subplot.format_coord = lambda t, y: (
            "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))

    elif isinstance(index, TimedeltaIndex):
        subplot.xaxis.set_major_formatter(
            TimeSeries_TimedeltaFormatter())
    else:
        raise TypeError('index type not supported')

    pylab.draw_if_interactive()


# --- patch continues: pandas/plotting/_tools.py (new file) ---
mode 100644 index 0000000000000..720f776279869 --- /dev/null +++ b/pandas/plotting/_tools.py @@ -0,0 +1,383 @@ +# being a bit too dynamic +# pylint: disable=E1101 +from __future__ import division + +import warnings +from math import ceil + +import numpy as np + +from pandas.types.common import is_list_like +from pandas.core.index import Index +from pandas.core.series import Series +from pandas.compat import range + + +def format_date_labels(ax, rot): + # mini version of autofmt_xdate + try: + for label in ax.get_xticklabels(): + label.set_ha('right') + label.set_rotation(rot) + fig = ax.get_figure() + fig.subplots_adjust(bottom=0.2) + except Exception: # pragma: no cover + pass + + +def table(ax, data, rowLabels=None, colLabels=None, + **kwargs): + """ + Helper function to convert DataFrame and Series to matplotlib.table + + Parameters + ---------- + `ax`: Matplotlib axes object + `data`: DataFrame or Series + data for table contents + `kwargs`: keywords, optional + keyword arguments which passed to matplotlib.table.table. + If `rowLabels` or `colLabels` is not specified, data index or column + name will be used. 
+ + Returns + ------- + matplotlib table object + """ + from pandas import DataFrame + if isinstance(data, Series): + data = DataFrame(data, columns=[data.name]) + elif isinstance(data, DataFrame): + pass + else: + raise ValueError('Input data must be DataFrame or Series') + + if rowLabels is None: + rowLabels = data.index + + if colLabels is None: + colLabels = data.columns + + cellText = data.values + + import matplotlib.table + table = matplotlib.table.table(ax, cellText=cellText, + rowLabels=rowLabels, + colLabels=colLabels, **kwargs) + return table + + +def _get_layout(nplots, layout=None, layout_type='box'): + if layout is not None: + if not isinstance(layout, (tuple, list)) or len(layout) != 2: + raise ValueError('Layout must be a tuple of (rows, columns)') + + nrows, ncols = layout + + # Python 2 compat + ceil_ = lambda x: int(ceil(x)) + if nrows == -1 and ncols > 0: + layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols) + elif ncols == -1 and nrows > 0: + layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows)) + elif ncols <= 0 and nrows <= 0: + msg = "At least one dimension of layout must be positive" + raise ValueError(msg) + + if nrows * ncols < nplots: + raise ValueError('Layout of %sx%s must be larger than ' + 'required size %s' % (nrows, ncols, nplots)) + + return layout + + if layout_type == 'single': + return (1, 1) + elif layout_type == 'horizontal': + return (1, nplots) + elif layout_type == 'vertical': + return (nplots, 1) + + layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} + try: + return layouts[nplots] + except KeyError: + k = 1 + while k ** 2 < nplots: + k += 1 + + if (k - 1) * k >= nplots: + return k, (k - 1) + else: + return k, k + +# copied from matplotlib/pyplot.py and modified for pandas.plotting + + +def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, + subplot_kw=None, ax=None, layout=None, layout_type='box', + **fig_kw): + """Create a figure with a set of subplots already made. 
+ + This utility wrapper makes it convenient to create common layouts of + subplots, including the enclosing figure object, in a single call. + + Keyword arguments: + + naxes : int + Number of required axes. Exceeded axes are set invisible. Default is + nrows * ncols. + + sharex : bool + If True, the X axis will be shared amongst all subplots. + + sharey : bool + If True, the Y axis will be shared amongst all subplots. + + squeeze : bool + + If True, extra dimensions are squeezed out from the returned axis object: + - if only one subplot is constructed (nrows=ncols=1), the resulting + single Axis object is returned as a scalar. + - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object + array of Axis objects are returned as numpy 1-d arrays. + - for NxM subplots with N>1 and M>1 are returned as a 2d array. + + If False, no squeezing at all is done: the returned axis object is always + a 2-d array containing Axis instances, even if it ends up being 1x1. + + subplot_kw : dict + Dict with keywords passed to the add_subplot() call used to create each + subplots. + + ax : Matplotlib axis object, optional + + layout : tuple + Number of rows and columns of the subplot grid. + If not specified, calculated from naxes and layout_type + + layout_type : {'box', 'horziontal', 'vertical'}, default 'box' + Specify how to layout the subplot grid. + + fig_kw : Other keyword arguments to be passed to the figure() call. + Note that all keywords not recognized above will be + automatically included here. + + Returns: + + fig, ax : tuple + - fig is the Matplotlib Figure object + - ax can be either a single axis object or an array of axis objects if + more than one subplot was created. The dimensions of the resulting array + can be controlled with the squeeze keyword, see above. 
+ + **Examples:** + + x = np.linspace(0, 2*np.pi, 400) + y = np.sin(x**2) + + # Just a figure and one subplot + f, ax = plt.subplots() + ax.plot(x, y) + ax.set_title('Simple plot') + + # Two subplots, unpack the output array immediately + f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) + ax1.plot(x, y) + ax1.set_title('Sharing Y axis') + ax2.scatter(x, y) + + # Four polar axes + plt.subplots(2, 2, subplot_kw=dict(polar=True)) + """ + import matplotlib.pyplot as plt + + if subplot_kw is None: + subplot_kw = {} + + if ax is None: + fig = plt.figure(**fig_kw) + else: + if is_list_like(ax): + ax = _flatten(ax) + if layout is not None: + warnings.warn("When passing multiple axes, layout keyword is " + "ignored", UserWarning) + if sharex or sharey: + warnings.warn("When passing multiple axes, sharex and sharey " + "are ignored. These settings must be specified " + "when creating axes", UserWarning, + stacklevel=4) + if len(ax) == naxes: + fig = ax[0].get_figure() + return fig, ax + else: + raise ValueError("The number of passed axes must be {0}, the " + "same as the output plot".format(naxes)) + + fig = ax.get_figure() + # if ax is passed and a number of subplots is 1, return ax as it is + if naxes == 1: + if squeeze: + return fig, ax + else: + return fig, _flatten(ax) + else: + warnings.warn("To output multiple subplots, the figure containing " + "the passed axes is being cleared", UserWarning, + stacklevel=4) + fig.clear() + + nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) + nplots = nrows * ncols + + # Create empty object array to hold all axes. 
It's easiest to make it 1-d + # so we can just append subplots upon creation, and then + axarr = np.empty(nplots, dtype=object) + + # Create first subplot separately, so we can share it if requested + ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) + + if sharex: + subplot_kw['sharex'] = ax0 + if sharey: + subplot_kw['sharey'] = ax0 + axarr[0] = ax0 + + # Note off-by-one counting because add_subplot uses the MATLAB 1-based + # convention. + for i in range(1, nplots): + kwds = subplot_kw.copy() + # Set sharex and sharey to None for blank/dummy axes, these can + # interfere with proper axis limits on the visible axes if + # they share axes e.g. issue #7528 + if i >= naxes: + kwds['sharex'] = None + kwds['sharey'] = None + ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) + axarr[i] = ax + + if naxes != nplots: + for ax in axarr[naxes:]: + ax.set_visible(False) + + _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + + if squeeze: + # Reshape the array to have the final desired dimension (nrow,ncol), + # though discarding unneeded dimensions that equal 1. If we only have + # one subplot, just return it instead of a 1-element array. 
+ if nplots == 1: + axes = axarr[0] + else: + axes = axarr.reshape(nrows, ncols).squeeze() + else: + # returned axis array will be always 2-d, even if nrows=ncols=1 + axes = axarr.reshape(nrows, ncols) + + return fig, axes + + +def _remove_labels_from_axis(axis): + for t in axis.get_majorticklabels(): + t.set_visible(False) + + try: + # set_visible will not be effective if + # minor axis has NullLocator and NullFormattor (default) + import matplotlib.ticker as ticker + if isinstance(axis.get_minor_locator(), ticker.NullLocator): + axis.set_minor_locator(ticker.AutoLocator()) + if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): + axis.set_minor_formatter(ticker.FormatStrFormatter('')) + for t in axis.get_minorticklabels(): + t.set_visible(False) + except Exception: # pragma no cover + raise + axis.get_label().set_visible(False) + + +def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): + if nplots > 1: + + if nrows > 1: + try: + # first find out the ax layout, + # so that we can correctly handle 'gaps" + layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool) + for ax in axarr: + layout[ax.rowNum, ax.colNum] = ax.get_visible() + + for ax in axarr: + # only the last row of subplots should get x labels -> all + # other off layout handles the case that the subplot is + # the last in the column, because below is no subplot/gap. + if not layout[ax.rowNum + 1, ax.colNum]: + continue + if sharex or len(ax.get_shared_x_axes() + .get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.xaxis) + + except IndexError: + # if gridspec is used, ax.rowNum and ax.colNum may different + # from layout shape. 
in this case, use last_row logic + for ax in axarr: + if ax.is_last_row(): + continue + if sharex or len(ax.get_shared_x_axes() + .get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.xaxis) + + if ncols > 1: + for ax in axarr: + # only the first column should get y labels -> set all other to + # off as we only have labels in teh first column and we always + # have a subplot there, we can skip the layout test + if ax.is_first_col(): + continue + if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1: + _remove_labels_from_axis(ax.yaxis) + + +def _flatten(axes): + if not is_list_like(axes): + return np.array([axes]) + elif isinstance(axes, (np.ndarray, Index)): + return axes.ravel() + return np.array(axes) + + +def _get_all_lines(ax): + lines = ax.get_lines() + + if hasattr(ax, 'right_ax'): + lines += ax.right_ax.get_lines() + + if hasattr(ax, 'left_ax'): + lines += ax.left_ax.get_lines() + + return lines + + +def _get_xlim(lines): + left, right = np.inf, -np.inf + for l in lines: + x = l.get_xdata(orig=False) + left = min(x[0], left) + right = max(x[-1], right) + return left, right + + +def _set_ticks_props(axes, xlabelsize=None, xrot=None, + ylabelsize=None, yrot=None): + import matplotlib.pyplot as plt + + for ax in _flatten(axes): + if xlabelsize is not None: + plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) + if xrot is not None: + plt.setp(ax.get_xticklabels(), rotation=xrot) + if ylabelsize is not None: + plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) + if yrot is not None: + plt.setp(ax.get_yticklabels(), rotation=yrot) + return axes diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 6d92898042b23..02734189ca340 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -31,7 +31,7 @@ class TestPDApi(Base, tm.TestCase): # top-level sub-packages lib = ['api', 'compat', 'computation', 'core', - 'indexes', 'formats', 'errors', 'pandas', + 'indexes', 'formats', 'errors', 'pandas', 'plotting', 
'test', 'tools', 'tseries', 'sparse', 'types', 'util', 'options', 'io'] @@ -70,8 +70,7 @@ class TestPDApi(Base, tm.TestCase): 'melt', 'notnull', 'offsets', 'merge', 'merge_ordered', 'merge_asof', 'period_range', - 'pivot', 'pivot_table', 'plot_params', 'qcut', - 'scatter_matrix', + 'pivot', 'pivot_table', 'qcut', 'show_versions', 'timedelta_range', 'unique', 'value_counts', 'wide_to_long'] @@ -104,7 +103,8 @@ class TestPDApi(Base, tm.TestCase): 'rolling_median', 'rolling_min', 'rolling_quantile', 'rolling_skew', 'rolling_std', 'rolling_sum', 'rolling_var', 'rolling_window', 'ordered_merge', - 'pnow', 'match', 'groupby', 'get_store'] + 'pnow', 'match', 'groupby', 'get_store', + 'plot_params', 'scatter_matrix'] def test_api(self): diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index c31d8b539ae6f..d81f73e73ae69 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -16,7 +16,8 @@ import numpy as np from numpy import random -import pandas.tools.plotting as plotting +import pandas.plotting as plotting +from pandas.plotting._tools import _flatten """ @@ -48,12 +49,12 @@ def setUp(self): import matplotlib as mpl mpl.rcdefaults() - self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1() - self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1() - self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0() - self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0() - self.mpl_ge_2_0_0 = plotting._mpl_ge_2_0_0() - self.mpl_ge_2_0_1 = plotting._mpl_ge_2_0_1() + self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1() + self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1() + self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0() + self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0() + self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0() + self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1() if self.mpl_ge_1_4_0: self.bp_n_objects = 7 @@ -73,7 +74,8 @@ def setUp(self): self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default' # common test data from pandas 
import read_csv - path = os.path.join(os.path.dirname(curpath()), 'data', 'iris.csv') + base = os.path.join(os.path.dirname(curpath()), os.pardir) + path = os.path.join(base, 'tests', 'data', 'iris.csv') self.iris = read_csv(path) n = 100 @@ -353,7 +355,7 @@ def _check_axes_shape(self, axes, axes_num=None, layout=None, self.assertTrue(len(ax.get_children()) > 0) if layout is not None: - result = self._get_axes_layout(plotting._flatten(axes)) + result = self._get_axes_layout(_flatten(axes)) self.assertEqual(result, layout) self.assert_numpy_array_equal( @@ -379,7 +381,7 @@ def _flatten_visible(self, axes): axes : matplotlib Axes object, or its list-like """ - axes = plotting._flatten(axes) + axes = _flatten(axes) axes = [ax for ax in axes if ax.get_visible()] return axes diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 31c150bc1e64f..5b9c13bd26708 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -14,7 +14,7 @@ from numpy import random from numpy.random import randn -import pandas.tools.plotting as plotting +import pandas.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works) @@ -54,7 +54,8 @@ def test_boxplot_legacy(self): _check_plot_works(df.boxplot, by='indic') with tm.assert_produces_warning(UserWarning): _check_plot_works(df.boxplot, by=['indic', 'indic2']) - _check_plot_works(plotting.boxplot, data=df['one'], return_type='dict') + _check_plot_works(plotting._core.boxplot, data=df['one'], + return_type='dict') _check_plot_works(df.boxplot, notch=1, return_type='dict') with tm.assert_produces_warning(UserWarning): _check_plot_works(df.boxplot, by='indic', notch=1) diff --git a/pandas/tests/tseries/test_converter.py b/pandas/tests/plotting/test_converter.py similarity index 99% rename from pandas/tests/tseries/test_converter.py rename to pandas/tests/plotting/test_converter.py index 
5351e26f0e62b..4629103d033f5 100644 --- a/pandas/tests/tseries/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -8,7 +8,7 @@ from pandas.tseries.offsets import Second, Milli, Micro, Day from pandas.compat.numpy import np_datetime64_compat -converter = pytest.importorskip('pandas.tseries.converter') +converter = pytest.importorskip('pandas.plotting._converter') def test_timtetonum_accepts_unicode(): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 673c34903b259..b3692c5a8d2d2 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -144,7 +144,7 @@ def test_high_freq(self): _check_plot_works(ser.plot) def test_get_datevalue(self): - from pandas.tseries.converter import get_datevalue + from pandas.plotting._converter import get_datevalue self.assertIsNone(get_datevalue(None, 'D')) self.assertEqual(get_datevalue(1987, 'A'), 1987) self.assertEqual(get_datevalue(Period(1987, 'A'), 'M'), @@ -243,7 +243,7 @@ def test_plot_multiple_inferred_freq(self): @slow def test_uhf(self): - import pandas.tseries.converter as conv + import pandas.plotting._converter as conv import matplotlib.pyplot as plt fig = plt.gcf() plt.clf() @@ -387,7 +387,7 @@ def _test(ax): _test(ax) def test_get_finder(self): - import pandas.tseries.converter as conv + import pandas.plotting._converter as conv self.assertEqual(conv.get_finder('B'), conv._daily_finder) self.assertEqual(conv.get_finder('D'), conv._daily_finder) diff --git a/pandas/tests/plotting/test_deprecated.py b/pandas/tests/plotting/test_deprecated.py new file mode 100644 index 0000000000000..d7eaa69460a3a --- /dev/null +++ b/pandas/tests/plotting/test_deprecated.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +import string + +import pandas as pd +import pandas.util.testing as tm +from pandas.util.testing import slow + +from numpy.random import randn + +import pandas.tools.plotting as plotting + +from 
pandas.tests.plotting.common import TestPlotBase + + +""" +Test cases for plot functions imported from deprecated +pandas.tools.plotting +""" + + +@tm.mplskip +class TestDeprecatedNameSpace(TestPlotBase): + + @slow + def test_scatter_plot_legacy(self): + tm._skip_if_no_scipy() + + df = pd.DataFrame(randn(100, 2)) + + with tm.assert_produces_warning(FutureWarning): + plotting.scatter_matrix(df) + + with tm.assert_produces_warning(FutureWarning): + pd.scatter_matrix(df) + + @slow + def test_boxplot_deprecated(self): + df = pd.DataFrame(randn(6, 4), + index=list(string.ascii_letters[:6]), + columns=['one', 'two', 'three', 'four']) + df['indic'] = ['foo', 'bar'] * 3 + + with tm.assert_produces_warning(FutureWarning): + plotting.boxplot(df, column=['one', 'two'], + by='indic') + + @slow + def test_radviz_deprecated(self): + df = self.iris + with tm.assert_produces_warning(FutureWarning): + plotting.radviz(frame=df, class_column='Name') + + @slow + def test_plot_params(self): + + with tm.assert_produces_warning(FutureWarning): + pd.plot_params['xaxis.compat'] = True diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 8090b9cc44ca3..404752b567f63 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -22,7 +22,7 @@ import numpy as np from numpy.random import rand, randn -import pandas.tools.plotting as plotting +import pandas.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, _skip_if_no_scipy_gaussian_kde, _ok_for_gaussian_kde) @@ -240,13 +240,13 @@ def test_xcompat(self): self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex) tm.close() - pd.plot_params['xaxis.compat'] = True + pd.plotting.plot_params['xaxis.compat'] = True ax = df.plot() lines = ax.get_lines() self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex) tm.close() - pd.plot_params['x_compat'] = False + pd.plotting.plot_params['x_compat'] = False ax = df.plot() lines = 
ax.get_lines() self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex) @@ -254,7 +254,7 @@ def test_xcompat(self): tm.close() # useful if you're plotting a bunch together - with pd.plot_params.use('x_compat', True): + with pd.plotting.plot_params.use('x_compat', True): ax = df.plot() lines = ax.get_lines() self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex) @@ -1979,7 +1979,7 @@ def test_unordered_ts(self): def test_kind_both_ways(self): df = DataFrame({'x': [1, 2, 3]}) - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue df.plot(kind=kind) @@ -1990,7 +1990,7 @@ def test_kind_both_ways(self): def test_all_invalid_plot_data(self): df = DataFrame(list('abcd')) - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): @@ -2001,7 +2001,7 @@ def test_partially_invalid_plot_data(self): with tm.RNGContext(42): df = DataFrame(randn(10, 2), dtype=object) df[np.random.rand(df.shape[0]) > 0.5] = 'a' - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): @@ -2454,7 +2454,7 @@ def test_memory_leak(self): import gc results = {} - for kind in plotting._plot_klass.keys(): + for kind in plotting._core._plot_klass.keys(): if not _ok_for_gaussian_kde(kind): continue args = {} @@ -2653,7 +2653,7 @@ def test_df_grid_settings(self): # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 self._check_grid_settings( DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}), - plotting._dataframe_kinds, kws={'x': 'a', 'y': 'b'}) + plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'}) def test_option_mpl_style(self): with tm.assert_produces_warning(FutureWarning, diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 380bdc12abce4..0a13a6e9893a8 100644 
--- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -9,7 +9,7 @@ import numpy as np from numpy.random import randn -import pandas.tools.plotting as plotting +from pandas.plotting._core import grouped_hist from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works) @@ -260,7 +260,7 @@ def test_grouped_hist_legacy(self): df['C'] = np.random.randint(0, 4, 500) df['D'] = ['X'] * 500 - axes = plotting.grouped_hist(df.A, by=df.C) + axes = grouped_hist(df.A, by=df.C) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) tm.close() @@ -277,10 +277,9 @@ def test_grouped_hist_legacy(self): # make sure kwargs to hist are handled xf, yf = 20, 18 xrot, yrot = 30, 40 - axes = plotting.grouped_hist(df.A, by=df.C, normed=True, - cumulative=True, bins=4, - xlabelsize=xf, xrot=xrot, - ylabelsize=yf, yrot=yrot) + axes = grouped_hist(df.A, by=df.C, normed=True, cumulative=True, + bins=4, xlabelsize=xf, xrot=xrot, + ylabelsize=yf, yrot=yrot) # height of last bin (index 5) must be 1.0 for ax in axes.ravel(): rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] @@ -290,14 +289,14 @@ def test_grouped_hist_legacy(self): ylabelsize=yf, yrot=yrot) tm.close() - axes = plotting.grouped_hist(df.A, by=df.C, log=True) + axes = grouped_hist(df.A, by=df.C, log=True) # scale of y must be 'log' self._check_ax_scales(axes, yaxis='log') tm.close() # propagate attr exception from matplotlib.Axes.hist with tm.assertRaises(AttributeError): - plotting.grouped_hist(df.A, by=df.C, foo='bar') + grouped_hist(df.A, by=df.C, foo='bar') with tm.assert_produces_warning(FutureWarning): df.hist(by='C', figsize='default') diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 504c55bcfcfd0..fe0b6c103a0e1 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -11,7 +11,7 @@ from numpy import random from numpy.random import randn -import pandas.tools.plotting as plotting 
+import pandas.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, _ok_for_gaussian_kde) @@ -29,7 +29,7 @@ def setUp(self): @slow def test_autocorrelation_plot(self): - from pandas.tools.plotting import autocorrelation_plot + from pandas.plotting import autocorrelation_plot _check_plot_works(autocorrelation_plot, series=self.ts) _check_plot_works(autocorrelation_plot, series=self.ts.values) @@ -38,13 +38,13 @@ def test_autocorrelation_plot(self): @slow def test_lag_plot(self): - from pandas.tools.plotting import lag_plot + from pandas.plotting import lag_plot _check_plot_works(lag_plot, series=self.ts) _check_plot_works(lag_plot, series=self.ts, lag=5) @slow def test_bootstrap_plot(self): - from pandas.tools.plotting import bootstrap_plot + from pandas.plotting import bootstrap_plot _check_plot_works(bootstrap_plot, series=self.ts, size=10) @@ -84,7 +84,7 @@ def scat(**kwds): _check_plot_works(scat, facecolor='rgb') def scat2(x, y, by=None, ax=None, figsize=None): - return plotting.scatter_plot(df, x, y, by, ax, figsize=None) + return plotting._core.scatter_plot(df, x, y, by, ax, figsize=None) _check_plot_works(scat2, x=0, y=1) grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index) @@ -130,7 +130,7 @@ def test_scatter_matrix_axis(self): @slow def test_andrews_curves(self): - from pandas.tools.plotting import andrews_curves + from pandas.plotting import andrews_curves from matplotlib import cm df = self.iris @@ -195,7 +195,7 @@ def test_andrews_curves(self): @slow def test_parallel_coordinates(self): - from pandas.tools.plotting import parallel_coordinates + from pandas.plotting import parallel_coordinates from matplotlib import cm df = self.iris @@ -263,7 +263,7 @@ def test_parallel_coordinates_with_sorted_labels(self): @slow def test_radviz(self): - from pandas.tools.plotting import radviz + from pandas.plotting import radviz from matplotlib import cm df = self.iris diff --git a/pandas/tests/plotting/test_series.py 
b/pandas/tests/plotting/test_series.py index 8c00d606059a4..c3bc3ca6bf414 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -16,7 +16,7 @@ import numpy as np from numpy.random import randn -import pandas.tools.plotting as plotting +import pandas.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, _skip_if_no_scipy_gaussian_kde, _ok_for_gaussian_kde) @@ -622,7 +622,9 @@ def test_boxplot_series(self): @slow def test_kind_both_ways(self): s = Series(range(3)) - for kind in plotting._common_kinds + plotting._series_kinds: + kinds = (plotting._core._common_kinds + + plotting._core._series_kinds) + for kind in kinds: if not _ok_for_gaussian_kde(kind): continue s.plot(kind=kind) @@ -631,7 +633,7 @@ def test_kind_both_ways(self): @slow def test_invalid_plot_data(self): s = Series(list('abcd')) - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): @@ -640,14 +642,14 @@ def test_invalid_plot_data(self): @slow def test_valid_object_plot(self): s = Series(lrange(10), dtype=object) - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(s.plot, kind=kind) def test_partially_invalid_plot_data(self): s = Series(['a', 'b', 1.0, 2]) - for kind in plotting._common_kinds: + for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with tm.assertRaises(TypeError): @@ -718,54 +720,57 @@ def test_table(self): def test_series_grid_settings(self): # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 self._check_grid_settings(Series([1, 2, 3]), - plotting._series_kinds + - plotting._common_kinds) + plotting._core._series_kinds + + plotting._core._common_kinds) @slow def test_standard_colors(self): + from pandas.plotting._style import _get_standard_colors + for c in 
['r', 'red', 'green', '#FF0000']: - result = plotting._get_standard_colors(1, color=c) + result = _get_standard_colors(1, color=c) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(1, color=[c]) + result = _get_standard_colors(1, color=[c]) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(3, color=c) + result = _get_standard_colors(3, color=c) self.assertEqual(result, [c] * 3) - result = plotting._get_standard_colors(3, color=[c]) + result = _get_standard_colors(3, color=[c]) self.assertEqual(result, [c] * 3) @slow def test_standard_colors_all(self): import matplotlib.colors as colors + from pandas.plotting._style import _get_standard_colors # multiple colors like mediumaquamarine for c in colors.cnames: - result = plotting._get_standard_colors(num_colors=1, color=c) + result = _get_standard_colors(num_colors=1, color=c) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(num_colors=1, color=[c]) + result = _get_standard_colors(num_colors=1, color=[c]) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(num_colors=3, color=c) + result = _get_standard_colors(num_colors=3, color=c) self.assertEqual(result, [c] * 3) - result = plotting._get_standard_colors(num_colors=3, color=[c]) + result = _get_standard_colors(num_colors=3, color=[c]) self.assertEqual(result, [c] * 3) # single letter colors like k for c in colors.ColorConverter.colors: - result = plotting._get_standard_colors(num_colors=1, color=c) + result = _get_standard_colors(num_colors=1, color=c) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(num_colors=1, color=[c]) + result = _get_standard_colors(num_colors=1, color=[c]) self.assertEqual(result, [c]) - result = plotting._get_standard_colors(num_colors=3, color=c) + result = _get_standard_colors(num_colors=3, color=c) self.assertEqual(result, [c] * 3) - result = plotting._get_standard_colors(num_colors=3, color=[c]) + result = 
_get_standard_colors(num_colors=3, color=[c]) self.assertEqual(result, [c] * 3) def test_series_plot_color_kwargs(self): diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 141e3c74b91c4..a68da67a219e2 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1,4032 +1,20 @@ -# being a bit too dynamic -# pylint: disable=E1101 -from __future__ import division - +import sys import warnings -import re -from math import ceil -from collections import namedtuple -from contextlib import contextmanager -from distutils.version import LooseVersion - -import numpy as np - -from pandas.types.common import (is_list_like, - is_integer, - is_number, - is_hashable, - is_iterator) -from pandas.types.missing import isnull, notnull - -from pandas.util.decorators import cache_readonly, deprecate_kwarg -from pandas.core.base import PandasObject - -from pandas.core.common import AbstractMethodError, _try_sort -from pandas.core.generic import _shared_docs, _shared_doc_kwargs -from pandas.core.index import Index, MultiIndex -from pandas.core.series import Series, remove_na -from pandas.tseries.period import PeriodIndex -from pandas.compat import range, lrange, lmap, map, zip, string_types -import pandas.compat as compat -from pandas.formats.printing import pprint_thing -from pandas.util.decorators import Appender -try: # mpl optional - import pandas.tseries.converter as conv - conv.register() # needs to override so set_xlim works with str/number -except ImportError: - pass - - -# Extracted from https://gist.github.com/huyng/816622 -# this is the rcParams set when setting display.with_mpl_style -# to True. 
-mpl_stylesheet = { - 'axes.axisbelow': True, - 'axes.color_cycle': ['#348ABD', - '#7A68A6', - '#A60628', - '#467821', - '#CF4457', - '#188487', - '#E24A33'], - 'axes.edgecolor': '#bcbcbc', - 'axes.facecolor': '#eeeeee', - 'axes.grid': True, - 'axes.labelcolor': '#555555', - 'axes.labelsize': 'large', - 'axes.linewidth': 1.0, - 'axes.titlesize': 'x-large', - 'figure.edgecolor': 'white', - 'figure.facecolor': 'white', - 'figure.figsize': (6.0, 4.0), - 'figure.subplot.hspace': 0.5, - 'font.family': 'monospace', - 'font.monospace': ['Andale Mono', - 'Nimbus Mono L', - 'Courier New', - 'Courier', - 'Fixed', - 'Terminal', - 'monospace'], - 'font.size': 10, - 'interactive': True, - 'keymap.all_axes': ['a'], - 'keymap.back': ['left', 'c', 'backspace'], - 'keymap.forward': ['right', 'v'], - 'keymap.fullscreen': ['f'], - 'keymap.grid': ['g'], - 'keymap.home': ['h', 'r', 'home'], - 'keymap.pan': ['p'], - 'keymap.save': ['s'], - 'keymap.xscale': ['L', 'k'], - 'keymap.yscale': ['l'], - 'keymap.zoom': ['o'], - 'legend.fancybox': True, - 'lines.antialiased': True, - 'lines.linewidth': 1.0, - 'patch.antialiased': True, - 'patch.edgecolor': '#EEEEEE', - 'patch.facecolor': '#348ABD', - 'patch.linewidth': 0.5, - 'toolbar': 'toolbar2', - 'xtick.color': '#555555', - 'xtick.direction': 'in', - 'xtick.major.pad': 6.0, - 'xtick.major.size': 0.0, - 'xtick.minor.pad': 6.0, - 'xtick.minor.size': 0.0, - 'ytick.color': '#555555', - 'ytick.direction': 'in', - 'ytick.major.pad': 6.0, - 'ytick.major.size': 0.0, - 'ytick.minor.pad': 6.0, - 'ytick.minor.size': 0.0 -} - - -def _mpl_le_1_2_1(): - try: - import matplotlib as mpl - return (str(mpl.__version__) <= LooseVersion('1.2.1') and - str(mpl.__version__)[0] != '0') - except ImportError: - return False - - -def _mpl_ge_1_3_1(): - try: - import matplotlib - # The or v[0] == '0' is because their versioneer is - # messed up on dev - return (matplotlib.__version__ >= LooseVersion('1.3.1') or - matplotlib.__version__[0] == '0') - except ImportError: 
- return False - - -def _mpl_ge_1_4_0(): - try: - import matplotlib - return (matplotlib.__version__ >= LooseVersion('1.4') or - matplotlib.__version__[0] == '0') - except ImportError: - return False - - -def _mpl_ge_1_5_0(): - try: - import matplotlib - return (matplotlib.__version__ >= LooseVersion('1.5') or - matplotlib.__version__[0] == '0') - except ImportError: - return False - - -def _mpl_ge_2_0_0(): - try: - import matplotlib - return matplotlib.__version__ >= LooseVersion('2.0') - except ImportError: - return False - - -def _mpl_ge_2_0_1(): - try: - import matplotlib - return matplotlib.__version__ >= LooseVersion('2.0.1') - except ImportError: - return False - - -if _mpl_ge_1_5_0(): - # Compat with mp 1.5, which uses cycler. - import cycler - colors = mpl_stylesheet.pop('axes.color_cycle') - mpl_stylesheet['axes.prop_cycle'] = cycler.cycler('color', colors) - - -def _get_standard_kind(kind): - return {'density': 'kde'}.get(kind, kind) - - -def _get_standard_colors(num_colors=None, colormap=None, color_type='default', - color=None): - import matplotlib.pyplot as plt - - if color is None and colormap is not None: - if isinstance(colormap, compat.string_types): - import matplotlib.cm as cm - cmap = colormap - colormap = cm.get_cmap(colormap) - if colormap is None: - raise ValueError("Colormap {0} is not recognized".format(cmap)) - colors = lmap(colormap, np.linspace(0, 1, num=num_colors)) - elif color is not None: - if colormap is not None: - warnings.warn("'color' and 'colormap' cannot be used " - "simultaneously. 
Using 'color'") - colors = list(color) if is_list_like(color) else color - else: - if color_type == 'default': - # need to call list() on the result to copy so we don't - # modify the global rcParams below - try: - colors = [c['color'] - for c in list(plt.rcParams['axes.prop_cycle'])] - except KeyError: - colors = list(plt.rcParams.get('axes.color_cycle', - list('bgrcmyk'))) - if isinstance(colors, compat.string_types): - colors = list(colors) - elif color_type == 'random': - import random - - def random_color(column): - random.seed(column) - return [random.random() for _ in range(3)] - - colors = lmap(random_color, lrange(num_colors)) - else: - raise ValueError("color_type must be either 'default' or 'random'") - - if isinstance(colors, compat.string_types): - import matplotlib.colors - conv = matplotlib.colors.ColorConverter() - - def _maybe_valid_colors(colors): - try: - [conv.to_rgba(c) for c in colors] - return True - except ValueError: - return False - - # check whether the string can be convertable to single color - maybe_single_color = _maybe_valid_colors([colors]) - # check whether each character can be convertable to colors - maybe_color_cycle = _maybe_valid_colors(list(colors)) - if maybe_single_color and maybe_color_cycle and len(colors) > 1: - # Special case for single str 'CN' match and convert to hex - # for supporting matplotlib < 2.0.0 - if re.match(r'\AC[0-9]\Z', colors) and _mpl_ge_2_0_0(): - hex_color = [c['color'] - for c in list(plt.rcParams['axes.prop_cycle'])] - colors = [hex_color[int(colors[1])]] - else: - # this may no longer be required - msg = ("'{0}' can be parsed as both single color and " - "color cycle. Specify each color using a list " - "like ['{0}'] or {1}") - raise ValueError(msg.format(colors, list(colors))) - elif maybe_single_color: - colors = [colors] - else: - # ``colors`` is regarded as color cycle. 
- # mpl will raise error any of them is invalid - pass - - if len(colors) != num_colors: - try: - multiple = num_colors // len(colors) - 1 - except ZeroDivisionError: - raise ValueError("Invalid color argument: ''") - mod = num_colors % len(colors) - - colors += multiple * colors - colors += colors[:mod] - - return colors - - -class _Options(dict): - """ - Stores pandas plotting options. - Allows for parameter aliasing so you can just use parameter names that are - the same as the plot function parameters, but is stored in a canonical - format that makes it easy to breakdown into groups later - """ - - # alias so the names are same as plotting method parameter names - _ALIASES = {'x_compat': 'xaxis.compat'} - _DEFAULT_KEYS = ['xaxis.compat'] - - def __init__(self): - self['xaxis.compat'] = False - - def __getitem__(self, key): - key = self._get_canonical_key(key) - if key not in self: - raise ValueError('%s is not a valid pandas plotting option' % key) - return super(_Options, self).__getitem__(key) - - def __setitem__(self, key, value): - key = self._get_canonical_key(key) - return super(_Options, self).__setitem__(key, value) - - def __delitem__(self, key): - key = self._get_canonical_key(key) - if key in self._DEFAULT_KEYS: - raise ValueError('Cannot remove default parameter %s' % key) - return super(_Options, self).__delitem__(key) - - def __contains__(self, key): - key = self._get_canonical_key(key) - return super(_Options, self).__contains__(key) - - def reset(self): - """ - Reset the option store to its initial state - - Returns - ------- - None - """ - self.__init__() - - def _get_canonical_key(self, key): - return self._ALIASES.get(key, key) - - @contextmanager - def use(self, key, value): - """ - Temporarily set a parameter value using the with statement. - Aliasing allowed. 
- """ - old_value = self[key] - try: - self[key] = value - yield self - finally: - self[key] = old_value - - -plot_params = _Options() - - -def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False, - diagonal='hist', marker='.', density_kwds=None, - hist_kwds=None, range_padding=0.05, **kwds): - """ - Draw a matrix of scatter plots. - - Parameters - ---------- - frame : DataFrame - alpha : float, optional - amount of transparency applied - figsize : (float,float), optional - a tuple (width, height) in inches - ax : Matplotlib axis object, optional - grid : bool, optional - setting this to True will show the grid - diagonal : {'hist', 'kde'} - pick between 'kde' and 'hist' for - either Kernel Density Estimation or Histogram - plot in the diagonal - marker : str, optional - Matplotlib marker type, default '.' - hist_kwds : other plotting keyword arguments - To be passed to hist function - density_kwds : other plotting keyword arguments - To be passed to kernel density estimate plot - range_padding : float, optional - relative extension of axis range in x and y - with respect to (x_max - x_min) or (y_max - y_min), - default 0.05 - kwds : other plotting keyword arguments - To be passed to scatter function - - Examples - -------- - >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D']) - >>> scatter_matrix(df, alpha=0.2) - """ - - df = frame._get_numeric_data() - n = df.columns.size - naxes = n * n - fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, - squeeze=False) - - # no gaps between subplots - fig.subplots_adjust(wspace=0, hspace=0) - - mask = notnull(df) - - marker = _get_marker_compat(marker) - - hist_kwds = hist_kwds or {} - density_kwds = density_kwds or {} - - # GH 14855 - kwds.setdefault('edgecolors', 'none') - - boundaries_list = [] - for a in df.columns: - values = df[a].values[mask[a].values] - rmin_, rmax_ = np.min(values), np.max(values) - rdelta_ext = (rmax_ - rmin_) * range_padding / 2. 
- boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) - - for i, a in zip(lrange(n), df.columns): - for j, b in zip(lrange(n), df.columns): - ax = axes[i, j] - - if i == j: - values = df[a].values[mask[a].values] - - # Deal with the diagonal by drawing a histogram there. - if diagonal == 'hist': - ax.hist(values, **hist_kwds) - - elif diagonal in ('kde', 'density'): - from scipy.stats import gaussian_kde - y = values - gkde = gaussian_kde(y) - ind = np.linspace(y.min(), y.max(), 1000) - ax.plot(ind, gkde.evaluate(ind), **density_kwds) - - ax.set_xlim(boundaries_list[i]) - - else: - common = (mask[a] & mask[b]).values - - ax.scatter(df[b][common], df[a][common], - marker=marker, alpha=alpha, **kwds) - - ax.set_xlim(boundaries_list[j]) - ax.set_ylim(boundaries_list[i]) - - ax.set_xlabel(b) - ax.set_ylabel(a) - - if j != 0: - ax.yaxis.set_visible(False) - if i != n - 1: - ax.xaxis.set_visible(False) - - if len(df.columns) > 1: - lim1 = boundaries_list[0] - locs = axes[0][1].yaxis.get_majorticklocs() - locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] - adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) - - lim0 = axes[0][0].get_ylim() - adj = adj * (lim0[1] - lim0[0]) + lim0[0] - axes[0][0].yaxis.set_ticks(adj) - - if np.all(locs == locs.astype(int)): - # if all ticks are int - locs = locs.astype(int) - axes[0][0].yaxis.set_ticklabels(locs) - - _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) - - return axes - - -def _gca(): - import matplotlib.pyplot as plt - return plt.gca() - - -def _gcf(): - import matplotlib.pyplot as plt - return plt.gcf() - - -def _get_marker_compat(marker): - import matplotlib.lines as mlines - import matplotlib as mpl - if mpl.__version__ < '1.1.0' and marker == '.': - return 'o' - if marker not in mlines.lineMarkers: - return 'o' - return marker - - -def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): - """RadViz - a multivariate data visualization algorithm - - Parameters: - ----------- - 
frame: DataFrame - class_column: str - Column name containing class names - ax: Matplotlib axis object, optional - color: list or tuple, optional - Colors to use for the different classes - colormap : str or matplotlib colormap object, default None - Colormap to select colors from. If string, load colormap with that name - from matplotlib. - kwds: keywords - Options to pass to matplotlib scatter plotting method - - Returns: - -------- - ax: Matplotlib axis object - """ - import matplotlib.pyplot as plt - import matplotlib.patches as patches - - def normalize(series): - a = min(series) - b = max(series) - return (series - a) / (b - a) - - n = len(frame) - classes = frame[class_column].drop_duplicates() - class_col = frame[class_column] - df = frame.drop(class_column, axis=1).apply(normalize) - - if ax is None: - ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) - - to_plot = {} - colors = _get_standard_colors(num_colors=len(classes), colormap=colormap, - color_type='random', color=color) - - for kls in classes: - to_plot[kls] = [[], []] - - m = len(frame.columns) - 1 - s = np.array([(np.cos(t), np.sin(t)) - for t in [2.0 * np.pi * (i / float(m)) - for i in range(m)]]) - - for i in range(n): - row = df.iloc[i].values - row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) - y = (s * row_).sum(axis=0) / row.sum() - kls = class_col.iat[i] - to_plot[kls][0].append(y[0]) - to_plot[kls][1].append(y[1]) - - for i, kls in enumerate(classes): - ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i], - label=pprint_thing(kls), **kwds) - ax.legend() - - ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none')) - - for xy, name in zip(s, df.columns): - - ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray')) - - if xy[0] < 0.0 and xy[1] < 0.0: - ax.text(xy[0] - 0.025, xy[1] - 0.025, name, - ha='right', va='top', size='small') - elif xy[0] < 0.0 and xy[1] >= 0.0: - ax.text(xy[0] - 0.025, xy[1] + 0.025, name, - ha='right', va='bottom', size='small') - elif 
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
                   colormap=None, **kwds):
    """
    Generates a matplotlib plot of Andrews curves, for visualising clusters of
    multivariate data.

    Andrews curves have the functional form:

    f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
           x_4 sin(2t) + x_5 cos(2t) + ...

    Where x coefficients correspond to the values of each dimension and t is
    linearly spaced between -pi and +pi. Each row of frame then corresponds to
    a single curve.

    Parameters:
    -----------
    frame : DataFrame
        Data to be plotted, preferably normalized to (0.0, 1.0)
    class_column : Name of the column containing class names
    ax : matplotlib axes object, default None
    samples : Number of points to plot in each curve
    color: list or tuple, optional
        Colors to use for the different classes
    colormap : str or matplotlib colormap object, default None
        Colormap to select colors from. If string, load colormap with that name
        from matplotlib.
    kwds: keywords
        Options to pass to matplotlib plotting method

    Returns:
    --------
    ax: Matplotlib axis object

    """
    from math import sqrt, pi
    import matplotlib.pyplot as plt

    def function(amplitudes):
        # Build the Andrews curve f(t) for one observation: ``amplitudes``
        # is the row of coefficients (x_1, x_2, ...).
        def f(t):
            x1 = amplitudes[0]
            result = x1 / sqrt(2.0)

            # Take the rest of the coefficients and resize them
            # appropriately. Take a copy of amplitudes as otherwise numpy
            # deletes the element from amplitudes itself.
            coeffs = np.delete(np.copy(amplitudes), 0)
            coeffs.resize(int((coeffs.size + 1) / 2), 2)

            # Generate the harmonics and arguments for the sin and cos
            # functions.
            harmonics = np.arange(0, coeffs.shape[0]) + 1
            trig_args = np.outer(harmonics, t)

            # column 0 pairs with sin(k*t), column 1 with cos(k*t)
            result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
                             coeffs[:, 1, np.newaxis] * np.cos(trig_args),
                             axis=0)
            return result
        return f

    n = len(frame)
    class_col = frame[class_column]
    classes = frame[class_column].drop_duplicates()
    df = frame.drop(class_column, axis=1)
    t = np.linspace(-pi, pi, samples)
    used_legends = set([])

    color_values = _get_standard_colors(num_colors=len(classes),
                                        colormap=colormap, color_type='random',
                                        color=color)
    colors = dict(zip(classes, color_values))
    if ax is None:
        ax = plt.gca(xlim=(-pi, pi))
    for i in range(n):
        row = df.iloc[i].values
        f = function(row)
        y = f(t)
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        # attach a legend label only for the first curve of each class
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(t, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(t, y, color=colors[kls], **kwds)

    ax.legend(loc='upper right')
    ax.grid()
    return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
    """Bootstrap plot.

    Repeatedly draws ``size`` points from ``series`` (without replacement)
    and shows the sampling distribution of three location statistics —
    mean, median and midrange — as traces (top row) and histograms
    (bottom row) in a 2x3 grid.

    Parameters:
    -----------
    series: Time series
    fig: matplotlib figure object, optional
    size: number of data points to consider during each sampling
    samples: number of times the bootstrap procedure is performed
    kwds: optional keyword arguments for plotting commands, must be accepted
        by both hist and plot

    Returns:
    --------
    fig: matplotlib figure
    """
    import random
    import matplotlib.pyplot as plt

    # random.sample(ndarray, int) fails on python 3.3, sigh
    data = list(series.values)
    samplings = [random.sample(data, size) for _ in range(samples)]

    means = np.array([np.mean(sampling) for sampling in samplings])
    medians = np.array([np.median(sampling) for sampling in samplings])
    midranges = np.array([(min(sampling) + max(sampling)) * 0.5
                          for sampling in samplings])
    if fig is None:
        fig = plt.figure()
    x = lrange(samples)
    axes = []
    # (xlabel, values, draw-mode) for each of the six panels, in the same
    # subplot order as the original 2x3 layout.
    panels = [("Sample", means, "plot"),
              ("Sample", medians, "plot"),
              ("Sample", midranges, "plot"),
              ("Mean", means, "hist"),
              ("Median", medians, "hist"),
              ("Midrange", midranges, "hist")]
    for position, (xlabel, values, mode) in enumerate(panels, start=1):
        axis = fig.add_subplot(2, 3, position)
        axis.set_xlabel(xlabel)
        axes.append(axis)
        if mode == "plot":
            axis.plot(x, values, **kwds)
        else:
            axis.hist(values, **kwds)
    for axis in axes:
        plt.setp(axis.get_xticklabels(), fontsize=8)
        plt.setp(axis.get_yticklabels(), fontsize=8)
    return fig
"""Parallel coordinates plotting. - - Parameters - ---------- - frame: DataFrame - class_column: str - Column name containing class names - cols: list, optional - A list of column names to use - ax: matplotlib.axis, optional - matplotlib axis object - color: list or tuple, optional - Colors to use for the different classes - use_columns: bool, optional - If true, columns will be used as xticks - xticks: list or tuple, optional - A list of values to use for xticks - colormap: str or matplotlib colormap, default None - Colormap to use for line colors. - axvlines: bool, optional - If true, vertical lines will be added at each xtick - axvlines_kwds: keywords, optional - Options to be passed to axvline method for vertical lines - sort_labels: bool, False - Sort class_column labels, useful when assigning colours - - .. versionadded:: 0.20.0 - - kwds: keywords - Options to pass to matplotlib plotting method - - Returns - ------- - ax: matplotlib axis object - - Examples - -------- - >>> from pandas import read_csv - >>> from pandas.tools.plotting import parallel_coordinates - >>> from matplotlib import pyplot as plt - >>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master' - '/pandas/tests/data/iris.csv') - >>> parallel_coordinates(df, 'Name', color=('#556270', - '#4ECDC4', '#C7F464')) - >>> plt.show() - """ - if axvlines_kwds is None: - axvlines_kwds = {'linewidth': 1, 'color': 'black'} - import matplotlib.pyplot as plt - - n = len(frame) - classes = frame[class_column].drop_duplicates() - class_col = frame[class_column] - - if cols is None: - df = frame.drop(class_column, axis=1) - else: - df = frame[cols] - - used_legends = set([]) - - ncols = len(df.columns) - - # determine values to use for xticks - if use_columns is True: - if not np.all(np.isreal(list(df.columns))): - raise ValueError('Columns must be numeric to be used as xticks') - x = df.columns - elif xticks is not None: - if not np.all(np.isreal(xticks)): - raise ValueError('xticks specified must 
def lag_plot(series, lag=1, ax=None, **kwds):
    """Lag plot for time series.

    Scatters each observation y(t) against the observation ``lag`` steps
    later, y(t + lag); visible structure suggests autocorrelation.

    Parameters:
    -----------
    series: Time series
    lag: lag of the scatter plot, default 1
    ax: Matplotlib axis object, optional
    kwds: Matplotlib scatter method keyword arguments, optional

    Returns:
    --------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt

    # workaround because `c='b'` is hardcoded in matplotlibs scatter method
    kwds.setdefault('c', plt.rcParams['patch.facecolor'])

    values = series.values
    current, shifted = values[:-lag], values[lag:]
    if ax is None:
        ax = plt.gca()
    ax.set_xlabel("y(t)")
    ax.set_ylabel("y(t + %s)" % lag)
    ax.scatter(current, shifted, **kwds)
    return ax
def autocorrelation_plot(series, ax=None, **kwds):
    """Autocorrelation plot for time series.

    Plots the sample autocorrelation for every lag 1..n, together with
    horizontal 95% and 99% confidence bands for a white-noise null.

    Parameters:
    -----------
    series: Time series
    ax: Matplotlib axis object, optional
    kwds : keywords
        Options to pass to matplotlib plotting method

    Returns:
    -----------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt
    n = len(series)
    data = np.asarray(series)
    if ax is None:
        ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
    mean = np.mean(data)
    # biased sample variance; normalizes the autocovariances below
    c0 = np.sum((data - mean) ** 2) / float(n)

    def autocorr(h):
        # normalized sample autocovariance at lag h
        return ((data[:n - h] - mean) *
                (data[h:] - mean)).sum() / float(n) / c0

    lags = np.arange(n) + 1
    acf = [autocorr(h) for h in lags]
    # two-sided normal quantiles for the 95% / 99% bands
    z95 = 1.959963984540054
    z99 = 2.5758293035489004
    root_n = np.sqrt(n)
    band95 = z95 / root_n
    band99 = z99 / root_n
    ax.axhline(y=band99, linestyle='--', color='grey')
    ax.axhline(y=band95, color='grey')
    ax.axhline(y=0.0, color='black')
    ax.axhline(y=-band95, color='grey')
    ax.axhline(y=-band99, linestyle='--', color='grey')
    ax.set_xlabel("Lag")
    ax.set_ylabel("Autocorrelation")
    ax.plot(lags, acf, **kwds)
    if 'label' in kwds:
        ax.legend()
    ax.grid()
    return ax
class MPLPlot(object):
    """
    Base class for assembling a pandas plot using matplotlib.

    Subclasses implement ``_make_plot`` (and override ``_kind``); the
    entry point is ``generate()``, which runs the full pipeline:
    argument adjustment -> numeric data extraction -> subplot setup ->
    plotting -> table/legend/adornment -> per-axes post-processing.

    Parameters
    ----------
    data :

    """

    @property
    def _kind(self):
        """Specify kind str. Must be overridden in child class"""
        raise NotImplementedError

    _layout_type = 'vertical'
    _default_rot = 0
    orientation = None
    # kwargs popped off into attributes in __init__, with their defaults
    _pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
                       'mark_right', 'stacked']
    _attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
                      'mark_right': True, 'stacked': False}

    def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
                 sharey=False, use_index=True,
                 figsize=None, grid=None, legend=True, rot=None,
                 ax=None, fig=None, title=None, xlim=None, ylim=None,
                 xticks=None, yticks=None,
                 sort_columns=False, fontsize=None,
                 secondary_y=False, colormap=None,
                 table=False, layout=None, **kwds):

        self.data = data
        self.by = by

        self.kind = kind

        self.sort_columns = sort_columns

        self.subplots = subplots

        if sharex is None:
            if ax is None:
                self.sharex = True
            else:
                # if we get an axis, the users should do the visibility
                # setting...
                self.sharex = False
        else:
            self.sharex = sharex

        self.sharey = sharey
        self.figsize = figsize
        self.layout = layout

        self.xticks = xticks
        self.yticks = yticks
        self.xlim = xlim
        self.ylim = ylim
        self.title = title
        self.use_index = use_index

        self.fontsize = fontsize

        if rot is not None:
            self.rot = rot
            # need to know for format_date_labels since it's rotated to 30 by
            # default
            self._rot_set = True
        else:
            self._rot_set = False
            self.rot = self._default_rot

        if grid is None:
            grid = False if secondary_y else self.plt.rcParams['axes.grid']

        self.grid = grid
        self.legend = legend
        self.legend_handles = []
        self.legend_labels = []

        # pop kwargs declared in _pop_attributes into attributes
        for attr in self._pop_attributes:
            value = kwds.pop(attr, self._attr_defaults.get(attr, None))
            setattr(self, attr, value)

        self.ax = ax
        self.fig = fig
        self.axes = None

        # parse errorbar input if given
        xerr = kwds.pop('xerr', None)
        yerr = kwds.pop('yerr', None)
        self.errors = {}
        for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
            self.errors[kw] = self._parse_errorbars(kw, err)

        # a scalar secondary_y (a column label) is normalized to a list
        if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
            secondary_y = [secondary_y]
        self.secondary_y = secondary_y

        # ugly TypeError if user passes matplotlib's `cmap` name.
        # Probably better to accept either.
        if 'cmap' in kwds and colormap:
            raise TypeError("Only specify one of `cmap` and `colormap`.")
        elif 'cmap' in kwds:
            self.colormap = kwds.pop('cmap')
        else:
            self.colormap = colormap

        self.table = table

        self.kwds = kwds

        self._validate_color_args()

    def _validate_color_args(self):
        # normalize/validate 'color', deprecated 'colors', colormap and
        # style interactions before plotting
        if 'color' not in self.kwds and 'colors' in self.kwds:
            warnings.warn(("'colors' is being deprecated. Please use 'color'"
                           "instead of 'colors'"))
            colors = self.kwds.pop('colors')
            self.kwds['color'] = colors

        if ('color' in self.kwds and self.nseries == 1):
            # support series.plot(color='green')
            self.kwds['color'] = [self.kwds['color']]

        if ('color' in self.kwds or 'colors' in self.kwds) and \
                self.colormap is not None:
            warnings.warn("'color' and 'colormap' cannot be used "
                          "simultaneously. Using 'color'")

        if 'color' in self.kwds and self.style is not None:
            if is_list_like(self.style):
                styles = self.style
            else:
                styles = [self.style]
            # need only a single match
            for s in styles:
                if re.match('^[a-z]+?', s) is not None:
                    raise ValueError(
                        "Cannot pass 'style' string with a color "
                        "symbol and 'color' keyword argument. Please"
                        " use one or the other or pass 'style' "
                        "without a color symbol")

    def _iter_data(self, data=None, keep_index=False, fillna=None):
        # yield (column-label, values) pairs; values are a Series when
        # keep_index is True, otherwise a raw ndarray
        if data is None:
            data = self.data
        if fillna is not None:
            data = data.fillna(fillna)

        # TODO: unused?
        # if self.sort_columns:
        #     columns = _try_sort(data.columns)
        # else:
        #     columns = data.columns

        for col, values in data.iteritems():
            if keep_index is True:
                yield col, values
            else:
                yield col, values.values

    @property
    def nseries(self):
        # number of columns to be plotted (1 for a Series)
        if self.data.ndim == 1:
            return 1
        else:
            return self.data.shape[1]

    def draw(self):
        self.plt.draw_if_interactive()

    def generate(self):
        # full plotting pipeline; order matters
        self._args_adjust()
        self._compute_plot_data()
        self._setup_subplots()
        self._make_plot()
        self._add_table()
        self._make_legend()
        self._adorn_subplots()

        for ax in self.axes:
            self._post_plot_logic_common(ax, self.data)
            self._post_plot_logic(ax, self.data)

    def _args_adjust(self):
        # hook for subclasses to massage arguments before plotting
        pass

    def _has_plotted_object(self, ax):
        """check whether ax has data"""
        return (len(ax.lines) != 0 or
                len(ax.artists) != 0 or
                len(ax.containers) != 0)

    def _maybe_right_yaxis(self, ax, axes_num):
        # return the axes to draw on, creating/reusing a twinx right-hand
        # axes when series ``axes_num`` is marked secondary_y
        if not self.on_right(axes_num):
            # secondary axes may be passed via ax kw
            return self._get_ax_layer(ax)

        if hasattr(ax, 'right_ax'):
            # if it has right_ax proparty, ``ax`` must be left axes
            return ax.right_ax
        elif hasattr(ax, 'left_ax'):
            # if it has left_ax proparty, ``ax`` must be right axes
            return ax
        else:
            # otherwise, create twin axes
            orig_ax, new_ax = ax, ax.twinx()
            # TODO: use Matplotlib public API when available
            new_ax._get_lines = orig_ax._get_lines
            new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
            orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax

            if not self._has_plotted_object(orig_ax):  # no data on left y
                orig_ax.get_yaxis().set_visible(False)
            return new_ax

    def _setup_subplots(self):
        # create (or adopt) the figure and axes; applies log scales
        if self.subplots:
            fig, axes = _subplots(naxes=self.nseries,
                                  sharex=self.sharex, sharey=self.sharey,
                                  figsize=self.figsize, ax=self.ax,
                                  layout=self.layout,
                                  layout_type=self._layout_type)
        else:
            if self.ax is None:
                fig = self.plt.figure(figsize=self.figsize)
                axes = fig.add_subplot(111)
            else:
                fig = self.ax.get_figure()
                if self.figsize is not None:
                    fig.set_size_inches(self.figsize)
                axes = self.ax

        axes = _flatten(axes)

        if self.logx or self.loglog:
            [a.set_xscale('log') for a in axes]
        if self.logy or self.loglog:
            [a.set_yscale('log') for a in axes]

        self.fig = fig
        self.axes = axes

    @property
    def result(self):
        """
        Return result axes
        """
        if self.subplots:
            if self.layout is not None and not is_list_like(self.ax):
                return self.axes.reshape(*self.layout)
            else:
                return self.axes
        else:
            sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
            all_sec = (is_list_like(self.secondary_y) and
                       len(self.secondary_y) == self.nseries)
            if (sec_true or all_sec):
                # if all data is plotted on secondary, return right axes
                return self._get_ax_layer(self.axes[0], primary=False)
            else:
                return self.axes[0]

    def _compute_plot_data(self):
        # coerce self.data to a numeric DataFrame; raises if nothing
        # numeric remains to plot
        data = self.data

        if isinstance(data, Series):
            label = self.label
            if label is None and data.name is None:
                label = 'None'
            data = data.to_frame(name=label)

        numeric_data = data._convert(datetime=True)._get_numeric_data()

        try:
            is_empty = numeric_data.empty
        except AttributeError:
            is_empty = not len(numeric_data)

        # no empty frames or series allowed
        if is_empty:
            raise TypeError('Empty {0!r}: no numeric data to '
                            'plot'.format(numeric_data.__class__.__name__))

        self.data = numeric_data

    def _make_plot(self):
        raise AbstractMethodError(self)

    def _add_table(self):
        # draw the data (or a user-supplied frame) as a table on axes 0
        if self.table is False:
            return
        elif self.table is True:
            data = self.data.transpose()
        else:
            data = self.table
        ax = self._get_ax(0)
        table(ax, data)

    def _post_plot_logic_common(self, ax, data):
        """Common post process for each axes"""
        labels = [pprint_thing(key) for key in data.index]
        labels = dict(zip(range(len(data.index)), labels))

        if self.orientation == 'vertical' or self.orientation is None:
            if self._need_to_set_index:
                xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
                ax.set_xticklabels(xticklabels)
            self._apply_axis_properties(ax.xaxis, rot=self.rot,
                                        fontsize=self.fontsize)
            self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
        elif self.orientation == 'horizontal':
            if self._need_to_set_index:
                yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
                ax.set_yticklabels(yticklabels)
            self._apply_axis_properties(ax.yaxis, rot=self.rot,
                                        fontsize=self.fontsize)
            self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
        else:  # pragma no cover
            raise ValueError

    def _post_plot_logic(self, ax, data):
        """Post process for each axes. Overridden in child classes"""
        pass

    def _adorn_subplots(self):
        """Common post process unrelated to data"""
        if len(self.axes) > 0:
            all_axes = self._get_subplots()
            nrows, ncols = self._get_axes_layout()
            _handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
                                naxes=nrows * ncols, nrows=nrows,
                                ncols=ncols, sharex=self.sharex,
                                sharey=self.sharey)

        for ax in self.axes:
            if self.yticks is not None:
                ax.set_yticks(self.yticks)

            if self.xticks is not None:
                ax.set_xticks(self.xticks)

            if self.ylim is not None:
                ax.set_ylim(self.ylim)

            if self.xlim is not None:
                ax.set_xlim(self.xlim)

            ax.grid(self.grid)

        if self.title:
            if self.subplots:
                if is_list_like(self.title):
                    if len(self.title) != self.nseries:
                        msg = ('The length of `title` must equal the number '
                               'of columns if using `title` of type `list` '
                               'and `subplots=True`.\n'
                               'length of title = {}\n'
                               'number of columns = {}').format(
                            len(self.title), self.nseries)
                        raise ValueError(msg)

                    for (ax, title) in zip(self.axes, self.title):
                        ax.set_title(title)
                else:
                    self.fig.suptitle(self.title)
            else:
                if is_list_like(self.title):
                    msg = ('Using `title` of type `list` is not supported '
                           'unless `subplots=True` is passed')
                    raise ValueError(msg)
                self.axes[0].set_title(self.title)

    def _apply_axis_properties(self, axis, rot=None, fontsize=None):
        # apply rotation / fontsize to every (major and minor) tick label
        labels = axis.get_majorticklabels() + axis.get_minorticklabels()
        for label in labels:
            if rot is not None:
                label.set_rotation(rot)
            if fontsize is not None:
                label.set_fontsize(fontsize)

    @property
    def legend_title(self):
        if not isinstance(self.data.columns, MultiIndex):
            name = self.data.columns.name
            if name is not None:
                name = pprint_thing(name)
            return name
        else:
            stringified = map(pprint_thing,
                              self.data.columns.names)
            return ','.join(stringified)

    def _add_legend_handle(self, handle, label, index=None):
        # collect legend entries; secondary-axis series get ' (right)'
        if label is not None:
            if self.mark_right and index is not None:
                if self.on_right(index):
                    label = label + ' (right)'
            self.legend_handles.append(handle)
            self.legend_labels.append(label)

    def _make_legend(self):
        ax, leg = self._get_ax_legend(self.axes[0])

        handles = []
        labels = []
        title = ''

        if not self.subplots:
            if leg is not None:
                # merge with a legend already on the axes
                title = leg.get_title().get_text()
                handles = leg.legendHandles
                labels = [x.get_text() for x in leg.get_texts()]

            if self.legend:
                if self.legend == 'reverse':
                    self.legend_handles = reversed(self.legend_handles)
                    self.legend_labels = reversed(self.legend_labels)

                handles += self.legend_handles
                labels += self.legend_labels
                if self.legend_title is not None:
                    title = self.legend_title

                if len(handles) > 0:
                    ax.legend(handles, labels, loc='best', title=title)

        elif self.subplots and self.legend:
            for ax in self.axes:
                if ax.get_visible():
                    ax.legend(loc='best')

    def _get_ax_legend(self, ax):
        # prefer the legend of a paired (twinx) axes if this one has none
        leg = ax.get_legend()
        other_ax = (getattr(ax, 'left_ax', None) or
                    getattr(ax, 'right_ax', None))
        other_leg = None
        if other_ax is not None:
            other_leg = other_ax.get_legend()
        if leg is None and other_leg is not None:
            leg = other_leg
            ax = other_ax
        return ax, leg

    @cache_readonly
    def plt(self):
        import matplotlib.pyplot as plt
        return plt

    @staticmethod
    def mpl_ge_1_3_1():
        return _mpl_ge_1_3_1()

    @staticmethod
    def mpl_ge_1_5_0():
        return _mpl_ge_1_5_0()

    _need_to_set_index = False

    def _get_xticks(self, convert_period=False):
        # choose x-axis values: timestamps for PeriodIndex, the index's
        # mpl representation for numeric/datetime indexes, else positions
        index = self.data.index
        is_datetype = index.inferred_type in ('datetime', 'date',
                                              'datetime64', 'time')

        if self.use_index:
            if convert_period and isinstance(index, PeriodIndex):
                self.data = self.data.reindex(index=index.sort_values())
                x = self.data.index.to_timestamp()._mpl_repr()
            elif index.is_numeric():
                """
                Matplotlib supports numeric values or datetime objects as
                xaxis values. Taking LBYL approach here, by the time
                matplotlib raises exception when using non numeric/datetime
                values for xaxis, several actions are already taken by plt.
                """
                x = index._mpl_repr()
            elif is_datetype:
                self.data = self.data.sort_index()
                x = self.data.index._mpl_repr()
            else:
                self._need_to_set_index = True
                x = lrange(len(index))
        else:
            x = lrange(len(index))

        return x

    @classmethod
    def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
        # low-level draw: masks NaNs and dispatches to errorbar() or plot()
        mask = isnull(y)
        if mask.any():
            y = np.ma.array(y)
            y = np.ma.masked_where(mask, y)

        if isinstance(x, Index):
            x = x._mpl_repr()

        if is_errorbar:
            if 'xerr' in kwds:
                kwds['xerr'] = np.array(kwds.get('xerr'))
            if 'yerr' in kwds:
                kwds['yerr'] = np.array(kwds.get('yerr'))
            return ax.errorbar(x, y, **kwds)
        else:
            # prevent style kwarg from going to errorbar, where it is
            # unsupported
            if style is not None:
                args = (x, y, style)
            else:
                args = (x, y)
            return ax.plot(*args, **kwds)

    def _get_index_name(self):
        # printable index name (comma-joined for a MultiIndex), or None
        if isinstance(self.data.index, MultiIndex):
            name = self.data.index.names
            if any(x is not None for x in name):
                name = ','.join([pprint_thing(x) for x in name])
            else:
                name = None
        else:
            name = self.data.index.name
            if name is not None:
                name = pprint_thing(name)

        return name

    @classmethod
    def _get_ax_layer(cls, ax, primary=True):
        """get left (primary) or right (secondary) axes"""
        if primary:
            return getattr(ax, 'left_ax', ax)
        else:
            return getattr(ax, 'right_ax', ax)

    def _get_ax(self, i):
        # get the twinx ax if appropriate
        if self.subplots:
            ax = self.axes[i]
            ax = self._maybe_right_yaxis(ax, i)
            self.axes[i] = ax
        else:
            ax = self.axes[0]
            ax = self._maybe_right_yaxis(ax, i)

        ax.get_yaxis().set_visible(True)
        return ax

    def on_right(self, i):
        # True when the i-th series should be drawn on the secondary axis
        if isinstance(self.secondary_y, bool):
            return self.secondary_y

        if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
            return self.data.columns[i] in self.secondary_y

    def _apply_style_colors(self, colors, kwds, col_num, label):
        """
        Manage style and color based on column number and its label.
        Returns tuple of appropriate style and kwds which "color" may be added.
        """
        style = None
        if self.style is not None:
            if isinstance(self.style, list):
                try:
                    style = self.style[col_num]
                except IndexError:
                    pass
            elif isinstance(self.style, dict):
                style = self.style.get(label, style)
            else:
                style = self.style

        has_color = 'color' in kwds or self.colormap is not None
        nocolor_style = style is None or re.match('[a-z]+', style) is None
        if (has_color or self.subplots) and nocolor_style:
            kwds['color'] = colors[col_num % len(colors)]
        return style, kwds

    def _get_colors(self, num_colors=None, color_kwds='color'):
        # resolve one color per series from kwds/colormap defaults
        if num_colors is None:
            num_colors = self.nseries

        return _get_standard_colors(num_colors=num_colors,
                                    colormap=self.colormap,
                                    color=self.kwds.get(color_kwds))

    def _parse_errorbars(self, label, err):
        """
        Look for error keyword arguments and return the actual errorbar data
        or return the error DataFrame/dict

        Error bars can be specified in several ways:
            Series: the user provides a pandas.Series object of the same
                    length as the data
            ndarray: provides a np.ndarray of the same length as the data
            DataFrame/dict: error values are paired with keys matching the
                    key in the plotted DataFrame
            str: the name of the column within the plotted DataFrame
        """

        if err is None:
            return None

        from pandas import DataFrame, Series

        def match_labels(data, e):
            e = e.reindex_axis(data.index)
            return e

        # key-matched DataFrame
        if isinstance(err, DataFrame):

            err = match_labels(self.data, err)
        # key-matched dict
        elif isinstance(err, dict):
            pass

        # Series of error values
        elif isinstance(err, Series):
            # broadcast error series across data
            err = match_labels(self.data, err)
            err = np.atleast_2d(err)
            err = np.tile(err, (self.nseries, 1))

        # errors are a column in the dataframe
        elif isinstance(err, string_types):
            evalues = self.data[err].values
            self.data = self.data[self.data.columns.drop(err)]
            err = np.atleast_2d(evalues)
            err = np.tile(err, (self.nseries, 1))

        elif is_list_like(err):
            if is_iterator(err):
                err = np.atleast_2d(list(err))
            else:
                # raw error values
                err = np.atleast_2d(err)

            err_shape = err.shape

            # asymmetrical error bars
            if err.ndim == 3:
                if (err_shape[0] != self.nseries) or \
                        (err_shape[1] != 2) or \
                        (err_shape[2] != len(self.data)):
                    msg = "Asymmetrical error bars should be provided " + \
                        "with the shape (%u, 2, %u)" % \
                        (self.nseries, len(self.data))
                    raise ValueError(msg)

            # broadcast errors to each data series
            if len(err) == 1:
                err = np.tile(err, (self.nseries, 1))

        elif is_number(err):
            err = np.tile([err], (self.nseries, len(self.data)))

        else:
            msg = "No valid %s detected" % label
            raise ValueError(msg)

        return err

    def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
        # pick the error values relevant to one series (by label or index)
        from pandas import DataFrame
        errors = {}

        for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
            if flag:
                err = self.errors[kw]
                # user provided label-matched dataframe of errors
                if isinstance(err, (DataFrame, dict)):
                    if label is not None and label in err.keys():
                        err = err[label]
                    else:
                        err = None
                elif index is not None and err is not None:
                    err = err[index]

                if err is not None:
                    errors[kw] = err
        return errors

    def _get_subplots(self):
        # all Subplot axes belonging to this figure
        from matplotlib.axes import Subplot
        return [ax for ax in self.axes[0].get_figure().get_axes()
                if isinstance(ax, Subplot)]

    def _get_axes_layout(self):
        # infer (nrows, ncols) from the distinct axes origin coordinates
        axes = self._get_subplots()
        x_set = set()
        y_set = set()
        for ax in axes:
            # check axes coordinates to estimate layout
            points = ax.get_position().get_points()
            x_set.add(points[0][0])
            y_set.add(points[0][1])
        return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
    """
    Abstract class for plotting on plane, currently scatter and hexbin.

    Subclasses draw a single series of (x, y) points on one axes. ``x``
    and ``y`` may be column labels or positional column indices.
    """

    _layout_type = 'single'

    def __init__(self, data, x, y, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if x is None or y is None:
            # fix: message previously read "requires and x and y column"
            raise ValueError(self._kind + ' requires an x and y column')
        # translate positional indices into labels, unless the columns
        # themselves hold integers (then integers are taken as labels)
        if is_integer(x) and not self.data.columns.holds_integer():
            x = self.data.columns[x]
        if is_integer(y) and not self.data.columns.holds_integer():
            y = self.data.columns[y]
        self.x = x
        self.y = y

    @property
    def nseries(self):
        # plane plots always draw exactly one series
        return 1

    def _post_plot_logic(self, ax, data):
        # label axes after the plotted columns
        x, y = self.x, self.y
        ax.set_ylabel(pprint_thing(y))
        ax.set_xlabel(pprint_thing(x))
class ScatterPlot(PlanePlot):
    # scatter plot of data[x] vs data[y]; ``c`` may be a color, a column
    # of values to colormap, or a positional column index
    _kind = 'scatter'

    def __init__(self, data, x, y, s=None, c=None, **kwargs):
        if s is None:
            # hide the matplotlib default for size, in case we want to change
            # the handling of this argument later
            s = 20
        super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
        if is_integer(c) and not self.data.columns.holds_integer():
            c = self.data.columns[c]
        self.c = c

    def _make_plot(self):
        x, y, c, data = self.x, self.y, self.c, self.data
        ax = self.axes[0]

        c_is_column = is_hashable(c) and c in self.data.columns

        # plot a colorbar only if a colormap is provided or necessary
        cb = self.kwds.pop('colorbar', self.colormap or c_is_column)

        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.colormap or 'Greys'
        cmap = self.plt.cm.get_cmap(cmap)
        color = self.kwds.pop("color", None)
        # resolve the point colors: explicit color kwarg, a data column,
        # a literal matplotlib color, or the rcParams default
        if c is not None and color is not None:
            raise TypeError('Specify exactly one of `c` and `color`')
        elif c is None and color is None:
            c_values = self.plt.rcParams['patch.facecolor']
        elif color is not None:
            c_values = color
        elif c_is_column:
            c_values = self.data[c].values
        else:
            c_values = c

        if self.legend and hasattr(self, 'label'):
            label = self.label
        else:
            label = None
        scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
                             label=label, cmap=cmap, **self.kwds)
        if cb:
            img = ax.collections[0]
            kws = dict(ax=ax)
            # colorbar label kwarg needs matplotlib >= 1.3.1
            if self.mpl_ge_1_3_1():
                kws['label'] = c if c_is_column else ''
            self.fig.colorbar(img, **kws)

        if label is not None:
            self._add_legend_handle(scatter, label)
        else:
            self.legend = False

        # overlay error bars when xerr/yerr were supplied
        errors_x = self._get_errorbars(label=x, index=0, yerr=False)
        errors_y = self._get_errorbars(label=y, index=0, xerr=False)
        if len(errors_x) > 0 or len(errors_y) > 0:
            err_kwds = dict(errors_x, **errors_y)
            err_kwds['ecolor'] = scatter.get_facecolor()[0]
            ax.errorbar(data[x].values, data[y].values,
                        linestyle='none', **err_kwds)
class HexBinPlot(PlanePlot):
    """Hexagonal-binning plot of data[x] vs data[y].

    ``C`` optionally names a column whose values are aggregated per hex
    cell; otherwise cells show point counts.
    """

    _kind = 'hexbin'

    def __init__(self, data, x, y, C=None, **kwargs):
        super(HexBinPlot, self).__init__(data, x, y, **kwargs)
        # positional column index is translated to a label, mirroring x/y
        if is_integer(C) and not self.data.columns.holds_integer():
            C = self.data.columns[C]
        self.C = C

    def _make_plot(self):
        x, y, data, C = self.x, self.y, self.data, self.C
        ax = self.axes[0]
        # pandas uses colormap, matplotlib uses cmap.
        cmap = self.plt.cm.get_cmap(self.colormap or 'BuGn')
        draw_colorbar = self.kwds.pop('colorbar', True)

        cell_values = None if C is None else data[C].values

        ax.hexbin(data[x].values, data[y].values, C=cell_values, cmap=cmap,
                  **self.kwds)
        if draw_colorbar:
            self.fig.colorbar(ax.collections[0], ax=ax)

    def _make_legend(self):
        # magnitude is conveyed by the colorbar; no legend is drawn
        pass
class LinePlot(MPLPlot):
    # line plot; handles both ordinary x/y plotting and dynamic
    # time-series plotting (datetime index with a resolvable frequency)
    _kind = 'line'
    _default_rot = 0
    orientation = 'vertical'

    def __init__(self, data, **kwargs):
        MPLPlot.__init__(self, data, **kwargs)
        if self.stacked:
            self.data = self.data.fillna(value=0)
        self.x_compat = plot_params['x_compat']
        if 'x_compat' in self.kwds:
            self.x_compat = bool(self.kwds.pop('x_compat'))

    def _is_ts_plot(self):
        # this is slightly deceptive
        return not self.x_compat and self.use_index and self._use_dynamic_x()

    def _use_dynamic_x(self):
        from pandas.tseries.plotting import _use_dynamic_x
        return _use_dynamic_x(self._get_ax(0), self.data)

    def _make_plot(self):
        # choose between time-series plotting (x taken from the index by
        # tsplot machinery) and plain plotting with explicit xticks
        if self._is_ts_plot():
            from pandas.tseries.plotting import _maybe_convert_index
            data = _maybe_convert_index(self._get_ax(0), self.data)

            x = data.index  # dummy, not used
            plotf = self._ts_plot
            it = self._iter_data(data=data, keep_index=True)
        else:
            x = self._get_xticks(convert_period=True)
            plotf = self._plot
            it = self._iter_data()

        stacking_id = self._get_stacking_id()
        is_errorbar = any(e is not None for e in self.errors.values())

        colors = self._get_colors()
        for i, (label, y) in enumerate(it):
            ax = self._get_ax(i)
            kwds = self.kwds.copy()
            style, kwds = self._apply_style_colors(colors, kwds, i, label)

            errors = self._get_errorbars(label=label, index=i)
            kwds = dict(kwds, **errors)

            label = pprint_thing(label)  # .encode('utf-8')
            kwds['label'] = label

            newlines = plotf(ax, x, y, style=style, column_num=i,
                             stacking_id=stacking_id,
                             is_errorbar=is_errorbar,
                             **kwds)
            self._add_legend_handle(newlines[0], label, index=i)

            # rescale x limits to cover everything drawn so far
            lines = _get_all_lines(ax)
            left, right = _get_xlim(lines)
            ax.set_xlim(left, right)

    @classmethod
    def _plot(cls, ax, x, y, style=None, column_num=None,
              stacking_id=None, **kwds):
        # column_num is used to get the target column from protf in line and
        # area plots
        if column_num == 0:
            cls._initialize_stacker(ax, stacking_id, len(y))
        y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
        lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
        cls._update_stacker(ax, stacking_id, y)
        return lines

    @classmethod
    def _ts_plot(cls, ax, x, data, style=None, **kwds):
        from pandas.tseries.plotting import (_maybe_resample,
                                             _decorate_axes,
                                             format_dateaxis)
        # accept x to be consistent with normal plot func,
        # x is not passed to tsplot as it uses data.index as x coordinate
        # column_num must be in kwds for stacking purpose
        freq, data = _maybe_resample(data, ax, kwds)

        # Set ax with freq info
        _decorate_axes(ax, freq, kwds)
        # digging deeper
        if hasattr(ax, 'left_ax'):
            _decorate_axes(ax.left_ax, freq, kwds)
        if hasattr(ax, 'right_ax'):
            _decorate_axes(ax.right_ax, freq, kwds)
        ax._plot_data.append((data, cls._kind, kwds))

        lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
        # set date formatter, locators and rescale limits
        format_dateaxis(ax, ax.freq, data.index)
        return lines

    def _get_stacking_id(self):
        # stacking state is keyed by the id of the plotted data
        if self.stacked:
            return id(self.data)
        else:
            return None

    @classmethod
    def _initialize_stacker(cls, ax, stacking_id, n):
        # running totals for stacked plots are stored on the axes, split
        # into positive and negative accumulators
        if stacking_id is None:
            return
        if not hasattr(ax, '_stacker_pos_prior'):
            ax._stacker_pos_prior = {}
        if not hasattr(ax, '_stacker_neg_prior'):
            ax._stacker_neg_prior = {}
        ax._stacker_pos_prior[stacking_id] = np.zeros(n)
        ax._stacker_neg_prior[stacking_id] = np.zeros(n)

    @classmethod
    def _get_stacked_values(cls, ax, stacking_id, values, label):
        # add the prior accumulated values for this stack; a column with
        # mixed signs cannot be stacked
        if stacking_id is None:
            return values
        if not hasattr(ax, '_stacker_pos_prior'):
            # stacker may not be initialized for subplots
            cls._initialize_stacker(ax, stacking_id, len(values))

        if (values >= 0).all():
            return ax._stacker_pos_prior[stacking_id] + values
        elif (values <= 0).all():
            return ax._stacker_neg_prior[stacking_id] + values

        raise ValueError('When stacked is True, each column must be either '
                         'all positive or negative.'
                         '{0} contains both positive and negative values'
                         .format(label))

    @classmethod
    def _update_stacker(cls, ax, stacking_id, values):
        # fold this column into the running stacked totals
        if stacking_id is None:
            return
        if (values >= 0).all():
            ax._stacker_pos_prior[stacking_id] += values
        elif (values <= 0).all():
            ax._stacker_neg_prior[stacking_id] += values

    def _post_plot_logic(self, ax, data):
        condition = (not self._use_dynamic_x() and
                     data.index.is_all_dates and
                     not self.subplots or
                     (self.subplots and self.sharex))

        index_name = self._get_index_name()

        if condition:
            # irregular TS rotated 30 deg. by default
            # probably a better place to check / set this.
            if not self._rot_set:
                self.rot = 30
            format_date_labels(ax, rot=self.rot)

        if index_name is not None and self.use_index:
            ax.set_xlabel(index_name)
Rectangle((0, 0), 1, 1, fc=handle.get_color(), - alpha=alpha) - LinePlot._add_legend_handle(self, handle, label, index=index) - - def _post_plot_logic(self, ax, data): - LinePlot._post_plot_logic(self, ax, data) - - if self.ylim is None: - if (data >= 0).all().all(): - ax.set_ylim(0, None) - elif (data <= 0).all().all(): - ax.set_ylim(None, 0) - - -class BarPlot(MPLPlot): - _kind = 'bar' - _default_rot = 90 - orientation = 'vertical' - - def __init__(self, data, **kwargs): - self.bar_width = kwargs.pop('width', 0.5) - pos = kwargs.pop('position', 0.5) - kwargs.setdefault('align', 'center') - self.tick_pos = np.arange(len(data)) - - self.bottom = kwargs.pop('bottom', 0) - self.left = kwargs.pop('left', 0) - - self.log = kwargs.pop('log', False) - MPLPlot.__init__(self, data, **kwargs) - - if self.stacked or self.subplots: - self.tickoffset = self.bar_width * pos - if kwargs['align'] == 'edge': - self.lim_offset = self.bar_width / 2 - else: - self.lim_offset = 0 - else: - if kwargs['align'] == 'edge': - w = self.bar_width / self.nseries - self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5 - self.lim_offset = w * 0.5 - else: - self.tickoffset = self.bar_width * pos - self.lim_offset = 0 - - self.ax_pos = self.tick_pos - self.tickoffset - - def _args_adjust(self): - if is_list_like(self.bottom): - self.bottom = np.array(self.bottom) - if is_list_like(self.left): - self.left = np.array(self.left) - - @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): - return ax.bar(x, y, w, bottom=start, log=log, **kwds) - - @property - def _start_base(self): - return self.bottom - - def _make_plot(self): - import matplotlib as mpl - - colors = self._get_colors() - ncolors = len(colors) - - pos_prior = neg_prior = np.zeros(len(self.data)) - K = self.nseries - - for i, (label, y) in enumerate(self._iter_data(fillna=0)): - ax = self._get_ax(i) - kwds = self.kwds.copy() - kwds['color'] = colors[i % ncolors] - - errors = self._get_errorbars(label=label, index=i) 
- kwds = dict(kwds, **errors) - - label = pprint_thing(label) - - if (('yerr' in kwds) or ('xerr' in kwds)) \ - and (kwds.get('ecolor') is None): - kwds['ecolor'] = mpl.rcParams['xtick.color'] - - start = 0 - if self.log and (y >= 1).all(): - start = 1 - start = start + self._start_base - - if self.subplots: - w = self.bar_width / 2 - rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, - start=start, label=label, - log=self.log, **kwds) - ax.set_title(label) - elif self.stacked: - mask = y > 0 - start = np.where(mask, pos_prior, neg_prior) + self._start_base - w = self.bar_width / 2 - rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, - start=start, label=label, - log=self.log, **kwds) - pos_prior = pos_prior + np.where(mask, y, 0) - neg_prior = neg_prior + np.where(mask, 0, y) - else: - w = self.bar_width / K - rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w, - start=start, label=label, - log=self.log, **kwds) - self._add_legend_handle(rect, label, index=i) - - def _post_plot_logic(self, ax, data): - if self.use_index: - str_index = [pprint_thing(key) for key in data.index] - else: - str_index = [pprint_thing(key) for key in range(data.shape[0])] - name = self._get_index_name() - - s_edge = self.ax_pos[0] - 0.25 + self.lim_offset - e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset - - self._decorate_ticks(ax, name, str_index, s_edge, e_edge) - - def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): - ax.set_xlim((start_edge, end_edge)) - ax.set_xticks(self.tick_pos) - ax.set_xticklabels(ticklabels) - if name is not None and self.use_index: - ax.set_xlabel(name) - - -class BarhPlot(BarPlot): - _kind = 'barh' - _default_rot = 0 - orientation = 'horizontal' - - @property - def _start_base(self): - return self.left - - @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): - return ax.barh(x, y, w, left=start, log=log, **kwds) - - def _decorate_ticks(self, ax, name, ticklabels, start_edge, 
end_edge): - # horizontal bars - ax.set_ylim((start_edge, end_edge)) - ax.set_yticks(self.tick_pos) - ax.set_yticklabels(ticklabels) - if name is not None and self.use_index: - ax.set_ylabel(name) - - -class HistPlot(LinePlot): - _kind = 'hist' - - def __init__(self, data, bins=10, bottom=0, **kwargs): - self.bins = bins # use mpl default - self.bottom = bottom - # Do not call LinePlot.__init__ which may fill nan - MPLPlot.__init__(self, data, **kwargs) - - def _args_adjust(self): - if is_integer(self.bins): - # create common bin edge - values = (self.data._convert(datetime=True)._get_numeric_data()) - values = np.ravel(values) - values = values[~isnull(values)] - - hist, self.bins = np.histogram( - values, bins=self.bins, - range=self.kwds.get('range', None), - weights=self.kwds.get('weights', None)) - - if is_list_like(self.bottom): - self.bottom = np.array(self.bottom) - - @classmethod - def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, - stacking_id=None, **kwds): - if column_num == 0: - cls._initialize_stacker(ax, stacking_id, len(bins) - 1) - y = y[~isnull(y)] - - base = np.zeros(len(bins) - 1) - bottom = bottom + \ - cls._get_stacked_values(ax, stacking_id, base, kwds['label']) - # ignore style - n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds) - cls._update_stacker(ax, stacking_id, n) - return patches - - def _make_plot(self): - colors = self._get_colors() - stacking_id = self._get_stacking_id() - - for i, (label, y) in enumerate(self._iter_data()): - ax = self._get_ax(i) - - kwds = self.kwds.copy() - - label = pprint_thing(label) - kwds['label'] = label - - style, kwds = self._apply_style_colors(colors, kwds, i, label) - if style is not None: - kwds['style'] = style - - kwds = self._make_plot_keywords(kwds, y) - artists = self._plot(ax, y, column_num=i, - stacking_id=stacking_id, **kwds) - self._add_legend_handle(artists[0], label, index=i) - - def _make_plot_keywords(self, kwds, y): - """merge BoxPlot/KdePlot properties 
to passed kwds""" - # y is required for KdePlot - kwds['bottom'] = self.bottom - kwds['bins'] = self.bins - return kwds - - def _post_plot_logic(self, ax, data): - if self.orientation == 'horizontal': - ax.set_xlabel('Frequency') - else: - ax.set_ylabel('Frequency') - - @property - def orientation(self): - if self.kwds.get('orientation', None) == 'horizontal': - return 'horizontal' - else: - return 'vertical' - - -class KdePlot(HistPlot): - _kind = 'kde' - orientation = 'vertical' - - def __init__(self, data, bw_method=None, ind=None, **kwargs): - MPLPlot.__init__(self, data, **kwargs) - self.bw_method = bw_method - self.ind = ind - - def _args_adjust(self): - pass - - def _get_ind(self, y): - if self.ind is None: - # np.nanmax() and np.nanmin() ignores the missing values - sample_range = np.nanmax(y) - np.nanmin(y) - ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, - np.nanmax(y) + 0.5 * sample_range, 1000) - else: - ind = self.ind - return ind - - @classmethod - def _plot(cls, ax, y, style=None, bw_method=None, ind=None, - column_num=None, stacking_id=None, **kwds): - from scipy.stats import gaussian_kde - from scipy import __version__ as spv - - y = remove_na(y) - - if LooseVersion(spv) >= '0.11.0': - gkde = gaussian_kde(y, bw_method=bw_method) - else: - gkde = gaussian_kde(y) - if bw_method is not None: - msg = ('bw_method was added in Scipy 0.11.0.' + - ' Scipy version in use is %s.' 
% spv) - warnings.warn(msg) - - y = gkde.evaluate(ind) - lines = MPLPlot._plot(ax, ind, y, style=style, **kwds) - return lines - - def _make_plot_keywords(self, kwds, y): - kwds['bw_method'] = self.bw_method - kwds['ind'] = self._get_ind(y) - return kwds - - def _post_plot_logic(self, ax, data): - ax.set_ylabel('Density') - - -class PiePlot(MPLPlot): - _kind = 'pie' - _layout_type = 'horizontal' - - def __init__(self, data, kind=None, **kwargs): - data = data.fillna(value=0) - if (data < 0).any().any(): - raise ValueError("{0} doesn't allow negative values".format(kind)) - MPLPlot.__init__(self, data, kind=kind, **kwargs) - - def _args_adjust(self): - self.grid = False - self.logy = False - self.logx = False - self.loglog = False - - def _validate_color_args(self): - pass - - def _make_plot(self): - colors = self._get_colors( - num_colors=len(self.data), color_kwds='colors') - self.kwds.setdefault('colors', colors) - - for i, (label, y) in enumerate(self._iter_data()): - ax = self._get_ax(i) - if label is not None: - label = pprint_thing(label) - ax.set_ylabel(label) - - kwds = self.kwds.copy() - - def blank_labeler(label, value): - if value == 0: - return '' - else: - return label - - idx = [pprint_thing(v) for v in self.data.index] - labels = kwds.pop('labels', idx) - # labels is used for each wedge's labels - # Blank out labels for values of 0 so they don't overlap - # with nonzero wedges - if labels is not None: - blabels = [blank_labeler(l, value) for - l, value in zip(labels, y)] - else: - blabels = None - results = ax.pie(y, labels=blabels, **kwds) - - if kwds.get('autopct', None) is not None: - patches, texts, autotexts = results - else: - patches, texts = results - autotexts = [] - - if self.fontsize is not None: - for t in texts + autotexts: - t.set_fontsize(self.fontsize) - - # leglabels is used for legend labels - leglabels = labels if labels is not None else idx - for p, l in zip(patches, leglabels): - self._add_legend_handle(p, l) - - -class 
BoxPlot(LinePlot): - _kind = 'box' - _layout_type = 'horizontal' - - _valid_return_types = (None, 'axes', 'dict', 'both') - # namedtuple to hold results - BP = namedtuple("Boxplot", ['ax', 'lines']) - - def __init__(self, data, return_type='axes', **kwargs): - # Do not call LinePlot.__init__ which may fill nan - if return_type not in self._valid_return_types: - raise ValueError( - "return_type must be {None, 'axes', 'dict', 'both'}") - - self.return_type = return_type - MPLPlot.__init__(self, data, **kwargs) - - def _args_adjust(self): - if self.subplots: - # Disable label ax sharing. Otherwise, all subplots shows last - # column label - if self.orientation == 'vertical': - self.sharex = False - else: - self.sharey = False - - @classmethod - def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds): - if y.ndim == 2: - y = [remove_na(v) for v in y] - # Boxplot fails with empty arrays, so need to add a NaN - # if any cols are empty - # GH 8181 - y = [v if v.size > 0 else np.array([np.nan]) for v in y] - else: - y = remove_na(y) - bp = ax.boxplot(y, **kwds) - - if return_type == 'dict': - return bp, bp - elif return_type == 'both': - return cls.BP(ax=ax, lines=bp), bp - else: - return ax, bp - - def _validate_color_args(self): - if 'color' in self.kwds: - if self.colormap is not None: - warnings.warn("'color' and 'colormap' cannot be used " - "simultaneously. 
Using 'color'") - self.color = self.kwds.pop('color') - - if isinstance(self.color, dict): - valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] - for key, values in compat.iteritems(self.color): - if key not in valid_keys: - raise ValueError("color dict contains invalid " - "key '{0}' " - "The key must be either {1}" - .format(key, valid_keys)) - else: - self.color = None - - # get standard colors for default - colors = _get_standard_colors(num_colors=3, - colormap=self.colormap, - color=None) - # use 2 colors by default, for box/whisker and median - # flier colors isn't needed here - # because it can be specified by ``sym`` kw - self._boxes_c = colors[0] - self._whiskers_c = colors[0] - self._medians_c = colors[2] - self._caps_c = 'k' # mpl default - - def _get_colors(self, num_colors=None, color_kwds='color'): - pass - - def maybe_color_bp(self, bp): - if isinstance(self.color, dict): - boxes = self.color.get('boxes', self._boxes_c) - whiskers = self.color.get('whiskers', self._whiskers_c) - medians = self.color.get('medians', self._medians_c) - caps = self.color.get('caps', self._caps_c) - else: - # Other types are forwarded to matplotlib - # If None, use default colors - boxes = self.color or self._boxes_c - whiskers = self.color or self._whiskers_c - medians = self.color or self._medians_c - caps = self.color or self._caps_c - - from matplotlib.artist import setp - setp(bp['boxes'], color=boxes, alpha=1) - setp(bp['whiskers'], color=whiskers, alpha=1) - setp(bp['medians'], color=medians, alpha=1) - setp(bp['caps'], color=caps, alpha=1) - - def _make_plot(self): - if self.subplots: - self._return_obj = Series() - - for i, (label, y) in enumerate(self._iter_data()): - ax = self._get_ax(i) - kwds = self.kwds.copy() - - ret, bp = self._plot(ax, y, column_num=i, - return_type=self.return_type, **kwds) - self.maybe_color_bp(bp) - self._return_obj[label] = ret - - label = [pprint_thing(label)] - self._set_ticklabels(ax, label) - else: - y = self.data.values.T - ax 
= self._get_ax(0) - kwds = self.kwds.copy() - - ret, bp = self._plot(ax, y, column_num=0, - return_type=self.return_type, **kwds) - self.maybe_color_bp(bp) - self._return_obj = ret - - labels = [l for l, _ in self._iter_data()] - labels = [pprint_thing(l) for l in labels] - if not self.use_index: - labels = [pprint_thing(key) for key in range(len(labels))] - self._set_ticklabels(ax, labels) - - def _set_ticklabels(self, ax, labels): - if self.orientation == 'vertical': - ax.set_xticklabels(labels) - else: - ax.set_yticklabels(labels) - - def _make_legend(self): - pass - - def _post_plot_logic(self, ax, data): - pass - - @property - def orientation(self): - if self.kwds.get('vert', True): - return 'vertical' - else: - return 'horizontal' - - @property - def result(self): - if self.return_type is None: - return super(BoxPlot, self).result - else: - return self._return_obj - - -# kinds supported by both dataframe and series -_common_kinds = ['line', 'bar', 'barh', - 'kde', 'density', 'area', 'hist', 'box'] -# kinds supported by dataframe -_dataframe_kinds = ['scatter', 'hexbin'] -# kinds supported only by series or dataframe single column -_series_kinds = ['pie'] -_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds - -_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot, - ScatterPlot, HexBinPlot, AreaPlot, PiePlot] - -_plot_klass = {} -for klass in _klasses: - _plot_klass[klass._kind] = klass - - -def _plot(data, x=None, y=None, subplots=False, - ax=None, kind='line', **kwds): - kind = _get_standard_kind(kind.lower().strip()) - if kind in _all_kinds: - klass = _plot_klass[kind] - else: - raise ValueError("%r is not a valid plot kind" % kind) - - from pandas import DataFrame - if kind in _dataframe_kinds: - if isinstance(data, DataFrame): - plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax, - kind=kind, **kwds) - else: - raise ValueError("plot kind %r can only be used for data frames" - % kind) - - elif kind in _series_kinds: - if 
isinstance(data, DataFrame): - if y is None and subplots is False: - msg = "{0} requires either y column or 'subplots=True'" - raise ValueError(msg.format(kind)) - elif y is not None: - if is_integer(y) and not data.columns.holds_integer(): - y = data.columns[y] - # converted to series actually. copy to not modify - data = data[y].copy() - data.index.name = y - plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) - else: - if isinstance(data, DataFrame): - if x is not None: - if is_integer(x) and not data.columns.holds_integer(): - x = data.columns[x] - data = data.set_index(x) - - if y is not None: - if is_integer(y) and not data.columns.holds_integer(): - y = data.columns[y] - label = kwds['label'] if 'label' in kwds else y - series = data[y].copy() # Don't modify - series.name = label - - for kw in ['xerr', 'yerr']: - if (kw in kwds) and \ - (isinstance(kwds[kw], string_types) or - is_integer(kwds[kw])): - try: - kwds[kw] = data[kwds[kw]] - except (IndexError, KeyError, TypeError): - pass - data = series - plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds) - - plot_obj.generate() - plot_obj.draw() - return plot_obj.result - - -df_kind = """- 'scatter' : scatter plot - - 'hexbin' : hexbin plot""" -series_kind = "" - -df_coord = """x : label or position, default None - y : label or position, default None - Allows plotting of one column versus another""" -series_coord = "" - -df_unique = """stacked : boolean, default False in line and - bar plots, and True in area plot. If True, create stacked plot. 
- sort_columns : boolean, default False - Sort column names to determine plot ordering - secondary_y : boolean or sequence, default False - Whether to plot on the secondary y-axis - If a list/tuple, which columns to plot on secondary y-axis""" -series_unique = """label : label argument to provide to plot - secondary_y : boolean or sequence of ints, default False - If True then y-axis will be on the right""" - -df_ax = """ax : matplotlib axes object, default None - subplots : boolean, default False - Make separate subplots for each column - sharex : boolean, default True if ax is None else False - In case subplots=True, share x axis and set some x axis labels to - invisible; defaults to True if ax is None otherwise False if an ax - is passed in; Be aware, that passing in both an ax and sharex=True - will alter all x axis labels for all axis in a figure! - sharey : boolean, default False - In case subplots=True, share y axis and set some y axis labels to - invisible - layout : tuple (optional) - (rows, columns) for the layout of subplots""" -series_ax = """ax : matplotlib axes object - If not passed, uses gca()""" - -df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe - column, the values of that column are used to color each point. - - If `kind` = 'hexbin', you can control the size of the bins with the - `gridsize` argument. By default, a histogram of the counts around each - `(x, y)` point is computed. You can specify alternative aggregations - by passing values to the `C` and `reduce_C_function` arguments. - `C` specifies the value at each `(x, y)` point and `reduce_C_function` - is a function of one argument that reduces all the values in a bin to - a single number (e.g. 
`mean`, `max`, `sum`, `std`).""" -series_note = "" - -_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df', - klass_kind=df_kind, klass_coord=df_coord, - klass_ax=df_ax, klass_unique=df_unique, - klass_note=df_note) -_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s', - klass_kind=series_kind, - klass_coord=series_coord, klass_ax=series_ax, - klass_unique=series_unique, - klass_note=series_note) - -_shared_docs['plot'] = """ - Make plots of %(klass)s using matplotlib / pylab. - - *New in version 0.17.0:* Each plot kind has a corresponding method on the - ``%(klass)s.plot`` accessor: - ``%(klass_obj)s.plot(kind='line')`` is equivalent to - ``%(klass_obj)s.plot.line()``. - - Parameters - ---------- - data : %(klass)s - %(klass_coord)s - kind : str - - 'line' : line plot (default) - - 'bar' : vertical bar plot - - 'barh' : horizontal bar plot - - 'hist' : histogram - - 'box' : boxplot - - 'kde' : Kernel Density Estimation plot - - 'density' : same as 'kde' - - 'area' : area plot - - 'pie' : pie plot - %(klass_kind)s - %(klass_ax)s - figsize : a tuple (width, height) in inches - use_index : boolean, default True - Use index as ticks for x axis - title : string or list - Title to use for the plot. If a string is passed, print the string at - the top of the figure. If a list is passed and `subplots` is True, - print each item in the list above the corresponding subplot. 
- grid : boolean, default None (matlab style default) - Axis grid lines - legend : False/True/'reverse' - Place legend on axis subplots - style : list or dict - matplotlib line style per column - logx : boolean, default False - Use log scaling on x axis - logy : boolean, default False - Use log scaling on y axis - loglog : boolean, default False - Use log scaling on both x and y axes - xticks : sequence - Values to use for the xticks - yticks : sequence - Values to use for the yticks - xlim : 2-tuple/list - ylim : 2-tuple/list - rot : int, default None - Rotation for ticks (xticks for vertical, yticks for horizontal plots) - fontsize : int, default None - Font size for xticks and yticks - colormap : str or matplotlib colormap object, default None - Colormap to select colors from. If string, load colormap with that name - from matplotlib. - colorbar : boolean, optional - If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) - position : float - Specify relative alignments for bar plot layout. - From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) - layout : tuple (optional) - (rows, columns) for the layout of the plot - table : boolean, Series or DataFrame, default False - If True, draw a table using the data in the DataFrame and the data will - be transposed to meet matplotlib's default layout. - If a Series or DataFrame is passed, use passed data to draw a table. - yerr : DataFrame, Series, array-like, dict and str - See :ref:`Plotting with Error Bars ` for - detail. - xerr : same types as yerr. 
- %(klass_unique)s - mark_right : boolean, default True - When using a secondary_y axis, automatically mark the column - labels with "(right)" in the legend - kwds : keywords - Options to pass to matplotlib plotting method - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - - Notes - ----- - - - See matplotlib documentation online for more on this subject - - If `kind` = 'bar' or 'barh', you can specify relative alignments - for bar plot layout by `position` keyword. - From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) - %(klass_note)s - - """ - - -@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs) -def plot_frame(data, x=None, y=None, kind='line', ax=None, - subplots=False, sharex=None, sharey=False, layout=None, - figsize=None, use_index=True, title=None, grid=None, - legend=True, style=None, logx=False, logy=False, loglog=False, - xticks=None, yticks=None, xlim=None, ylim=None, - rot=None, fontsize=None, colormap=None, table=False, - yerr=None, xerr=None, - secondary_y=False, sort_columns=False, - **kwds): - return _plot(data, kind=kind, x=x, y=y, ax=ax, - subplots=subplots, sharex=sharex, sharey=sharey, - layout=layout, figsize=figsize, use_index=use_index, - title=title, grid=grid, legend=legend, - style=style, logx=logx, logy=logy, loglog=loglog, - xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, - rot=rot, fontsize=fontsize, colormap=colormap, table=table, - yerr=yerr, xerr=xerr, - secondary_y=secondary_y, sort_columns=sort_columns, - **kwds) - - -@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs) -def plot_series(data, kind='line', ax=None, # Series unique - figsize=None, use_index=True, title=None, grid=None, - legend=False, style=None, logx=False, logy=False, loglog=False, - xticks=None, yticks=None, xlim=None, ylim=None, - rot=None, fontsize=None, colormap=None, table=False, - yerr=None, xerr=None, - label=None, secondary_y=False, # Series unique - **kwds): - - import matplotlib.pyplot as plt - 
""" - If no axes is specified, check whether there are existing figures - If there is no existing figures, _gca() will - create a figure with the default figsize, causing the figsize=parameter to - be ignored. - """ - if ax is None and len(plt.get_fignums()) > 0: - ax = _gca() - ax = MPLPlot._get_ax_layer(ax) - return _plot(data, kind=kind, ax=ax, - figsize=figsize, use_index=use_index, title=title, - grid=grid, legend=legend, - style=style, logx=logx, logy=logy, loglog=loglog, - xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim, - rot=rot, fontsize=fontsize, colormap=colormap, table=table, - yerr=yerr, xerr=xerr, - label=label, secondary_y=secondary_y, - **kwds) - - -_shared_docs['boxplot'] = """ - Make a box plot from DataFrame column optionally grouped by some columns or - other inputs - - Parameters - ---------- - data : the pandas object holding the data - column : column name or list of names, or vector - Can be any valid input to groupby - by : string or sequence - Column in the DataFrame to group by - ax : Matplotlib axes object, optional - fontsize : int or string - rot : label rotation angle - figsize : A tuple (width, height) in inches - grid : Setting this to True will show the grid - layout : tuple (optional) - (rows, columns) for the layout of the plot - return_type : {None, 'axes', 'dict', 'both'}, default None - The kind of object to return. The default is ``axes`` - 'axes' returns the matplotlib axes the boxplot is drawn on; - 'dict' returns a dictionary whose values are the matplotlib - Lines of the boxplot; - 'both' returns a namedtuple with the axes and dict. - - When grouping with ``by``, a Series mapping columns to ``return_type`` - is returned, unless ``return_type`` is None, in which case a NumPy - array of axes is returned with the same shape as ``layout``. - See the prose documentation for more. 
- - kwds : other plotting keyword arguments to be passed to matplotlib boxplot - function - - Returns - ------- - lines : dict - ax : matplotlib Axes - (ax, lines): namedtuple - - Notes - ----- - Use ``return_type='dict'`` when you want to tweak the appearance - of the lines after plotting. In this case a dict containing the Lines - making up the boxes, caps, fliers, medians, and whiskers is returned. - """ - - -@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs) -def boxplot(data, column=None, by=None, ax=None, fontsize=None, - rot=0, grid=True, figsize=None, layout=None, return_type=None, - **kwds): - - # validate return_type: - if return_type not in BoxPlot._valid_return_types: - raise ValueError("return_type must be {'axes', 'dict', 'both'}") - - from pandas import Series, DataFrame - if isinstance(data, Series): - data = DataFrame({'x': data}) - column = 'x' - - def _get_colors(): - return _get_standard_colors(color=kwds.get('color'), num_colors=1) - - def maybe_color_bp(bp): - if 'color' not in kwds: - from matplotlib.artist import setp - setp(bp['boxes'], color=colors[0], alpha=1) - setp(bp['whiskers'], color=colors[0], alpha=1) - setp(bp['medians'], color=colors[2], alpha=1) - - def plot_group(keys, values, ax): - keys = [pprint_thing(x) for x in keys] - values = [remove_na(v) for v in values] - bp = ax.boxplot(values, **kwds) - if fontsize is not None: - ax.tick_params(axis='both', labelsize=fontsize) - if kwds.get('vert', 1): - ax.set_xticklabels(keys, rotation=rot) - else: - ax.set_yticklabels(keys, rotation=rot) - maybe_color_bp(bp) - - # Return axes in multiplot case, maybe revisit later # 985 - if return_type == 'dict': - return bp - elif return_type == 'both': - return BoxPlot.BP(ax=ax, lines=bp) - else: - return ax - - colors = _get_colors() - if column is None: - columns = None - else: - if isinstance(column, (list, tuple)): - columns = column - else: - columns = [column] - - if by is not None: - # Prefer array return type for 2-D plots to 
match the subplot layout - # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 - result = _grouped_plot_by_column(plot_group, data, columns=columns, - by=by, grid=grid, figsize=figsize, - ax=ax, layout=layout, - return_type=return_type) - else: - if return_type is None: - return_type = 'axes' - if layout is not None: - raise ValueError("The 'layout' keyword is not supported when " - "'by' is None") - - if ax is None: - ax = _gca() - data = data._get_numeric_data() - if columns is None: - columns = data.columns - else: - data = data[columns] - - result = plot_group(columns, data.values.T, ax) - ax.grid(grid) - - return result - - -def format_date_labels(ax, rot): - # mini version of autofmt_xdate - try: - for label in ax.get_xticklabels(): - label.set_ha('right') - label.set_rotation(rot) - fig = ax.get_figure() - fig.subplots_adjust(bottom=0.2) - except Exception: # pragma: no cover - pass - - -def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, - **kwargs): - """ - Make a scatter plot from two DataFrame columns - - Parameters - ---------- - data : DataFrame - x : Column name for the x-axis values - y : Column name for the y-axis values - ax : Matplotlib axis object - figsize : A tuple (width, height) in inches - grid : Setting this to True will show the grid - kwargs : other plotting keyword arguments - To be passed to scatter function - - Returns - ------- - fig : matplotlib.Figure - """ - import matplotlib.pyplot as plt - - kwargs.setdefault('edgecolors', 'none') - - def plot_group(group, ax): - xvals = group[x].values - yvals = group[y].values - ax.scatter(xvals, yvals, **kwargs) - ax.grid(grid) - - if by is not None: - fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax) - else: - if ax is None: - fig = plt.figure() - ax = fig.add_subplot(111) - else: - fig = ax.get_figure() - plot_group(data, ax) - ax.set_ylabel(pprint_thing(y)) - ax.set_xlabel(pprint_thing(x)) - - ax.grid(grid) - - return fig - - -def 
hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, - xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False, - sharey=False, figsize=None, layout=None, bins=10, **kwds): - """ - Draw histogram of the DataFrame's series using matplotlib / pylab. - - Parameters - ---------- - data : DataFrame - column : string or sequence - If passed, will be used to limit data to a subset of columns - by : object, optional - If passed, then used to form histograms for separate groups - grid : boolean, default True - Whether to show axis grid lines - xlabelsize : int, default None - If specified changes the x-axis label size - xrot : float, default None - rotation of x axis labels - ylabelsize : int, default None - If specified changes the y-axis label size - yrot : float, default None - rotation of y axis labels - ax : matplotlib axes object, default None - sharex : boolean, default True if ax is None else False - In case subplots=True, share x axis and set some x axis labels to - invisible; defaults to True if ax is None otherwise False if an ax - is passed in; Be aware, that passing in both an ax and sharex=True - will alter all x axis labels for all subplots in a figure! 
- sharey : boolean, default False - In case subplots=True, share y axis and set some y axis labels to - invisible - figsize : tuple - The size of the figure to create in inches by default - layout : tuple, optional - Tuple of (rows, columns) for the layout of the histograms - bins : integer, default 10 - Number of histogram bins to be used - kwds : other plotting keyword arguments - To be passed to hist function - """ - - if by is not None: - axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, - figsize=figsize, sharex=sharex, sharey=sharey, - layout=layout, bins=bins, xlabelsize=xlabelsize, - xrot=xrot, ylabelsize=ylabelsize, - yrot=yrot, **kwds) - return axes - - if column is not None: - if not isinstance(column, (list, np.ndarray, Index)): - column = [column] - data = data[column] - data = data._get_numeric_data() - naxes = len(data.columns) - - fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False, - sharex=sharex, sharey=sharey, figsize=figsize, - layout=layout) - _axes = _flatten(axes) - - for i, col in enumerate(_try_sort(data.columns)): - ax = _axes[i] - ax.hist(data[col].dropna().values, bins=bins, **kwds) - ax.set_title(col) - ax.grid(grid) - - _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, - ylabelsize=ylabelsize, yrot=yrot) - fig.subplots_adjust(wspace=0.3, hspace=0.3) - - return axes - - -def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, - xrot=None, ylabelsize=None, yrot=None, figsize=None, - bins=10, **kwds): - """ - Draw histogram of the input series using matplotlib - - Parameters - ---------- - by : object, optional - If passed, then used to form histograms for separate groups - ax : matplotlib axis object - If not passed, uses gca() - grid : boolean, default True - Whether to show axis grid lines - xlabelsize : int, default None - If specified changes the x-axis label size - xrot : float, default None - rotation of x axis labels - ylabelsize : int, default None - If specified changes the y-axis label 
size - yrot : float, default None - rotation of y axis labels - figsize : tuple, default None - figure size in inches by default - bins: integer, default 10 - Number of histogram bins to be used - kwds : keywords - To be passed to the actual plotting function - - Notes - ----- - See matplotlib documentation online for more on this - - """ - import matplotlib.pyplot as plt - - if by is None: - if kwds.get('layout', None) is not None: - raise ValueError("The 'layout' keyword is not supported when " - "'by' is None") - # hack until the plotting interface is a bit more unified - fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else - plt.figure(figsize=figsize)) - if (figsize is not None and tuple(figsize) != - tuple(fig.get_size_inches())): - fig.set_size_inches(*figsize, forward=True) - if ax is None: - ax = fig.gca() - elif ax.get_figure() != fig: - raise AssertionError('passed axis not bound to passed figure') - values = self.dropna().values - - ax.hist(values, bins=bins, **kwds) - ax.grid(grid) - axes = np.array([ax]) - - _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, - ylabelsize=ylabelsize, yrot=yrot) - - else: - if 'figure' in kwds: - raise ValueError("Cannot pass 'figure' when using the " - "'by' argument, since a new 'Figure' instance " - "will be created") - axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, - bins=bins, xlabelsize=xlabelsize, xrot=xrot, - ylabelsize=ylabelsize, yrot=yrot, **kwds) - - if hasattr(axes, 'ndim'): - if axes.ndim == 1 and len(axes) == 1: - return axes[0] - return axes - - -def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, - layout=None, sharex=False, sharey=False, rot=90, grid=True, - xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, - **kwargs): - """ - Grouped histogram - - Parameters - ---------- - data: Series/DataFrame - column: object, optional - by: object, optional - ax: axes, optional - bins: int, default 50 - figsize: tuple, optional - layout: optional - 
sharex: boolean, default False - sharey: boolean, default False - rot: int, default 90 - grid: bool, default True - kwargs: dict, keyword arguments passed to matplotlib.Axes.hist - - Returns - ------- - axes: collection of Matplotlib Axes - """ - def plot_group(group, ax): - ax.hist(group.dropna().values, bins=bins, **kwargs) - - xrot = xrot or rot - - fig, axes = _grouped_plot(plot_group, data, column=column, - by=by, sharex=sharex, sharey=sharey, ax=ax, - figsize=figsize, layout=layout, rot=rot) - - _set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, - ylabelsize=ylabelsize, yrot=yrot) - - fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, - hspace=0.5, wspace=0.3) - return axes - - -def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, - rot=0, grid=True, ax=None, figsize=None, - layout=None, **kwds): - """ - Make box plots from DataFrameGroupBy data. - - Parameters - ---------- - grouped : Grouped DataFrame - subplots : - * ``False`` - no subplots will be used - * ``True`` - create a subplot for each group - column : column name or list of names, or vector - Can be any valid input to groupby - fontsize : int or string - rot : label rotation angle - grid : Setting this to True will show the grid - ax : Matplotlib axis object, default None - figsize : A tuple (width, height) in inches - layout : tuple (optional) - (rows, columns) for the layout of the plot - kwds : other plotting keyword arguments to be passed to matplotlib boxplot - function - - Returns - ------- - dict of key/value = group key/DataFrame.boxplot return value - or DataFrame.boxplot return value in case subplots=figures=False - - Examples - -------- - >>> import pandas - >>> import numpy as np - >>> import itertools - >>> - >>> tuples = [t for t in itertools.product(range(1000), range(4))] - >>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) - >>> data = np.random.randn(len(index),4) - >>> df = pandas.DataFrame(data, 
columns=list('ABCD'), index=index) - >>> - >>> grouped = df.groupby(level='lvl1') - >>> boxplot_frame_groupby(grouped) - >>> - >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) - >>> boxplot_frame_groupby(grouped, subplots=False) - """ - if subplots is True: - naxes = len(grouped) - fig, axes = _subplots(naxes=naxes, squeeze=False, - ax=ax, sharex=False, sharey=True, - figsize=figsize, layout=layout) - axes = _flatten(axes) - - ret = Series() - for (key, group), ax in zip(grouped, axes): - d = group.boxplot(ax=ax, column=column, fontsize=fontsize, - rot=rot, grid=grid, **kwds) - ax.set_title(pprint_thing(key)) - ret.loc[key] = d - fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, - right=0.9, wspace=0.2) - else: - from pandas.tools.concat import concat - keys, frames = zip(*grouped) - if grouped.axis == 0: - df = concat(frames, keys=keys, axis=1) - else: - if len(frames) > 1: - df = frames[0].join(frames[1::]) - else: - df = frames[0] - ret = df.boxplot(column=column, fontsize=fontsize, rot=rot, - grid=grid, ax=ax, figsize=figsize, - layout=layout, **kwds) - return ret - - -def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, - figsize=None, sharex=True, sharey=True, layout=None, - rot=0, ax=None, **kwargs): - from pandas import DataFrame - - if figsize == 'default': - # allowed to specify mpl default with 'default' - warnings.warn("figsize='default' is deprecated. 
Specify figure" - "size by tuple instead", FutureWarning, stacklevel=4) - figsize = None - - grouped = data.groupby(by) - if column is not None: - grouped = grouped[column] - - naxes = len(grouped) - fig, axes = _subplots(naxes=naxes, figsize=figsize, - sharex=sharex, sharey=sharey, ax=ax, - layout=layout) - - _axes = _flatten(axes) - - for i, (key, group) in enumerate(grouped): - ax = _axes[i] - if numeric_only and isinstance(group, DataFrame): - group = group._get_numeric_data() - plotf(group, ax, **kwargs) - ax.set_title(pprint_thing(key)) - - return fig, axes - - -def _grouped_plot_by_column(plotf, data, columns=None, by=None, - numeric_only=True, grid=False, - figsize=None, ax=None, layout=None, - return_type=None, **kwargs): - grouped = data.groupby(by) - if columns is None: - if not isinstance(by, (list, tuple)): - by = [by] - columns = data._get_numeric_data().columns.difference(by) - naxes = len(columns) - fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True, - figsize=figsize, ax=ax, layout=layout) - - _axes = _flatten(axes) - - result = Series() - ax_values = [] - - for i, col in enumerate(columns): - ax = _axes[i] - gp_col = grouped[col] - keys, values = zip(*gp_col) - re_plotf = plotf(keys, values, ax, **kwargs) - ax.set_title(col) - ax.set_xlabel(pprint_thing(by)) - ax_values.append(re_plotf) - ax.grid(grid) - - result = Series(ax_values, index=columns) - - # Return axes in multiplot case, maybe revisit later # 985 - if return_type is None: - result = axes - - byline = by[0] if len(by) == 1 else by - fig.suptitle('Boxplot grouped by %s' % byline) - fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) - - return result - - -def table(ax, data, rowLabels=None, colLabels=None, - **kwargs): - """ - Helper function to convert DataFrame and Series to matplotlib.table - - Parameters - ---------- - `ax`: Matplotlib axes object - `data`: DataFrame or Series - data for table contents - `kwargs`: keywords, optional - keyword 
arguments which passed to matplotlib.table.table. - If `rowLabels` or `colLabels` is not specified, data index or column - name will be used. - - Returns - ------- - matplotlib table object - """ - from pandas import DataFrame - if isinstance(data, Series): - data = DataFrame(data, columns=[data.name]) - elif isinstance(data, DataFrame): - pass - else: - raise ValueError('Input data must be DataFrame or Series') - - if rowLabels is None: - rowLabels = data.index - - if colLabels is None: - colLabels = data.columns - - cellText = data.values - - import matplotlib.table - table = matplotlib.table.table(ax, cellText=cellText, - rowLabels=rowLabels, - colLabels=colLabels, **kwargs) - return table - - -def _get_layout(nplots, layout=None, layout_type='box'): - if layout is not None: - if not isinstance(layout, (tuple, list)) or len(layout) != 2: - raise ValueError('Layout must be a tuple of (rows, columns)') - - nrows, ncols = layout - - # Python 2 compat - ceil_ = lambda x: int(ceil(x)) - if nrows == -1 and ncols > 0: - layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols) - elif ncols == -1 and nrows > 0: - layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows)) - elif ncols <= 0 and nrows <= 0: - msg = "At least one dimension of layout must be positive" - raise ValueError(msg) - - if nrows * ncols < nplots: - raise ValueError('Layout of %sx%s must be larger than ' - 'required size %s' % (nrows, ncols, nplots)) - - return layout - - if layout_type == 'single': - return (1, 1) - elif layout_type == 'horizontal': - return (1, nplots) - elif layout_type == 'vertical': - return (nplots, 1) - - layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} - try: - return layouts[nplots] - except KeyError: - k = 1 - while k ** 2 < nplots: - k += 1 - - if (k - 1) * k >= nplots: - return k, (k - 1) - else: - return k, k - -# copied from matplotlib/pyplot.py and modified for pandas.plotting - - -def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, - 
subplot_kw=None, ax=None, layout=None, layout_type='box', - **fig_kw): - """Create a figure with a set of subplots already made. - - This utility wrapper makes it convenient to create common layouts of - subplots, including the enclosing figure object, in a single call. - - Keyword arguments: - - naxes : int - Number of required axes. Exceeded axes are set invisible. Default is - nrows * ncols. - - sharex : bool - If True, the X axis will be shared amongst all subplots. - - sharey : bool - If True, the Y axis will be shared amongst all subplots. - - squeeze : bool - - If True, extra dimensions are squeezed out from the returned axis object: - - if only one subplot is constructed (nrows=ncols=1), the resulting - single Axis object is returned as a scalar. - - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object - array of Axis objects are returned as numpy 1-d arrays. - - for NxM subplots with N>1 and M>1 are returned as a 2d array. - - If False, no squeezing at all is done: the returned axis object is always - a 2-d array containing Axis instances, even if it ends up being 1x1. - - subplot_kw : dict - Dict with keywords passed to the add_subplot() call used to create each - subplots. - - ax : Matplotlib axis object, optional - - layout : tuple - Number of rows and columns of the subplot grid. - If not specified, calculated from naxes and layout_type - - layout_type : {'box', 'horziontal', 'vertical'}, default 'box' - Specify how to layout the subplot grid. - - fig_kw : Other keyword arguments to be passed to the figure() call. - Note that all keywords not recognized above will be - automatically included here. - - Returns: - - fig, ax : tuple - - fig is the Matplotlib Figure object - - ax can be either a single axis object or an array of axis objects if - more than one subplot was created. The dimensions of the resulting array - can be controlled with the squeeze keyword, see above. 
- - **Examples:** - - x = np.linspace(0, 2*np.pi, 400) - y = np.sin(x**2) - - # Just a figure and one subplot - f, ax = plt.subplots() - ax.plot(x, y) - ax.set_title('Simple plot') - - # Two subplots, unpack the output array immediately - f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) - ax1.plot(x, y) - ax1.set_title('Sharing Y axis') - ax2.scatter(x, y) - - # Four polar axes - plt.subplots(2, 2, subplot_kw=dict(polar=True)) - """ - import matplotlib.pyplot as plt - - if subplot_kw is None: - subplot_kw = {} - - if ax is None: - fig = plt.figure(**fig_kw) - else: - if is_list_like(ax): - ax = _flatten(ax) - if layout is not None: - warnings.warn("When passing multiple axes, layout keyword is " - "ignored", UserWarning) - if sharex or sharey: - warnings.warn("When passing multiple axes, sharex and sharey " - "are ignored. These settings must be specified " - "when creating axes", UserWarning, - stacklevel=4) - if len(ax) == naxes: - fig = ax[0].get_figure() - return fig, ax - else: - raise ValueError("The number of passed axes must be {0}, the " - "same as the output plot".format(naxes)) - - fig = ax.get_figure() - # if ax is passed and a number of subplots is 1, return ax as it is - if naxes == 1: - if squeeze: - return fig, ax - else: - return fig, _flatten(ax) - else: - warnings.warn("To output multiple subplots, the figure containing " - "the passed axes is being cleared", UserWarning, - stacklevel=4) - fig.clear() - - nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) - nplots = nrows * ncols - - # Create empty object array to hold all axes. 
It's easiest to make it 1-d - # so we can just append subplots upon creation, and then - axarr = np.empty(nplots, dtype=object) - - # Create first subplot separately, so we can share it if requested - ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) - - if sharex: - subplot_kw['sharex'] = ax0 - if sharey: - subplot_kw['sharey'] = ax0 - axarr[0] = ax0 - - # Note off-by-one counting because add_subplot uses the MATLAB 1-based - # convention. - for i in range(1, nplots): - kwds = subplot_kw.copy() - # Set sharex and sharey to None for blank/dummy axes, these can - # interfere with proper axis limits on the visible axes if - # they share axes e.g. issue #7528 - if i >= naxes: - kwds['sharex'] = None - kwds['sharey'] = None - ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) - axarr[i] = ax - - if naxes != nplots: - for ax in axarr[naxes:]: - ax.set_visible(False) - - _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) - - if squeeze: - # Reshape the array to have the final desired dimension (nrow,ncol), - # though discarding unneeded dimensions that equal 1. If we only have - # one subplot, just return it instead of a 1-element array. 
- if nplots == 1: - axes = axarr[0] - else: - axes = axarr.reshape(nrows, ncols).squeeze() - else: - # returned axis array will be always 2-d, even if nrows=ncols=1 - axes = axarr.reshape(nrows, ncols) - - return fig, axes - - -def _remove_labels_from_axis(axis): - for t in axis.get_majorticklabels(): - t.set_visible(False) - - try: - # set_visible will not be effective if - # minor axis has NullLocator and NullFormattor (default) - import matplotlib.ticker as ticker - if isinstance(axis.get_minor_locator(), ticker.NullLocator): - axis.set_minor_locator(ticker.AutoLocator()) - if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): - axis.set_minor_formatter(ticker.FormatStrFormatter('')) - for t in axis.get_minorticklabels(): - t.set_visible(False) - except Exception: # pragma no cover - raise - axis.get_label().set_visible(False) - - -def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): - if nplots > 1: - - if nrows > 1: - try: - # first find out the ax layout, - # so that we can correctly handle 'gaps" - layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool) - for ax in axarr: - layout[ax.rowNum, ax.colNum] = ax.get_visible() - - for ax in axarr: - # only the last row of subplots should get x labels -> all - # other off layout handles the case that the subplot is - # the last in the column, because below is no subplot/gap. - if not layout[ax.rowNum + 1, ax.colNum]: - continue - if sharex or len(ax.get_shared_x_axes() - .get_siblings(ax)) > 1: - _remove_labels_from_axis(ax.xaxis) - - except IndexError: - # if gridspec is used, ax.rowNum and ax.colNum may different - # from layout shape. 
in this case, use last_row logic - for ax in axarr: - if ax.is_last_row(): - continue - if sharex or len(ax.get_shared_x_axes() - .get_siblings(ax)) > 1: - _remove_labels_from_axis(ax.xaxis) - - if ncols > 1: - for ax in axarr: - # only the first column should get y labels -> set all other to - # off as we only have labels in teh first column and we always - # have a subplot there, we can skip the layout test - if ax.is_first_col(): - continue - if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1: - _remove_labels_from_axis(ax.yaxis) - - -def _flatten(axes): - if not is_list_like(axes): - return np.array([axes]) - elif isinstance(axes, (np.ndarray, Index)): - return axes.ravel() - return np.array(axes) - - -def _get_all_lines(ax): - lines = ax.get_lines() - - if hasattr(ax, 'right_ax'): - lines += ax.right_ax.get_lines() - - if hasattr(ax, 'left_ax'): - lines += ax.left_ax.get_lines() - - return lines - - -def _get_xlim(lines): - left, right = np.inf, -np.inf - for l in lines: - x = l.get_xdata(orig=False) - left = min(x[0], left) - right = max(x[-1], right) - return left, right - - -def _set_ticks_props(axes, xlabelsize=None, xrot=None, - ylabelsize=None, yrot=None): - import matplotlib.pyplot as plt - - for ax in _flatten(axes): - if xlabelsize is not None: - plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) - if xrot is not None: - plt.setp(ax.get_xticklabels(), rotation=xrot) - if ylabelsize is not None: - plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) - if yrot is not None: - plt.setp(ax.get_yticklabels(), rotation=yrot) - return axes - - -class BasePlotMethods(PandasObject): - - def __init__(self, data): - self._data = data - - def __call__(self, *args, **kwargs): - raise NotImplementedError - - -class SeriesPlotMethods(BasePlotMethods): - """Series plotting accessor and method - - Examples - -------- - >>> s.plot.line() - >>> s.plot.bar() - >>> s.plot.hist() - - Plotting methods can also be accessed by calling the accessor as a method - with 
the ``kind`` argument: - ``s.plot(kind='line')`` is equivalent to ``s.plot.line()`` - """ - - def __call__(self, kind='line', ax=None, - figsize=None, use_index=True, title=None, grid=None, - legend=False, style=None, logx=False, logy=False, - loglog=False, xticks=None, yticks=None, - xlim=None, ylim=None, - rot=None, fontsize=None, colormap=None, table=False, - yerr=None, xerr=None, - label=None, secondary_y=False, **kwds): - return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, - use_index=use_index, title=title, grid=grid, - legend=legend, style=style, logx=logx, logy=logy, - loglog=loglog, xticks=xticks, yticks=yticks, - xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize, - colormap=colormap, table=table, yerr=yerr, - xerr=xerr, label=label, secondary_y=secondary_y, - **kwds) - __call__.__doc__ = plot_series.__doc__ - - def line(self, **kwds): - """ - Line plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='line', **kwds) - - def bar(self, **kwds): - """ - Vertical bar plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='bar', **kwds) - - def barh(self, **kwds): - """ - Horizontal bar plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='barh', **kwds) - - def box(self, **kwds): - """ - Boxplot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
- - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='box', **kwds) - - def hist(self, bins=10, **kwds): - """ - Histogram - - .. versionadded:: 0.17.0 - - Parameters - ---------- - bins: integer, default 10 - Number of histogram bins to be used - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='hist', bins=bins, **kwds) - - def kde(self, **kwds): - """ - Kernel Density Estimate plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='kde', **kwds) - - density = kde - - def area(self, **kwds): - """ - Area plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='area', **kwds) - - def pie(self, **kwds): - """ - Pie chart - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. 
- - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='pie', **kwds) - - -class FramePlotMethods(BasePlotMethods): - """DataFrame plotting accessor and method - - Examples - -------- - >>> df.plot.line() - >>> df.plot.scatter('x', 'y') - >>> df.plot.hexbin() - - These plotting methods can also be accessed by calling the accessor as a - method with the ``kind`` argument: - ``df.plot(kind='line')`` is equivalent to ``df.plot.line()`` - """ - - def __call__(self, x=None, y=None, kind='line', ax=None, - subplots=False, sharex=None, sharey=False, layout=None, - figsize=None, use_index=True, title=None, grid=None, - legend=True, style=None, logx=False, logy=False, loglog=False, - xticks=None, yticks=None, xlim=None, ylim=None, - rot=None, fontsize=None, colormap=None, table=False, - yerr=None, xerr=None, - secondary_y=False, sort_columns=False, **kwds): - return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, - subplots=subplots, sharex=sharex, sharey=sharey, - layout=layout, figsize=figsize, use_index=use_index, - title=title, grid=grid, legend=legend, style=style, - logx=logx, logy=logy, loglog=loglog, xticks=xticks, - yticks=yticks, xlim=xlim, ylim=ylim, rot=rot, - fontsize=fontsize, colormap=colormap, table=table, - yerr=yerr, xerr=xerr, secondary_y=secondary_y, - sort_columns=sort_columns, **kwds) - __call__.__doc__ = plot_frame.__doc__ - - def line(self, x=None, y=None, **kwds): - """ - Line plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='line', x=x, y=y, **kwds) - - def bar(self, x=None, y=None, **kwds): - """ - Vertical bar plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. 
- **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='bar', x=x, y=y, **kwds) - - def barh(self, x=None, y=None, **kwds): - """ - Horizontal bar plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='barh', x=x, y=y, **kwds) - - def box(self, by=None, **kwds): - """ - Boxplot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - by : string or sequence - Column in the DataFrame to group by. - \*\*kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='box', by=by, **kwds) - - def hist(self, by=None, bins=10, **kwds): - """ - Histogram - - .. versionadded:: 0.17.0 - - Parameters - ---------- - by : string or sequence - Column in the DataFrame to group by. - bins: integer, default 10 - Number of histogram bins to be used - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='hist', by=by, bins=bins, **kwds) - - def kde(self, **kwds): - """ - Kernel Density Estimate plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='kde', **kwds) - - density = kde - - def area(self, x=None, y=None, **kwds): - """ - Area plot - - .. 
versionadded:: 0.17.0 - - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='area', x=x, y=y, **kwds) - - def pie(self, y=None, **kwds): - """ - Pie chart - - .. versionadded:: 0.17.0 - - Parameters - ---------- - y : label or position, optional - Column to plot. - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='pie', y=y, **kwds) - - def scatter(self, x, y, s=None, c=None, **kwds): - """ - Scatter plot - - .. versionadded:: 0.17.0 - - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. - s : scalar or array_like, optional - Size of each point. - c : label or position, optional - Color of each point. - **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds) +import pandas.plotting as _plotting - def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, - **kwds): - """ - Hexbin plot +# back-compat of public API +# deprecate these functions +m = sys.modules['pandas.tools.plotting'] +for t in [t for t in dir(_plotting) if not t.startswith('_')]: - .. versionadded:: 0.17.0 + def outer(t=t): - Parameters - ---------- - x, y : label or position, optional - Coordinates for each point. - C : label or position, optional - The value at each `(x, y)` point. - reduce_C_function : callable, optional - Function of one argument that reduces all the values in a bin to - a single number (e.g. `mean`, `max`, `sum`, `std`). - gridsize : int, optional - Number of bins. 
- **kwds : optional - Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. + def wrapper(*args, **kwargs): + warnings.warn("'pandas.tools.plotting.{t}' is deprecated, " + "import 'pandas.plotting.{t}' instead.".format(t=t), + FutureWarning, stacklevel=2) + return getattr(_plotting, t)(*args, **kwargs) + return wrapper - Returns - ------- - axes : matplotlib.AxesSubplot or np.array of them - """ - if reduce_C_function is not None: - kwds['reduce_C_function'] = reduce_C_function - if gridsize is not None: - kwds['gridsize'] = gridsize - return self(kind='hexbin', x=x, y=y, C=C, **kwds) + setattr(m, t, outer(t)) diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index bc768a8bc5b58..df603c4d880d8 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -1,1032 +1,11 @@ -from datetime import datetime, timedelta -import datetime as pydt -import numpy as np - -from dateutil.relativedelta import relativedelta - -import matplotlib.units as units -import matplotlib.dates as dates - -from matplotlib.ticker import Formatter, AutoLocator, Locator -from matplotlib.transforms import nonsingular - - -from pandas.types.common import (is_float, is_integer, - is_integer_dtype, - is_float_dtype, - is_datetime64_ns_dtype, - is_period_arraylike, - ) - -from pandas.compat import lrange -import pandas.compat as compat -import pandas._libs.lib as lib -import pandas.core.common as com -from pandas.core.index import Index - -from pandas.core.series import Series -from pandas.tseries.index import date_range -import pandas.tseries.tools as tools -import pandas.tseries.frequencies as frequencies -from pandas.tseries.frequencies import FreqGroup -from pandas.tseries.period import Period, PeriodIndex - -# constants -HOURS_PER_DAY = 24. -MIN_PER_HOUR = 60. -SEC_PER_MIN = 60. 
- -SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR -SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY - -MUSEC_PER_DAY = 1e6 * SEC_PER_DAY - - -def _mpl_le_2_0_0(): - try: - import matplotlib - return matplotlib.compare_versions('2.0.0', matplotlib.__version__) - except ImportError: - return False - - -def register(): - units.registry[lib.Timestamp] = DatetimeConverter() - units.registry[Period] = PeriodConverter() - units.registry[pydt.datetime] = DatetimeConverter() - units.registry[pydt.date] = DatetimeConverter() - units.registry[pydt.time] = TimeConverter() - units.registry[np.datetime64] = DatetimeConverter() - - -def _to_ordinalf(tm): - tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second + - float(tm.microsecond / 1e6)) - return tot_sec - - -def time2num(d): - if isinstance(d, compat.string_types): - parsed = tools.to_datetime(d) - if not isinstance(parsed, datetime): - raise ValueError('Could not parse time %s' % d) - return _to_ordinalf(parsed.time()) - if isinstance(d, pydt.time): - return _to_ordinalf(d) - return d - - -class TimeConverter(units.ConversionInterface): - - @staticmethod - def convert(value, unit, axis): - valid_types = (str, pydt.time) - if (isinstance(value, valid_types) or is_integer(value) or - is_float(value)): - return time2num(value) - if isinstance(value, Index): - return value.map(time2num) - if isinstance(value, (list, tuple, np.ndarray, Index)): - return [time2num(x) for x in value] - return value - - @staticmethod - def axisinfo(unit, axis): - if unit != 'time': - return None - - majloc = AutoLocator() - majfmt = TimeFormatter(majloc) - return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time') - - @staticmethod - def default_units(x, axis): - return 'time' - - -# time formatter -class TimeFormatter(Formatter): - - def __init__(self, locs): - self.locs = locs - - def __call__(self, x, pos=0): - fmt = '%H:%M:%S' - s = int(x) - ms = int((x - s) * 1e3) - us = int((x - s) * 1e6 - ms) - m, s = divmod(s, 60) - h, m = divmod(m, 60) - _, h 
= divmod(h, 24) - if us != 0: - fmt += '.%6f' - elif ms != 0: - fmt += '.%3f' - - return pydt.time(h, m, s, us).strftime(fmt) - - -# Period Conversion - - -class PeriodConverter(dates.DateConverter): - - @staticmethod - def convert(values, units, axis): - if not hasattr(axis, 'freq'): - raise TypeError('Axis must have `freq` set to convert to Periods') - valid_types = (compat.string_types, datetime, - Period, pydt.date, pydt.time) - if (isinstance(values, valid_types) or is_integer(values) or - is_float(values)): - return get_datevalue(values, axis.freq) - if isinstance(values, PeriodIndex): - return values.asfreq(axis.freq)._values - if isinstance(values, Index): - return values.map(lambda x: get_datevalue(x, axis.freq)) - if is_period_arraylike(values): - return PeriodIndex(values, freq=axis.freq)._values - if isinstance(values, (list, tuple, np.ndarray, Index)): - return [get_datevalue(x, axis.freq) for x in values] - return values - - -def get_datevalue(date, freq): - if isinstance(date, Period): - return date.asfreq(freq).ordinal - elif isinstance(date, (compat.string_types, datetime, - pydt.date, pydt.time)): - return Period(date, freq).ordinal - elif (is_integer(date) or is_float(date) or - (isinstance(date, (np.ndarray, Index)) and (date.size == 1))): - return date - elif date is None: - return None - raise ValueError("Unrecognizable date '%s'" % date) - - -def _dt_to_float_ordinal(dt): - """ - Convert :mod:`datetime` to the Gregorian date as UTC float days, - preserving hours, minutes, seconds and microseconds. Return value - is a :func:`float`. 
- """ - if (isinstance(dt, (np.ndarray, Index, Series) - ) and is_datetime64_ns_dtype(dt)): - base = dates.epoch2num(dt.asi8 / 1.0E9) - else: - base = dates.date2num(dt) - return base - - -# Datetime Conversion -class DatetimeConverter(dates.DateConverter): - - @staticmethod - def convert(values, unit, axis): - def try_parse(values): - try: - return _dt_to_float_ordinal(tools.to_datetime(values)) - except Exception: - return values - - if isinstance(values, (datetime, pydt.date)): - return _dt_to_float_ordinal(values) - elif isinstance(values, np.datetime64): - return _dt_to_float_ordinal(lib.Timestamp(values)) - elif isinstance(values, pydt.time): - return dates.date2num(values) - elif (is_integer(values) or is_float(values)): - return values - elif isinstance(values, compat.string_types): - return try_parse(values) - elif isinstance(values, (list, tuple, np.ndarray, Index)): - if isinstance(values, Index): - values = values.values - if not isinstance(values, np.ndarray): - values = com._asarray_tuplesafe(values) - - if is_integer_dtype(values) or is_float_dtype(values): - return values - - try: - values = tools.to_datetime(values) - if isinstance(values, Index): - values = _dt_to_float_ordinal(values) - else: - values = [_dt_to_float_ordinal(x) for x in values] - except Exception: - values = _dt_to_float_ordinal(values) - - return values - - @staticmethod - def axisinfo(unit, axis): - """ - Return the :class:`~matplotlib.units.AxisInfo` for *unit*. - - *unit* is a tzinfo instance or None. - The *axis* argument is required but not used. 
- """ - tz = unit - - majloc = PandasAutoDateLocator(tz=tz) - majfmt = PandasAutoDateFormatter(majloc, tz=tz) - datemin = pydt.date(2000, 1, 1) - datemax = pydt.date(2010, 1, 1) - - return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='', - default_limits=(datemin, datemax)) - - -class PandasAutoDateFormatter(dates.AutoDateFormatter): - - def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'): - dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) - # matplotlib.dates._UTC has no _utcoffset called by pandas - if self._tz is dates.UTC: - self._tz._utcoffset = self._tz.utcoffset(None) - - # For mpl > 2.0 the format strings are controlled via rcparams - # so do not mess with them. For mpl < 2.0 change the second - # break point and add a musec break point - if _mpl_le_2_0_0(): - self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S' - self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f' - - -class PandasAutoDateLocator(dates.AutoDateLocator): - - def get_locator(self, dmin, dmax): - 'Pick the best locator based on a distance.' - delta = relativedelta(dmax, dmin) - - num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days - num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds - tot_sec = num_days * 86400. + num_sec - - if abs(tot_sec) < self.minticks: - self._freq = -1 - locator = MilliSecondLocator(self.tz) - locator.set_axis(self.axis) - - locator.set_view_interval(*self.axis.get_view_interval()) - locator.set_data_interval(*self.axis.get_data_interval()) - return locator - - return dates.AutoDateLocator.get_locator(self, dmin, dmax) - - def _get_unit(self): - return MilliSecondLocator.get_unit_generic(self._freq) - - -class MilliSecondLocator(dates.DateLocator): - - UNIT = 1. / (24 * 3600 * 1000) - - def __init__(self, tz): - dates.DateLocator.__init__(self, tz) - self._interval = 1. 
- - def _get_unit(self): - return self.get_unit_generic(-1) - - @staticmethod - def get_unit_generic(freq): - unit = dates.RRuleLocator.get_unit_generic(freq) - if unit < 0: - return MilliSecondLocator.UNIT - return unit - - def __call__(self): - # if no data have been set, this will tank with a ValueError - try: - dmin, dmax = self.viewlim_to_dt() - except ValueError: - return [] - - if dmin > dmax: - dmax, dmin = dmin, dmax - # We need to cap at the endpoints of valid datetime - - # TODO(wesm) unused? - # delta = relativedelta(dmax, dmin) - # try: - # start = dmin - delta - # except ValueError: - # start = _from_ordinal(1.0) - - # try: - # stop = dmax + delta - # except ValueError: - # # The magic number! - # stop = _from_ordinal(3652059.9999999) - - nmax, nmin = dates.date2num((dmax, dmin)) - - num = (nmax - nmin) * 86400 * 1000 - max_millis_ticks = 6 - for interval in [1, 10, 50, 100, 200, 500]: - if num <= interval * (max_millis_ticks - 1): - self._interval = interval - break - else: - # We went through the whole loop without breaking, default to 1 - self._interval = 1000. - - estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) - - if estimate > self.MAXTICKS * 2: - raise RuntimeError(('MillisecondLocator estimated to generate %d ' - 'ticks from %s to %s: exceeds Locator.MAXTICKS' - '* 2 (%d) ') % - (estimate, dmin, dmax, self.MAXTICKS * 2)) - - freq = '%dL' % self._get_interval() - tz = self.tz.tzname(None) - st = _from_ordinal(dates.date2num(dmin)) # strip tz - ed = _from_ordinal(dates.date2num(dmax)) - all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject - - try: - if len(all_dates) > 0: - locs = self.raise_if_exceeds(dates.date2num(all_dates)) - return locs - except Exception: # pragma: no cover - pass - - lims = dates.date2num([dmin, dmax]) - return lims - - def _get_interval(self): - return self._interval - - def autoscale(self): - """ - Set the view limits to include the data range. 
- """ - dmin, dmax = self.datalim_to_dt() - if dmin > dmax: - dmax, dmin = dmin, dmax - - # We need to cap at the endpoints of valid datetime - - # TODO(wesm): unused? - - # delta = relativedelta(dmax, dmin) - # try: - # start = dmin - delta - # except ValueError: - # start = _from_ordinal(1.0) - - # try: - # stop = dmax + delta - # except ValueError: - # # The magic number! - # stop = _from_ordinal(3652059.9999999) - - dmin, dmax = self.datalim_to_dt() - - vmin = dates.date2num(dmin) - vmax = dates.date2num(dmax) - - return self.nonsingular(vmin, vmax) - - -def _from_ordinal(x, tz=None): - ix = int(x) - dt = datetime.fromordinal(ix) - remainder = float(x) - ix - hour, remainder = divmod(24 * remainder, 1) - minute, remainder = divmod(60 * remainder, 1) - second, remainder = divmod(60 * remainder, 1) - microsecond = int(1e6 * remainder) - if microsecond < 10: - microsecond = 0 # compensate for rounding errors - dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute), - int(second), microsecond) - if tz is not None: - dt = dt.astimezone(tz) - - if microsecond > 999990: # compensate for rounding errors - dt += timedelta(microseconds=1e6 - microsecond) - - return dt - -# Fixed frequency dynamic tick locators and formatters - -# ------------------------------------------------------------------------- -# --- Locators --- -# ------------------------------------------------------------------------- - - -def _get_default_annual_spacing(nyears): - """ - Returns a default spacing between consecutive ticks for annual data. 
- """ - if nyears < 11: - (min_spacing, maj_spacing) = (1, 1) - elif nyears < 20: - (min_spacing, maj_spacing) = (1, 2) - elif nyears < 50: - (min_spacing, maj_spacing) = (1, 5) - elif nyears < 100: - (min_spacing, maj_spacing) = (5, 10) - elif nyears < 200: - (min_spacing, maj_spacing) = (5, 25) - elif nyears < 600: - (min_spacing, maj_spacing) = (10, 50) - else: - factor = nyears // 1000 + 1 - (min_spacing, maj_spacing) = (factor * 20, factor * 100) - return (min_spacing, maj_spacing) - - -def period_break(dates, period): - """ - Returns the indices where the given period changes. - - Parameters - ---------- - dates : PeriodIndex - Array of intervals to monitor. - period : string - Name of the period to monitor. - """ - current = getattr(dates, period) - previous = getattr(dates - 1, period) - return np.nonzero(current - previous)[0] - - -def has_level_label(label_flags, vmin): - """ - Returns true if the ``label_flags`` indicate there is at least one label - for this level. - - if the minimum view limit is not an exact integer, then the first tick - label won't be shown, so we must adjust for that. 
- """ - if label_flags.size == 0 or (label_flags.size == 1 and - label_flags[0] == 0 and - vmin % 1 > 0.0): - return False - else: - return True - - -def _daily_finder(vmin, vmax, freq): - periodsperday = -1 - - if freq >= FreqGroup.FR_HR: - if freq == FreqGroup.FR_NS: - periodsperday = 24 * 60 * 60 * 1000000000 - elif freq == FreqGroup.FR_US: - periodsperday = 24 * 60 * 60 * 1000000 - elif freq == FreqGroup.FR_MS: - periodsperday = 24 * 60 * 60 * 1000 - elif freq == FreqGroup.FR_SEC: - periodsperday = 24 * 60 * 60 - elif freq == FreqGroup.FR_MIN: - periodsperday = 24 * 60 - elif freq == FreqGroup.FR_HR: - periodsperday = 24 - else: # pragma: no cover - raise ValueError("unexpected frequency: %s" % freq) - periodsperyear = 365 * periodsperday - periodspermonth = 28 * periodsperday - - elif freq == FreqGroup.FR_BUS: - periodsperyear = 261 - periodspermonth = 19 - elif freq == FreqGroup.FR_DAY: - periodsperyear = 365 - periodspermonth = 28 - elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK: - periodsperyear = 52 - periodspermonth = 3 - else: # pragma: no cover - raise ValueError("unexpected frequency") - - # save this for later usage - vmin_orig = vmin - - (vmin, vmax) = (Period(ordinal=int(vmin), freq=freq), - Period(ordinal=int(vmax), freq=freq)) - span = vmax.ordinal - vmin.ordinal + 1 - dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq) - # Initialize the output - info = np.zeros(span, - dtype=[('val', np.int64), ('maj', bool), - ('min', bool), ('fmt', '|S20')]) - info['val'][:] = dates_._values - info['fmt'][:] = '' - info['maj'][[0, -1]] = True - # .. and set some shortcuts - info_maj = info['maj'] - info_min = info['min'] - info_fmt = info['fmt'] - - def first_label(label_flags): - if (label_flags[0] == 0) and (label_flags.size > 1) and \ - ((vmin_orig % 1) > 0.0): - return label_flags[1] - else: - return label_flags[0] - - # Case 1. 
Less than a month - if span <= periodspermonth: - day_start = period_break(dates_, 'day') - month_start = period_break(dates_, 'month') - - def _hour_finder(label_interval, force_year_start): - _hour = dates_.hour - _prev_hour = (dates_ - 1).hour - hour_start = (_hour - _prev_hour) != 0 - info_maj[day_start] = True - info_min[hour_start & (_hour % label_interval == 0)] = True - year_start = period_break(dates_, 'year') - info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M' - info_fmt[day_start] = '%H:%M\n%d-%b' - info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' - if force_year_start and not has_level_label(year_start, vmin_orig): - info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y' - - def _minute_finder(label_interval): - hour_start = period_break(dates_, 'hour') - _minute = dates_.minute - _prev_minute = (dates_ - 1).minute - minute_start = (_minute - _prev_minute) != 0 - info_maj[hour_start] = True - info_min[minute_start & (_minute % label_interval == 0)] = True - year_start = period_break(dates_, 'year') - info_fmt = info['fmt'] - info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M' - info_fmt[day_start] = '%H:%M\n%d-%b' - info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' - - def _second_finder(label_interval): - minute_start = period_break(dates_, 'minute') - _second = dates_.second - _prev_second = (dates_ - 1).second - second_start = (_second - _prev_second) != 0 - info['maj'][minute_start] = True - info['min'][second_start & (_second % label_interval == 0)] = True - year_start = period_break(dates_, 'year') - info_fmt = info['fmt'] - info_fmt[second_start & (_second % - label_interval == 0)] = '%H:%M:%S' - info_fmt[day_start] = '%H:%M:%S\n%d-%b' - info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y' - - if span < periodsperday / 12000.0: - _second_finder(1) - elif span < periodsperday / 6000.0: - _second_finder(2) - elif span < periodsperday / 2400.0: - _second_finder(5) - elif span < periodsperday / 1200.0: - _second_finder(10) - elif span < 
periodsperday / 800.0: - _second_finder(15) - elif span < periodsperday / 400.0: - _second_finder(30) - elif span < periodsperday / 150.0: - _minute_finder(1) - elif span < periodsperday / 70.0: - _minute_finder(2) - elif span < periodsperday / 24.0: - _minute_finder(5) - elif span < periodsperday / 12.0: - _minute_finder(15) - elif span < periodsperday / 6.0: - _minute_finder(30) - elif span < periodsperday / 2.5: - _hour_finder(1, False) - elif span < periodsperday / 1.5: - _hour_finder(2, False) - elif span < periodsperday * 1.25: - _hour_finder(3, False) - elif span < periodsperday * 2.5: - _hour_finder(6, True) - elif span < periodsperday * 4: - _hour_finder(12, True) - else: - info_maj[month_start] = True - info_min[day_start] = True - year_start = period_break(dates_, 'year') - info_fmt = info['fmt'] - info_fmt[day_start] = '%d' - info_fmt[month_start] = '%d\n%b' - info_fmt[year_start] = '%d\n%b\n%Y' - if not has_level_label(year_start, vmin_orig): - if not has_level_label(month_start, vmin_orig): - info_fmt[first_label(day_start)] = '%d\n%b\n%Y' - else: - info_fmt[first_label(month_start)] = '%d\n%b\n%Y' - - # Case 2. Less than three months - elif span <= periodsperyear // 4: - month_start = period_break(dates_, 'month') - info_maj[month_start] = True - if freq < FreqGroup.FR_HR: - info['min'] = True - else: - day_start = period_break(dates_, 'day') - info['min'][day_start] = True - week_start = period_break(dates_, 'week') - year_start = period_break(dates_, 'year') - info_fmt[week_start] = '%d' - info_fmt[month_start] = '\n\n%b' - info_fmt[year_start] = '\n\n%b\n%Y' - if not has_level_label(year_start, vmin_orig): - if not has_level_label(month_start, vmin_orig): - info_fmt[first_label(week_start)] = '\n\n%b\n%Y' - else: - info_fmt[first_label(month_start)] = '\n\n%b\n%Y' - # Case 3. Less than 14 months ............... 
- elif span <= 1.15 * periodsperyear: - year_start = period_break(dates_, 'year') - month_start = period_break(dates_, 'month') - week_start = period_break(dates_, 'week') - info_maj[month_start] = True - info_min[week_start] = True - info_min[year_start] = False - info_min[month_start] = False - info_fmt[month_start] = '%b' - info_fmt[year_start] = '%b\n%Y' - if not has_level_label(year_start, vmin_orig): - info_fmt[first_label(month_start)] = '%b\n%Y' - # Case 4. Less than 2.5 years ............... - elif span <= 2.5 * periodsperyear: - year_start = period_break(dates_, 'year') - quarter_start = period_break(dates_, 'quarter') - month_start = period_break(dates_, 'month') - info_maj[quarter_start] = True - info_min[month_start] = True - info_fmt[quarter_start] = '%b' - info_fmt[year_start] = '%b\n%Y' - # Case 4. Less than 4 years ................. - elif span <= 4 * periodsperyear: - year_start = period_break(dates_, 'year') - month_start = period_break(dates_, 'month') - info_maj[year_start] = True - info_min[month_start] = True - info_min[year_start] = False - - month_break = dates_[month_start].month - jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] - info_fmt[jan_or_jul] = '%b' - info_fmt[year_start] = '%b\n%Y' - # Case 5. Less than 11 years ................ - elif span <= 11 * periodsperyear: - year_start = period_break(dates_, 'year') - quarter_start = period_break(dates_, 'quarter') - info_maj[year_start] = True - info_min[quarter_start] = True - info_min[year_start] = False - info_fmt[year_start] = '%Y' - # Case 6. More than 12 years ................ 
- else: - year_start = period_break(dates_, 'year') - year_break = dates_[year_start].year - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - major_idx = year_start[(year_break % maj_anndef == 0)] - info_maj[major_idx] = True - minor_idx = year_start[(year_break % min_anndef == 0)] - info_min[minor_idx] = True - info_fmt[major_idx] = '%Y' - - return info - - -def _monthly_finder(vmin, vmax, freq): - periodsperyear = 12 - - vmin_orig = vmin - (vmin, vmax) = (int(vmin), int(vmax)) - span = vmax - vmin + 1 - - # Initialize the output - info = np.zeros(span, - dtype=[('val', int), ('maj', bool), ('min', bool), - ('fmt', '|S8')]) - info['val'] = np.arange(vmin, vmax + 1) - dates_ = info['val'] - info['fmt'] = '' - year_start = (dates_ % 12 == 0).nonzero()[0] - info_maj = info['maj'] - info_fmt = info['fmt'] - - if span <= 1.15 * periodsperyear: - info_maj[year_start] = True - info['min'] = True - - info_fmt[:] = '%b' - info_fmt[year_start] = '%b\n%Y' - - if not has_level_label(year_start, vmin_orig): - if dates_.size > 1: - idx = 1 - else: - idx = 0 - info_fmt[idx] = '%b\n%Y' - - elif span <= 2.5 * periodsperyear: - quarter_start = (dates_ % 3 == 0).nonzero() - info_maj[year_start] = True - # TODO: Check the following : is it really info['fmt'] ? 
- info['fmt'][quarter_start] = True - info['min'] = True - - info_fmt[quarter_start] = '%b' - info_fmt[year_start] = '%b\n%Y' - - elif span <= 4 * periodsperyear: - info_maj[year_start] = True - info['min'] = True - - jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) - info_fmt[jan_or_jul] = '%b' - info_fmt[year_start] = '%b\n%Y' - - elif span <= 11 * periodsperyear: - quarter_start = (dates_ % 3 == 0).nonzero() - info_maj[year_start] = True - info['min'][quarter_start] = True - - info_fmt[year_start] = '%Y' - - else: - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - years = dates_[year_start] // 12 + 1 - major_idx = year_start[(years % maj_anndef == 0)] - info_maj[major_idx] = True - info['min'][year_start[(years % min_anndef == 0)]] = True - - info_fmt[major_idx] = '%Y' - - return info - - -def _quarterly_finder(vmin, vmax, freq): - periodsperyear = 4 - vmin_orig = vmin - (vmin, vmax) = (int(vmin), int(vmax)) - span = vmax - vmin + 1 - - info = np.zeros(span, - dtype=[('val', int), ('maj', bool), ('min', bool), - ('fmt', '|S8')]) - info['val'] = np.arange(vmin, vmax + 1) - info['fmt'] = '' - dates_ = info['val'] - info_maj = info['maj'] - info_fmt = info['fmt'] - year_start = (dates_ % 4 == 0).nonzero()[0] - - if span <= 3.5 * periodsperyear: - info_maj[year_start] = True - info['min'] = True - - info_fmt[:] = 'Q%q' - info_fmt[year_start] = 'Q%q\n%F' - if not has_level_label(year_start, vmin_orig): - if dates_.size > 1: - idx = 1 - else: - idx = 0 - info_fmt[idx] = 'Q%q\n%F' - - elif span <= 11 * periodsperyear: - info_maj[year_start] = True - info['min'] = True - info_fmt[year_start] = '%F' - - else: - years = dates_[year_start] // 4 + 1 - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - major_idx = year_start[(years % maj_anndef == 0)] - info_maj[major_idx] = True - info['min'][year_start[(years % min_anndef == 0)]] = True - info_fmt[major_idx] = '%F' - - 
return info - - -def _annual_finder(vmin, vmax, freq): - (vmin, vmax) = (int(vmin), int(vmax + 1)) - span = vmax - vmin + 1 - - info = np.zeros(span, - dtype=[('val', int), ('maj', bool), ('min', bool), - ('fmt', '|S8')]) - info['val'] = np.arange(vmin, vmax + 1) - info['fmt'] = '' - dates_ = info['val'] - - (min_anndef, maj_anndef) = _get_default_annual_spacing(span) - major_idx = dates_ % maj_anndef == 0 - info['maj'][major_idx] = True - info['min'][(dates_ % min_anndef == 0)] = True - info['fmt'][major_idx] = '%Y' - - return info - - -def get_finder(freq): - if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) - fgroup = frequencies.get_freq_group(freq) - - if fgroup == FreqGroup.FR_ANN: - return _annual_finder - elif fgroup == FreqGroup.FR_QTR: - return _quarterly_finder - elif freq == FreqGroup.FR_MTH: - return _monthly_finder - elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK): - return _daily_finder - else: # pragma: no cover - errmsg = "Unsupported frequency: %s" % (freq) - raise NotImplementedError(errmsg) - - -class TimeSeries_DateLocator(Locator): - """ - Locates the ticks along an axis controlled by a :class:`Series`. - - Parameters - ---------- - freq : {var} - Valid frequency specifier. - minor_locator : {False, True}, optional - Whether the locator is for minor ticks (True) or not. - dynamic_mode : {True, False}, optional - Whether the locator should work in dynamic mode. 
- base : {int}, optional - quarter : {int}, optional - month : {int}, optional - day : {int}, optional - """ - - def __init__(self, freq, minor_locator=False, dynamic_mode=True, - base=1, quarter=1, month=1, day=1, plot_obj=None): - if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) - self.freq = freq - self.base = base - (self.quarter, self.month, self.day) = (quarter, month, day) - self.isminor = minor_locator - self.isdynamic = dynamic_mode - self.offset = 0 - self.plot_obj = plot_obj - self.finder = get_finder(freq) - - def _get_default_locs(self, vmin, vmax): - "Returns the default locations of ticks." - - if self.plot_obj.date_axis_info is None: - self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) - - locator = self.plot_obj.date_axis_info - - if self.isminor: - return np.compress(locator['min'], locator['val']) - return np.compress(locator['maj'], locator['val']) - - def __call__(self): - 'Return the locations of the ticks.' - # axis calls Locator.set_axis inside set_m_formatter - vi = tuple(self.axis.get_view_interval()) - if vi != self.plot_obj.view_interval: - self.plot_obj.date_axis_info = None - self.plot_obj.view_interval = vi - vmin, vmax = vi - if vmax < vmin: - vmin, vmax = vmax, vmin - if self.isdynamic: - locs = self._get_default_locs(vmin, vmax) - else: # pragma: no cover - base = self.base - (d, m) = divmod(vmin, base) - vmin = (d + 1) * base - locs = lrange(vmin, vmax + 1, base) - return locs - - def autoscale(self): - """ - Sets the view limits to the nearest multiples of base that contain the - data. 
- """ - # requires matplotlib >= 0.98.0 - (vmin, vmax) = self.axis.get_data_interval() - - locs = self._get_default_locs(vmin, vmax) - (vmin, vmax) = locs[[0, -1]] - if vmin == vmax: - vmin -= 1 - vmax += 1 - return nonsingular(vmin, vmax) - -# ------------------------------------------------------------------------- -# --- Formatter --- -# ------------------------------------------------------------------------- - - -class TimeSeries_DateFormatter(Formatter): - """ - Formats the ticks along an axis controlled by a :class:`PeriodIndex`. - - Parameters - ---------- - freq : {int, string} - Valid frequency specifier. - minor_locator : {False, True} - Whether the current formatter should apply to minor ticks (True) or - major ticks (False). - dynamic_mode : {True, False} - Whether the formatter works in dynamic mode or not. - """ - - def __init__(self, freq, minor_locator=False, dynamic_mode=True, - plot_obj=None): - if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) - self.format = None - self.freq = freq - self.locs = [] - self.formatdict = None - self.isminor = minor_locator - self.isdynamic = dynamic_mode - self.offset = 0 - self.plot_obj = plot_obj - self.finder = get_finder(freq) - - def _set_default_format(self, vmin, vmax): - "Returns the default ticks spacing." - - if self.plot_obj.date_axis_info is None: - self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) - info = self.plot_obj.date_axis_info - - if self.isminor: - format = np.compress(info['min'] & np.logical_not(info['maj']), - info) - else: - format = np.compress(info['maj'], info) - self.formatdict = dict([(x, f) for (x, _, _, f) in format]) - return self.formatdict - - def set_locs(self, locs): - 'Sets the locations of the ticks' - # don't actually use the locs. This is just needed to work with - # matplotlib. 
Force to use vmin, vmax - self.locs = locs - - (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) - if vi != self.plot_obj.view_interval: - self.plot_obj.date_axis_info = None - self.plot_obj.view_interval = vi - if vmax < vmin: - (vmin, vmax) = (vmax, vmin) - self._set_default_format(vmin, vmax) - - def __call__(self, x, pos=0): - if self.formatdict is None: - return '' - else: - fmt = self.formatdict.pop(x, '') - return Period(ordinal=int(x), freq=self.freq).strftime(fmt) - - -class TimeSeries_TimedeltaFormatter(Formatter): - """ - Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. - """ - - @staticmethod - def format_timedelta_ticks(x, pos, n_decimals): - """ - Convert seconds to 'D days HH:MM:SS.F' - """ - s, ns = divmod(x, 1e9) - m, s = divmod(s, 60) - h, m = divmod(m, 60) - d, h = divmod(h, 24) - decimals = int(ns * 10**(n_decimals - 9)) - s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) - if n_decimals > 0: - s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) - if d != 0: - s = '{:d} days '.format(int(d)) + s - return s - - def __call__(self, x, pos=0): - (vmin, vmax) = tuple(self.axis.get_view_interval()) - n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) - if n_decimals > 9: - n_decimals = 9 - return self.format_timedelta_ticks(x, pos, n_decimals) +# flake8: noqa + +from pandas.plotting._converter import (register, time2num, + TimeConverter, TimeFormatter, + PeriodConverter, get_datevalue, + DatetimeConverter, + PandasAutoDateFormatter, + PandasAutoDateLocator, + MilliSecondLocator, get_finder, + TimeSeries_DateLocator, + TimeSeries_DateFormatter) diff --git a/pandas/tseries/plotting.py b/pandas/tseries/plotting.py index 4eddf54701889..302016907635d 100644 --- a/pandas/tseries/plotting.py +++ b/pandas/tseries/plotting.py @@ -1,344 +1,3 @@ -""" -Period formatters and locators adapted from scikits.timeseries by -Pierre GF Gerard-Marchant & Matt Knox -""" +# flake8: noqa -# TODO: Use the fact that axis 
can have units to simplify the process - -import numpy as np - -from matplotlib import pylab -from pandas.tseries.period import Period -from pandas.tseries.offsets import DateOffset -import pandas.tseries.frequencies as frequencies -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.period import PeriodIndex -from pandas.tseries.tdi import TimedeltaIndex -from pandas.formats.printing import pprint_thing -import pandas.compat as compat - -from pandas.tseries.converter import (TimeSeries_DateLocator, - TimeSeries_DateFormatter, - TimeSeries_TimedeltaFormatter) - -# --------------------------------------------------------------------- -# Plotting functions and monkey patches - - -def tsplot(series, plotf, ax=None, **kwargs): - """ - Plots a Series on the given Matplotlib axes or the current axes - - Parameters - ---------- - axes : Axes - series : Series - - Notes - _____ - Supports same kwargs as Axes.plot - - """ - # Used inferred freq is possible, need a test case for inferred - if ax is None: - import matplotlib.pyplot as plt - ax = plt.gca() - - freq, series = _maybe_resample(series, ax, kwargs) - - # Set ax with freq info - _decorate_axes(ax, freq, kwargs) - ax._plot_data.append((series, plotf, kwargs)) - lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) - - # set date formatter, locators and rescale limits - format_dateaxis(ax, ax.freq, series.index) - return lines - - -def _maybe_resample(series, ax, kwargs): - # resample against axes freq if necessary - freq, ax_freq = _get_freq(ax, series) - - if freq is None: # pragma: no cover - raise ValueError('Cannot use dynamic axis without frequency info') - - # Convert DatetimeIndex to PeriodIndex - if isinstance(series.index, DatetimeIndex): - series = series.to_period(freq=freq) - - if ax_freq is not None and freq != ax_freq: - if frequencies.is_superperiod(freq, ax_freq): # upsample input - series = series.copy() - series.index = series.index.asfreq(ax_freq, how='s') - freq = 
ax_freq - elif _is_sup(freq, ax_freq): # one is weekly - how = kwargs.pop('how', 'last') - series = getattr(series.resample('D'), how)().dropna() - series = getattr(series.resample(ax_freq), how)().dropna() - freq = ax_freq - elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): - _upsample_others(ax, freq, kwargs) - ax_freq = freq - else: # pragma: no cover - raise ValueError('Incompatible frequency conversion') - return freq, series - - -def _is_sub(f1, f2): - return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or - (f2.startswith('W') and frequencies.is_subperiod(f1, 'D'))) - - -def _is_sup(f1, f2): - return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or - (f2.startswith('W') and frequencies.is_superperiod(f1, 'D'))) - - -def _upsample_others(ax, freq, kwargs): - legend = ax.get_legend() - lines, labels = _replot_ax(ax, freq, kwargs) - _replot_ax(ax, freq, kwargs) - - other_ax = None - if hasattr(ax, 'left_ax'): - other_ax = ax.left_ax - if hasattr(ax, 'right_ax'): - other_ax = ax.right_ax - - if other_ax is not None: - rlines, rlabels = _replot_ax(other_ax, freq, kwargs) - lines.extend(rlines) - labels.extend(rlabels) - - if (legend is not None and kwargs.get('legend', True) and - len(lines) > 0): - title = legend.get_title().get_text() - if title == 'None': - title = None - ax.legend(lines, labels, loc='best', title=title) - - -def _replot_ax(ax, freq, kwargs): - data = getattr(ax, '_plot_data', None) - - # clear current axes and data - ax._plot_data = [] - ax.clear() - - _decorate_axes(ax, freq, kwargs) - - lines = [] - labels = [] - if data is not None: - for series, plotf, kwds in data: - series = series.copy() - idx = series.index.asfreq(freq, how='S') - series.index = idx - ax._plot_data.append((series, plotf, kwds)) - - # for tsplot - if isinstance(plotf, compat.string_types): - from pandas.tools.plotting import _plot_klass - plotf = _plot_klass[plotf]._plot - - lines.append(plotf(ax, 
series.index._mpl_repr(), - series.values, **kwds)[0]) - labels.append(pprint_thing(series.name)) - - return lines, labels - - -def _decorate_axes(ax, freq, kwargs): - """Initialize axes for time-series plotting""" - if not hasattr(ax, '_plot_data'): - ax._plot_data = [] - - ax.freq = freq - xaxis = ax.get_xaxis() - xaxis.freq = freq - if not hasattr(ax, 'legendlabels'): - ax.legendlabels = [kwargs.get('label', None)] - else: - ax.legendlabels.append(kwargs.get('label', None)) - ax.view_interval = None - ax.date_axis_info = None - - -def _get_ax_freq(ax): - """ - Get the freq attribute of the ax object if set. - Also checks shared axes (eg when using secondary yaxis, sharex=True - or twinx) - """ - ax_freq = getattr(ax, 'freq', None) - if ax_freq is None: - # check for left/right ax in case of secondary yaxis - if hasattr(ax, 'left_ax'): - ax_freq = getattr(ax.left_ax, 'freq', None) - elif hasattr(ax, 'right_ax'): - ax_freq = getattr(ax.right_ax, 'freq', None) - if ax_freq is None: - # check if a shared ax (sharex/twinx) has already freq set - shared_axes = ax.get_shared_x_axes().get_siblings(ax) - if len(shared_axes) > 1: - for shared_ax in shared_axes: - ax_freq = getattr(shared_ax, 'freq', None) - if ax_freq is not None: - break - return ax_freq - - -def _get_freq(ax, series): - # get frequency from data - freq = getattr(series.index, 'freq', None) - if freq is None: - freq = getattr(series.index, 'inferred_freq', None) - - ax_freq = _get_ax_freq(ax) - - # use axes freq if no data freq - if freq is None: - freq = ax_freq - - # get the period frequency - if isinstance(freq, DateOffset): - freq = freq.rule_code - else: - freq = frequencies.get_base_alias(freq) - - freq = frequencies.get_period_alias(freq) - return freq, ax_freq - - -def _use_dynamic_x(ax, data): - freq = _get_index_freq(data) - ax_freq = _get_ax_freq(ax) - - if freq is None: # convert irregular if axes has freq info - freq = ax_freq - else: # do not use tsplot if irregular was plotted first - if 
(ax_freq is None) and (len(ax.get_lines()) > 0): - return False - - if freq is None: - return False - - if isinstance(freq, DateOffset): - freq = freq.rule_code - else: - freq = frequencies.get_base_alias(freq) - freq = frequencies.get_period_alias(freq) - - if freq is None: - return False - - # hack this for 0.10.1, creating more technical debt...sigh - if isinstance(data.index, DatetimeIndex): - base = frequencies.get_freq(freq) - x = data.index - if (base <= frequencies.FreqGroup.FR_DAY): - return x[:1].is_normalized - return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0] - return True - - -def _get_index_freq(data): - freq = getattr(data.index, 'freq', None) - if freq is None: - freq = getattr(data.index, 'inferred_freq', None) - if freq == 'B': - weekdays = np.unique(data.index.dayofweek) - if (5 in weekdays) or (6 in weekdays): - freq = None - return freq - - -def _maybe_convert_index(ax, data): - # tsplot converts automatically, but don't want to convert index - # over and over for DataFrames - if isinstance(data.index, DatetimeIndex): - freq = getattr(data.index, 'freq', None) - - if freq is None: - freq = getattr(data.index, 'inferred_freq', None) - if isinstance(freq, DateOffset): - freq = freq.rule_code - - if freq is None: - freq = _get_ax_freq(ax) - - if freq is None: - raise ValueError('Could not get frequency alias for plotting') - - freq = frequencies.get_base_alias(freq) - freq = frequencies.get_period_alias(freq) - - data = data.to_period(freq=freq) - return data - - -# Patch methods for subplot. Only format_dateaxis is currently used. -# Do we need the rest for convenience? 
- -def format_timedelta_ticks(x, pos, n_decimals): - """ - Convert seconds to 'D days HH:MM:SS.F' - """ - s, ns = divmod(x, 1e9) - m, s = divmod(s, 60) - h, m = divmod(m, 60) - d, h = divmod(h, 24) - decimals = int(ns * 10**(n_decimals - 9)) - s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)) - if n_decimals > 0: - s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals) - if d != 0: - s = '{:d} days '.format(int(d)) + s - return s - - -def format_dateaxis(subplot, freq, index): - """ - Pretty-formats the date axis (x-axis). - - Major and minor ticks are automatically set for the frequency of the - current underlying series. As the dynamic mode is activated by - default, changing the limits of the x axis will intelligently change - the positions of the ticks. - """ - - # handle index specific formatting - # Note: DatetimeIndex does not use this - # interface. DatetimeIndex uses matplotlib.date directly - if isinstance(index, PeriodIndex): - - majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, - minor_locator=False, - plot_obj=subplot) - minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, - minor_locator=True, - plot_obj=subplot) - subplot.xaxis.set_major_locator(majlocator) - subplot.xaxis.set_minor_locator(minlocator) - - majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, - minor_locator=False, - plot_obj=subplot) - minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, - minor_locator=True, - plot_obj=subplot) - subplot.xaxis.set_major_formatter(majformatter) - subplot.xaxis.set_minor_formatter(minformatter) - - # x and y coord info - subplot.format_coord = lambda t, y: ( - "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) - - elif isinstance(index, TimedeltaIndex): - subplot.xaxis.set_major_formatter( - TimeSeries_TimedeltaFormatter()) - else: - raise TypeError('index type not supported') - - pylab.draw_if_interactive() +from pandas.plotting._timeseries import tsplot diff --git 
a/pandas/util/doctools.py b/pandas/util/doctools.py index 6df6444aeafab..cbc9518b96416 100644 --- a/pandas/util/doctools.py +++ b/pandas/util/doctools.py @@ -131,7 +131,7 @@ def _make_table(self, ax, df, title, height=None): ax.set_visible(False) return - import pandas.tools.plotting as plotting + import pandas.plotting as plotting idx_nlevels = df.index.nlevels col_nlevels = df.columns.nlevels diff --git a/setup.py b/setup.py index d8ee52f9b4f43..d76c6fa508008 100755 --- a/setup.py +++ b/setup.py @@ -649,6 +649,7 @@ def pxd(name): 'pandas.io.msgpack', 'pandas._libs', 'pandas.formats', + 'pandas.plotting', 'pandas.sparse', 'pandas.stats', 'pandas.util', From 005a09e971a8afb26acdd408a025f71a9951f52b Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sat, 15 Apr 2017 19:53:31 -0400 Subject: [PATCH 30/56] CLN: move top-level dirs xref #13634 pandas.types -> pandas.core.dtypes pandas.computation -> pandas.core.computation pandas.sparse -> pandas.core.sparse Author: Jeff Reback Closes #15998 from jreback/move_types and squashes the following commits: 5fe80ae [Jeff Reback] move fixtures to top-level conftest 9d36104 [Jeff Reback] moar ci d165a3f [Jeff Reback] more verbose built test 3ca5ba3 [Jeff Reback] pep f1505d7 [Jeff Reback] try with pyargs f63b76c [Jeff Reback] CLN: pandas.sparse -> pandas.core.sparse 751fb33 [Jeff Reback] move privacy changes to development section 221a7b5 [Jeff Reback] rename .typed -> .dtypes 290315e [Jeff Reback] move pandas.api.lib.infer_dtype -> pandas.api.types ba32641 [Jeff Reback] CLN: move pandas.computation -> pandas.core.computation bbdb1ee [Jeff Reback] CLN: move pandas.types -> pandas.core.typed --- asv_bench/benchmarks/binary_ops.py | 2 +- asv_bench/benchmarks/categoricals.py | 2 +- asv_bench/benchmarks/eval.py | 2 +- asv_bench/benchmarks/indexing.py | 2 +- asv_bench/benchmarks/sparse.py | 4 +- ci/install_travis.sh | 1 + ci/script_multi.sh | 4 + doc/source/categorical.rst | 2 +- doc/source/whatsnew/v0.20.0.txt | 130 ++++++++++-------- 
pandas/__init__.py | 4 +- pandas/api/types/__init__.py | 3 +- pandas/compat/numpy/function.py | 2 +- pandas/compat/pickle_compat.py | 14 +- pandas/conftest.py | 8 ++ pandas/core/algorithms.py | 13 +- pandas/core/api.py | 2 +- pandas/core/base.py | 8 +- pandas/core/categorical.py | 35 ++--- pandas/core/common.py | 14 +- pandas/{ => core}/computation/__init__.py | 0 pandas/{ => core}/computation/align.py | 2 +- pandas/{ => core}/computation/api.py | 4 +- pandas/{ => core}/computation/common.py | 0 pandas/{ => core}/computation/engines.py | 7 +- pandas/{ => core}/computation/eval.py | 8 +- pandas/{ => core}/computation/expr.py | 13 +- pandas/{ => core}/computation/expressions.py | 2 +- pandas/{ => core}/computation/ops.py | 6 +- pandas/{ => core}/computation/pytables.py | 10 +- pandas/{ => core}/computation/scope.py | 2 +- pandas/core/config_init.py | 2 +- pandas/{sparse => core/dtypes}/__init__.py | 0 pandas/{types => core/dtypes}/api.py | 0 pandas/{types => core/dtypes}/cast.py | 0 pandas/{types => core/dtypes}/common.py | 0 pandas/{types => core/dtypes}/concat.py | 41 +++--- pandas/{types => core/dtypes}/dtypes.py | 11 +- pandas/{types => core/dtypes}/generic.py | 0 pandas/{types => core/dtypes}/inference.py | 0 pandas/{types => core/dtypes}/missing.py | 0 pandas/core/frame.py | 72 +++++----- pandas/core/generic.py | 33 ++--- pandas/core/groupby.py | 43 +++--- pandas/core/indexing.py | 23 ++-- pandas/core/internals.py | 78 ++++++----- pandas/core/missing.py | 25 ++-- pandas/core/nanops.py | 23 ++-- pandas/core/ops.py | 27 ++-- pandas/core/panel.py | 16 ++- pandas/core/reshape.py | 19 +-- pandas/core/series.py | 48 +++---- pandas/core/sorting.py | 9 +- pandas/core/sparse.py | 10 -- .../computation => core/sparse}/__init__.py | 0 pandas/core/sparse/api.py | 6 + pandas/{ => core}/sparse/array.py | 31 +++-- pandas/{ => core}/sparse/frame.py | 10 +- pandas/{ => core}/sparse/list.py | 6 +- pandas/{ => core}/sparse/scipy_sparse.py | 0 pandas/{ => core}/sparse/series.py | 
20 +-- pandas/{ => core}/sparse/sparse.pyx | 0 .../{ => core}/sparse/sparse_op_helper.pxi.in | 0 pandas/core/strings.py | 21 +-- pandas/core/window.py | 30 ++-- pandas/formats/format.py | 29 ++-- pandas/formats/printing.py | 2 +- pandas/formats/style.py | 2 +- pandas/indexes/base.py | 45 +++--- pandas/indexes/category.py | 15 +- pandas/indexes/frozen.py | 2 +- pandas/indexes/interval.py | 27 ++-- pandas/indexes/multi.py | 15 +- pandas/indexes/numeric.py | 7 +- pandas/indexes/range.py | 7 +- pandas/io/common.py | 2 +- pandas/io/excel.py | 5 +- pandas/io/html.py | 2 +- pandas/io/json/json.py | 2 +- pandas/io/json/table_schema.py | 2 +- pandas/io/packers.py | 9 +- pandas/io/parsers.py | 15 +- pandas/io/parsers.pyx | 13 +- pandas/io/pickle.py | 2 +- pandas/io/pytables.py | 23 ++-- pandas/io/sql.py | 9 +- pandas/io/stata.py | 5 +- pandas/plotting/_converter.py | 12 +- pandas/plotting/_core.py | 11 +- pandas/plotting/_misc.py | 2 +- pandas/plotting/_style.py | 2 +- pandas/plotting/_tools.py | 2 +- pandas/sparse/api.py | 6 - pandas/stats/moments.py | 2 +- pandas/tests/api/test_api.py | 8 +- pandas/tests/api/test_lib.py | 10 -- pandas/tests/api/test_types.py | 15 +- pandas/tests/{sparse => core}/__init__.py | 0 .../{types => core/computation}/__init__.py | 0 .../{ => core}/computation/test_compat.py | 8 +- .../tests/{ => core}/computation/test_eval.py | 24 ++-- .../{types => tests/core/dtypes}/__init__.py | 0 .../tests/{types => core/dtypes}/test_cast.py | 21 +-- .../{types => core/dtypes}/test_common.py | 10 +- .../{types => core/dtypes}/test_concat.py | 2 +- .../{types => core/dtypes}/test_dtypes.py | 20 +-- .../{types => core/dtypes}/test_generic.py | 2 +- .../{types => core/dtypes}/test_inference.py | 35 +++-- .../tests/{types => core/dtypes}/test_io.py | 0 .../{types => core/dtypes}/test_missing.py | 7 +- pandas/tests/core/sparse/__init__.py | 0 pandas/tests/core/sparse/common.py | 0 .../{ => core}/sparse/test_arithmetics.py | 0 pandas/tests/{ => 
core}/sparse/test_array.py | 4 +- .../{ => core}/sparse/test_combine_concat.py | 0 pandas/tests/{ => core}/sparse/test_format.py | 0 pandas/tests/{ => core}/sparse/test_frame.py | 19 ++- .../tests/{ => core}/sparse/test_groupby.py | 0 .../tests/{ => core}/sparse/test_indexing.py | 0 .../tests/{ => core}/sparse/test_libsparse.py | 4 +- pandas/tests/{ => core}/sparse/test_list.py | 2 +- pandas/tests/{ => core}/sparse/test_pivot.py | 0 pandas/tests/{ => core}/sparse/test_series.py | 6 +- pandas/tests/formats/test_format.py | 29 ++-- pandas/tests/frame/test_alter_axes.py | 7 +- pandas/tests/frame/test_apply.py | 2 +- pandas/tests/frame/test_constructors.py | 2 +- pandas/tests/frame/test_dtypes.py | 2 +- pandas/tests/frame/test_indexing.py | 9 +- pandas/tests/frame/test_query_eval.py | 14 +- pandas/tests/groupby/test_bin_groupby.py | 2 +- pandas/tests/groupby/test_transform.py | 3 +- pandas/tests/indexes/common.py | 2 +- .../indexes/datetimes/test_construction.py | 15 ++ pandas/tests/indexes/datetimes/test_tools.py | 2 +- pandas/tests/indexing/common.py | 2 +- pandas/tests/indexing/test_indexing.py | 5 +- pandas/tests/indexing/test_ix.py | 2 +- .../tests/io/json/test_json_table_schema.py | 9 +- pandas/tests/io/parser/dtypes.py | 2 +- pandas/tests/io/test_feather.py | 2 + pandas/tests/io/test_sql.py | 5 +- pandas/tests/io/test_stata.py | 2 +- pandas/tests/plotting/common.py | 2 +- pandas/tests/plotting/test_frame.py | 2 +- pandas/tests/series/test_constructors.py | 4 +- pandas/tests/series/test_datetime_values.py | 2 +- pandas/tests/series/test_indexing.py | 2 +- pandas/tests/series/test_quantile.py | 2 +- pandas/tests/sparse/common.py | 10 -- pandas/tests/test_base.py | 5 +- pandas/tests/test_categorical.py | 9 +- pandas/tests/test_expressions.py | 2 +- pandas/tests/test_generic.py | 2 +- pandas/tests/test_internals.py | 2 +- pandas/tests/test_multilevel.py | 2 +- pandas/tests/test_nanops.py | 2 +- pandas/tests/test_panel.py | 2 +- pandas/tests/test_panel4d.py | 2 +- 
pandas/tests/tools/test_merge.py | 4 +- pandas/tests/tools/test_union_categoricals.py | 2 +- pandas/tests/tseries/test_resample.py | 2 +- pandas/tests/tseries/test_timezones.py | 2 +- pandas/tools/concat.py | 2 +- pandas/tools/hashing.py | 12 +- pandas/tools/merge.py | 37 ++--- pandas/tools/pivot.py | 2 +- pandas/tools/tile.py | 15 +- pandas/tools/util.py | 19 +-- pandas/tseries/base.py | 18 +-- pandas/tseries/common.py | 11 +- pandas/tseries/frequencies.py | 11 +- pandas/tseries/index.py | 37 ++--- pandas/tseries/offsets.py | 2 +- pandas/tseries/period.py | 31 +++-- pandas/tseries/tdi.py | 27 ++-- pandas/tseries/timedeltas.py | 11 +- pandas/tseries/tools.py | 28 ++-- pandas/tseries/util.py | 2 +- pandas/util/testing.py | 34 ++--- pandas/util/testing.pyx | 4 +- pandas/util/validators.py | 2 +- setup.py | 21 ++- vb_suite/binary_ops.py | 12 +- vb_suite/eval.py | 2 +- vb_suite/indexing.py | 4 +- vb_suite/sparse.py | 4 +- 186 files changed, 1021 insertions(+), 891 deletions(-) rename pandas/{ => core}/computation/__init__.py (100%) rename pandas/{ => core}/computation/align.py (98%) rename pandas/{ => core}/computation/api.py (74%) rename pandas/{ => core}/computation/common.py (100%) rename pandas/{ => core}/computation/engines.py (95%) rename pandas/{ => core}/computation/eval.py (97%) rename pandas/{ => core}/computation/expr.py (98%) rename pandas/{ => core}/computation/expressions.py (99%) rename pandas/{ => core}/computation/ops.py (98%) rename pandas/{ => core}/computation/pytables.py (98%) rename pandas/{ => core}/computation/scope.py (99%) rename pandas/{sparse => core/dtypes}/__init__.py (100%) rename pandas/{types => core/dtypes}/api.py (100%) rename pandas/{types => core/dtypes}/cast.py (100%) rename pandas/{types => core/dtypes}/common.py (100%) rename pandas/{types => core/dtypes}/concat.py (95%) rename pandas/{types => core/dtypes}/dtypes.py (97%) rename pandas/{types => core/dtypes}/generic.py (100%) rename pandas/{types => core/dtypes}/inference.py 
(100%) rename pandas/{types => core/dtypes}/missing.py (100%) delete mode 100644 pandas/core/sparse.py rename pandas/{tests/computation => core/sparse}/__init__.py (100%) create mode 100644 pandas/core/sparse/api.py rename pandas/{ => core}/sparse/array.py (97%) rename pandas/{ => core}/sparse/frame.py (99%) rename pandas/{ => core}/sparse/list.py (96%) rename pandas/{ => core}/sparse/scipy_sparse.py (100%) rename pandas/{ => core}/sparse/series.py (98%) rename pandas/{ => core}/sparse/sparse.pyx (100%) rename pandas/{ => core}/sparse/sparse_op_helper.pxi.in (100%) delete mode 100644 pandas/sparse/api.py delete mode 100644 pandas/tests/api/test_lib.py rename pandas/tests/{sparse => core}/__init__.py (100%) rename pandas/tests/{types => core/computation}/__init__.py (100%) rename pandas/tests/{ => core}/computation/test_compat.py (84%) rename pandas/tests/{ => core}/computation/test_eval.py (99%) rename pandas/{types => tests/core/dtypes}/__init__.py (100%) rename pandas/tests/{types => core/dtypes}/test_cast.py (95%) rename pandas/tests/{types => core/dtypes}/test_common.py (92%) rename pandas/tests/{types => core/dtypes}/test_concat.py (98%) rename pandas/tests/{types => core/dtypes}/test_dtypes.py (96%) rename pandas/tests/{types => core/dtypes}/test_generic.py (97%) rename pandas/tests/{types => core/dtypes}/test_inference.py (97%) rename pandas/tests/{types => core/dtypes}/test_io.py (100%) rename pandas/tests/{types => core/dtypes}/test_missing.py (98%) create mode 100644 pandas/tests/core/sparse/__init__.py create mode 100644 pandas/tests/core/sparse/common.py rename pandas/tests/{ => core}/sparse/test_arithmetics.py (100%) rename pandas/tests/{ => core}/sparse/test_array.py (99%) rename pandas/tests/{ => core}/sparse/test_combine_concat.py (100%) rename pandas/tests/{ => core}/sparse/test_format.py (100%) rename pandas/tests/{ => core}/sparse/test_frame.py (99%) rename pandas/tests/{ => core}/sparse/test_groupby.py (100%) rename pandas/tests/{ => 
core}/sparse/test_indexing.py (100%) rename pandas/tests/{ => core}/sparse/test_libsparse.py (99%) rename pandas/tests/{ => core}/sparse/test_list.py (98%) rename pandas/tests/{ => core}/sparse/test_pivot.py (100%) rename pandas/tests/{ => core}/sparse/test_series.py (99%) delete mode 100644 pandas/tests/sparse/common.py diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py index 72700c3de282e..cc869996b49cd 100644 --- a/asv_bench/benchmarks/binary_ops.py +++ b/asv_bench/benchmarks/binary_ops.py @@ -1,5 +1,5 @@ from .pandas_vb_common import * -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr class Ops(object): diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 153107911ca2c..5b0dd126acdea 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,6 +1,6 @@ from .pandas_vb_common import * try: - from pandas.types.concat import union_categoricals + from pandas.core.dtypes.concat import union_categoricals except ImportError: pass diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index a0819e33dc254..ee091e57c6403 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -1,6 +1,6 @@ from .pandas_vb_common import * import pandas as pd -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr class Eval(object): diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index a32c9f25a0f09..79844414f2746 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,6 +1,6 @@ from .pandas_vb_common import * try: - import pandas.computation.expressions as expr + import pandas.core.computation.expressions as expr except: expr = None diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index 717fe7218ceda..7d424592ed877 100644 --- 
a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -1,5 +1,5 @@ from .pandas_vb_common import * -import pandas.sparse.series +import pandas.core.sparse.series import scipy.sparse from pandas.core.sparse import SparseSeries, SparseDataFrame from pandas.core.sparse import SparseDataFrame @@ -37,7 +37,7 @@ def setup(self): self.A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100)) def time_sparse_series_from_coo(self): - self.ss = pandas.sparse.series.SparseSeries.from_coo(self.A) + self.ss = pandas.core.sparse.series.SparseSeries.from_coo(self.A) class sparse_series_to_coo(object): diff --git a/ci/install_travis.sh b/ci/install_travis.sh index f71df979c9df0..09668cbccc9d2 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -123,6 +123,7 @@ if [ "$BUILD_TEST" ]; then # build & install testing echo ["Starting installation test."] + rm -rf dist python setup.py clean python setup.py build_ext --inplace python setup.py sdist --formats=gztar diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 88ecaf344a410..663d2feb5be23 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -19,7 +19,11 @@ export PYTHONHASHSEED=$(python -c 'import random; print(random.randint(1, 429496 echo PYTHONHASHSEED=$PYTHONHASHSEED if [ "$BUILD_TEST" ]; then + echo "build-test" cd /tmp + pwd + conda list pandas + echo "running" python -c "import pandas; pandas.test(['-n 2'])" elif [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 411f973e9a71f..a508e84465107 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -673,7 +673,7 @@ will be the union of the categories being combined. .. 
ipython:: python - from pandas.types.concat import union_categoricals + from pandas.api.types import union_categoricals a = pd.Categorical(["b", "c"]) b = pd.Categorical(["a", "b"]) union_categoricals([a, b]) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 914995244fe5f..33d80f8347b0a 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -21,8 +21,11 @@ Highlights include: - Support for S3 handling now uses ``s3fs``, see :ref:`here ` - Google BigQuery support now uses the ``pandas-gbq`` library, see :ref:`here ` - Switched the test framework to use `pytest `__ (:issue:`13097`) -- The ``pandas.tools.plotting`` module has been deprecated, moved to ``pandas.plotting``. See :ref:`here ` +.. warning:: + + Pandas has changed the internal structure and layout of the codebase. + This can affect imports that are not from the top-level ``pandas.*`` namespace, please see the changes :ref:`here `. Check the :ref:`API Changes ` and :ref:`deprecations ` before updating. @@ -489,7 +492,7 @@ Other Enhancements - ``TimedeltaIndex`` now has a custom datetick formatter specifically designed for nanosecond level precision (:issue:`8711`) -- ``pd.types.concat.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs ` for more information. +- ``pd.api.types.union_categoricals`` gained the ``ignore_ordered`` argument to allow ignoring the ordered attribute of unioned categoricals (:issue:`13410`). See the :ref:`categorical union docs ` for more information. - ``DataFrame.to_latex()`` and ``DataFrame.to_string()`` now allow optional header aliases. (:issue:`15536`) - Re-enable the ``parse_dates`` keyword of ``pd.read_excel()`` to parse string columns as dates (:issue:`14326`) - Added ``.empty`` property to subclasses of ``Index``. (:issue:`15270`) @@ -558,31 +561,6 @@ Using ``.iloc``. 
Here we will get the location of the 'A' column, then use *posi df.iloc[[0, 2], df.columns.get_loc('A')] -.. _whatsnew_0200.api_breaking.deprecate_plotting - -Deprecate .plotting -^^^^^^^^^^^^^^^^^^^ - -The ``pandas.tools.plotting`` module has been deprecated, in favor of the top level ``pandas.plotting`` module. All the public plotting functions are now available -from ``pandas.plotting`` (:issue:`12548`). - -Furthermore, the top-level ``pandas.scatter_matrix`` and ``pandas.plot_params`` are deprecated. -Users can import these from ``pandas.plotting`` as well. - -Previous script: - -.. code-block:: python - - pd.tools.plotting.scatter_matrix(df) - pd.scatter_matrix(df) - -Should be changed to: - -.. code-block:: python - - pd.plotting.scatter_matrix(df) - - .. _whatsnew_0200.api_breaking.deprecate_panel: Deprecate Panel @@ -1026,34 +1004,6 @@ New Behavior: In [11]: index.memory_usage(deep=True) Out[11]: 260 -.. _whatsnew_0200.api_breaking.extensions: - -Extension Modules Moved -^^^^^^^^^^^^^^^^^^^^^^^ - -Some formerly public c/c++/cython extension modules have been moved and/or renamed. These are all removed from the public API. -If indicated, a deprecation warning will be issued if you reference that module. (:issue:`12588`) - -.. 
csv-table:: - :header: "Previous Location", "New Location", "Deprecated" - :widths: 30, 30, 4 - - "pandas.lib", "pandas._libs.lib", "X" - "pandas.tslib", "pandas._libs.tslib", "X" - "pandas._join", "pandas._libs.join", "" - "pandas._period", "pandas._libs.period", "" - "pandas.msgpack", "pandas.io.msgpack", "" - "pandas.index", "pandas._libs.index", "" - "pandas.algos", "pandas._libs.algos", "" - "pandas.hashtable", "pandas._libs.hashtable", "" - "pandas.json", "pandas.io.json.libjson", "X" - "pandas.parser", "pandas.io.libparsers", "X" - "pandas.io.sas.saslib", "pandas.io.sas.libsas", "" - "pandas._testing", "pandas.util.libtesting", "" - "pandas._sparse", "pandas.sparse.libsparse", "" - "pandas._hash", "pandas.tools.libhash", "" - "pandas._window", "pandas.core.libwindow", "" - .. _whatsnew_0200.api_breaking.sort_index: DataFrame.sort_index changes @@ -1354,10 +1304,74 @@ Other API Changes - ``DataFrame`` and ``Panel`` constructors with invalid input will now raise ``ValueError`` rather than ``pandas.core.common.PandasError``, if called with scalar inputs and not axes; The exception ``PandasError`` is removed as well. (:issue:`15541`) - The exception ``pandas.core.common.AmbiguousIndexError`` is removed as it is not referenced (:issue:`15541`) -.. _whatsnew_0200.develop: +.. _whatsnew_0200.privacy: + +Privacy Changes +~~~~~~~~~~~~~~~ + +.. _whatsnew_0200.privacy.extensions: + +Modules Privacy Has Changed +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some formerly public python/c/c++/cython extension modules have been moved and/or renamed. These are all removed from the public API. +Furthermore, the ``pandas.core``, ``pandas.io``, and ``pandas.util`` top-level modules are now considered to be PRIVATE. +If indicated, a deprecation warning will be issued if you reference theses modules. (:issue:`12588`) + +.. 
csv-table:: + :header: "Previous Location", "New Location", "Deprecated" + :widths: 30, 30, 4 + + "pandas.lib", "pandas._libs.lib", "X" + "pandas.tslib", "pandas._libs.tslib", "X" + "pandas.computation", "pandas.core.computation", "" + "pandas.msgpack", "pandas.io.msgpack", "" + "pandas.index", "pandas._libs.index", "" + "pandas.algos", "pandas._libs.algos", "" + "pandas.hashtable", "pandas._libs.hashtable", "" + "pandas.json", "pandas.io.json.libjson", "X" + "pandas.parser", "pandas.io.libparsers", "X" + "pandas.sparse", "pandas.core.sparse", "" + "pandas.types", "pandas.core.dtypes", "" + "pandas.io.sas.saslib", "pandas.io.sas.libsas", "" + "pandas._join", "pandas._libs.join", "" + "pandas._hash", "pandas.tools.libhash", "" + "pandas._period", "pandas._libs.period", "" + "pandas._sparse", "pandas.core.sparse.libsparse", "" + "pandas._testing", "pandas.util.libtesting", "" + "pandas._window", "pandas.core.libwindow", "" + +- The function :func:`~pandas.api.type.union_categoricals` is now importable from ``pandas.api.types``, formerly from ``pandas.types.concat`` (:issue:`15998`) + +.. _whatsnew_0200.privacy.deprecate_plotting + +Deprecate .plotting +^^^^^^^^^^^^^^^^^^^ + +The ``pandas.tools.plotting`` module has been deprecated, in favor of the top level ``pandas.plotting`` module. All the public plotting functions are now available +from ``pandas.plotting`` (:issue:`12548`). + +Furthermore, the top-level ``pandas.scatter_matrix`` and ``pandas.plot_params`` are deprecated. +Users can import these from ``pandas.plotting`` as well. + +Previous script: + +.. code-block:: python + + pd.tools.plotting.scatter_matrix(df) + pd.scatter_matrix(df) + +Should be changed to: + +.. code-block:: python + + pd.plotting.scatter_matrix(df) + + +.. 
_whatsnew_0200.privacy.development: -Development Changes -~~~~~~~~~~~~~~~~~~~ +Other Developement Changes +^^^^^^^^^^^^^^^^^^^^^^^^^^ - Building pandas for development now requires ``cython >= 0.23`` (:issue:`14831`) - Require at least 0.23 version of cython to avoid problems with character encodings (:issue:`14699`) diff --git a/pandas/__init__.py b/pandas/__init__.py index bc38919f2c78c..4e1bcbd613965 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -40,10 +40,10 @@ import pandas.core.config_init from pandas.core.api import * -from pandas.sparse.api import * +from pandas.core.sparse.api import * from pandas.stats.api import * from pandas.tseries.api import * -from pandas.computation.api import * +from pandas.core.computation.api import * from pandas.tools.concat import concat from pandas.tools.merge import (merge, ordered_merge, diff --git a/pandas/api/types/__init__.py b/pandas/api/types/__init__.py index ee217543f0420..06fb5742ba067 100644 --- a/pandas/api/types/__init__.py +++ b/pandas/api/types/__init__.py @@ -1,4 +1,5 @@ """ public toolkit API """ -from pandas.types.api import * # noqa +from pandas.core.dtypes.api import * # noqa +from pandas.core.dtypes.concat import union_categoricals # noqa del np # noqa diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 1dd22795533fc..d707ac66c4eab 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -22,7 +22,7 @@ from pandas.util.validators import (validate_args, validate_kwargs, validate_args_and_kwargs) from pandas.errors import UnsupportedFunctionCall -from pandas.types.common import is_integer, is_bool +from pandas.core.dtypes.common import is_integer, is_bool from pandas.compat import OrderedDict diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 5b4fcad252192..e977fdc3a267d 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -67,16 +67,24 @@ def load_reduce(self): 
('pandas.core.series', 'TimeSeries'): ('pandas.core.series', 'Series'), ('pandas.sparse.series', 'SparseTimeSeries'): - ('pandas.sparse.series', 'SparseSeries'), + ('pandas.core.sparse.series', 'SparseSeries'), # 12588, extensions moving ('pandas._sparse', 'BlockIndex'): - ('pandas.sparse.libsparse', 'BlockIndex'), + ('pandas.core.sparse.libsparse', 'BlockIndex'), ('pandas.tslib', 'Timestamp'): ('pandas._libs.tslib', 'Timestamp'), ('pandas.tslib', '__nat_unpickle'): ('pandas._libs.tslib', '__nat_unpickle'), - ('pandas._period', 'Period'): ('pandas._libs.period', 'Period') + ('pandas._period', 'Period'): ('pandas._libs.period', 'Period'), + + # 15998 top-level dirs moving + ('pandas.sparse.array', 'SparseArray'): + ('pandas.core.sparse.array', 'SparseArray'), + ('pandas.sparse.series', 'SparseSeries'): + ('pandas.core.sparse.series', 'SparseSeries'), + ('pandas.sparse.frame', 'SparseDataFrame'): + ('pandas.core.sparse.frame', 'SparseDataFrame') } diff --git a/pandas/conftest.py b/pandas/conftest.py index e0a15f740688b..caced6a0c568e 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -2,6 +2,7 @@ import numpy import pandas +import pandas.util.testing as tm def pytest_addoption(parser): @@ -30,3 +31,10 @@ def pytest_runtest_setup(item): def add_imports(doctest_namespace): doctest_namespace['np'] = numpy doctest_namespace['pd'] = pandas + + +@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']) +def spmatrix(request): + tm._skip_if_no_scipy() + from scipy import sparse + return getattr(sparse, request.param + '_matrix') diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 5d2db864dd48e..6df7fce631a3c 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -7,10 +7,11 @@ import numpy as np from pandas import compat, _np_version_under1p8 -from pandas.types.cast import maybe_promote -from pandas.types.generic import (ABCSeries, ABCIndex, - ABCIndexClass, ABCCategorical) -from pandas.types.common import ( 
+from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.generic import ( + ABCSeries, ABCIndex, + ABCIndexClass, ABCCategorical) +from pandas.core.dtypes.common import ( is_unsigned_integer_dtype, is_signed_integer_dtype, is_integer_dtype, is_complex_dtype, is_categorical_dtype, is_sparse, @@ -25,9 +26,9 @@ _ensure_float64, _ensure_uint64, _ensure_int64) from pandas.compat.numpy import _np_version_under1p10 -from pandas.types.missing import isnull +from pandas.core.dtypes.missing import isnull -import pandas.core.common as com +from pandas.core import common as com from pandas.compat import string_types from pandas._libs import algos, lib, hashtable as htable from pandas._libs.tslib import iNaT diff --git a/pandas/core/api.py b/pandas/core/api.py index ea5be17ef3aaf..8e8969e1f6b26 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -5,7 +5,7 @@ import numpy as np from pandas.core.algorithms import factorize, unique, value_counts -from pandas.types.missing import isnull, notnull +from pandas.core.dtypes.missing import isnull, notnull from pandas.core.categorical import Categorical from pandas.core.groupby import Grouper from pandas.formats.format import set_eng_float_format diff --git a/pandas/core/base.py b/pandas/core/base.py index 33c95197debdc..e30751a6582f9 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -6,9 +6,9 @@ from pandas.compat import builtins import numpy as np -from pandas.types.missing import isnull -from pandas.types.generic import ABCDataFrame, ABCSeries, ABCIndexClass -from pandas.types.common import is_object_dtype, is_list_like, is_scalar +from pandas.core.dtypes.missing import isnull +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass +from pandas.core.dtypes.common import is_object_dtype, is_list_like, is_scalar from pandas.util.validators import validate_bool_kwarg from pandas.core import common as com @@ -725,7 +725,7 @@ def _aggregate_multiple_funcs(self, arg, _level, 
_axis): # we are concatting non-NDFrame objects, # e.g. a list of scalars - from pandas.types.cast import is_nested_object + from pandas.core.dtypes.cast import is_nested_object from pandas import Series result = Series(results, index=keys, name=self.name) if is_nested_object(result): diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 906e8efafe4af..d1f060113cf1d 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -8,21 +8,24 @@ from pandas.compat import u, lzip from pandas._libs import lib, algos as libalgos -from pandas.types.generic import ABCSeries, ABCIndexClass, ABCCategoricalIndex -from pandas.types.missing import isnull, notnull -from pandas.types.cast import (maybe_infer_to_datetimelike, - coerce_indexer_dtype) -from pandas.types.dtypes import CategoricalDtype -from pandas.types.common import (_ensure_int64, - _ensure_object, - _ensure_platform_int, - is_dtype_equal, - is_datetimelike, - is_categorical, - is_categorical_dtype, - is_integer_dtype, is_bool, - is_list_like, is_sequence, - is_scalar) +from pandas.core.dtypes.generic import ( + ABCSeries, ABCIndexClass, ABCCategoricalIndex) +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.cast import ( + maybe_infer_to_datetimelike, + coerce_indexer_dtype) +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.common import ( + _ensure_int64, + _ensure_object, + _ensure_platform_int, + is_dtype_equal, + is_datetimelike, + is_categorical, + is_categorical_dtype, + is_integer_dtype, is_bool, + is_list_like, is_sequence, + is_scalar) from pandas.core.common import is_null_slice from pandas.core.algorithms import factorize, take_1d, unique1d @@ -1215,7 +1218,7 @@ def value_counts(self, dropna=True): """ from numpy import bincount - from pandas.types.missing import isnull + from pandas.core.dtypes.missing import isnull from pandas.core.series import Series from pandas.core.index import CategoricalIndex diff 
--git a/pandas/core/common.py b/pandas/core/common.py index bf4acf1fbf257..39a5da0aa6912 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -13,12 +13,12 @@ from pandas import compat from pandas.compat import long, zip, iteritems from pandas.core.config import get_option -from pandas.types.generic import ABCSeries -from pandas.types.common import _NS_DTYPE -from pandas.types.inference import _iterable_not_string -from pandas.types.missing import isnull +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.common import _NS_DTYPE +from pandas.core.dtypes.inference import _iterable_not_string +from pandas.core.dtypes.missing import isnull from pandas.api import types -from pandas.types import common +from pandas.core.dtypes import common # compat from pandas.errors import ( # noqa @@ -60,7 +60,7 @@ def wrapper(*args, **kwargs): warnings.warn("pandas.core.common.{t} is deprecated. " "These are not longer public API functions, " "but can be imported from " - "pandas.types.common.{t} instead".format(t=t), + "pandas.api.types.{t} instead".format(t=t), DeprecationWarning, stacklevel=3) return getattr(common, t)(*args, **kwargs) return wrapper @@ -73,7 +73,7 @@ def wrapper(*args, **kwargs): def array_equivalent(*args, **kwargs): warnings.warn("'pandas.core.common.array_equivalent' is deprecated and " "is no longer public API", DeprecationWarning, stacklevel=2) - from pandas.types import missing + from pandas.core.dtypes import missing return missing.array_equivalent(*args, **kwargs) diff --git a/pandas/computation/__init__.py b/pandas/core/computation/__init__.py similarity index 100% rename from pandas/computation/__init__.py rename to pandas/core/computation/__init__.py diff --git a/pandas/computation/align.py b/pandas/core/computation/align.py similarity index 98% rename from pandas/computation/align.py rename to pandas/core/computation/align.py index b4c80f4d493af..1c75301082297 100644 --- a/pandas/computation/align.py +++ 
b/pandas/core/computation/align.py @@ -11,7 +11,7 @@ from pandas import compat from pandas.errors import PerformanceWarning from pandas.core.common import flatten -from pandas.computation.common import _result_type_many +from pandas.core.computation.common import _result_type_many def _align_core_single_unary_op(term): diff --git a/pandas/computation/api.py b/pandas/core/computation/api.py similarity index 74% rename from pandas/computation/api.py rename to pandas/core/computation/api.py index fe3dad015048e..a6fe5aae822df 100644 --- a/pandas/computation/api.py +++ b/pandas/core/computation/api.py @@ -1,6 +1,6 @@ # flake8: noqa -from pandas.computation.eval import eval +from pandas.core.computation.eval import eval # deprecation, xref #13790 @@ -10,5 +10,5 @@ def Expr(*args, **kwargs): warnings.warn("pd.Expr is deprecated as it is not " "applicable to user code", FutureWarning, stacklevel=2) - from pandas.computation.expr import Expr + from pandas.core.computation.expr import Expr return Expr(*args, **kwargs) diff --git a/pandas/computation/common.py b/pandas/core/computation/common.py similarity index 100% rename from pandas/computation/common.py rename to pandas/core/computation/common.py diff --git a/pandas/computation/engines.py b/pandas/core/computation/engines.py similarity index 95% rename from pandas/computation/engines.py rename to pandas/core/computation/engines.py index aebc5bb02d59d..675a3d5eca792 100644 --- a/pandas/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -7,9 +7,10 @@ from pandas import compat from pandas.compat import map import pandas.formats.printing as printing -from pandas.computation.align import _align, _reconstruct_object -from pandas.computation.ops import (UndefinedVariableError, - _mathops, _reductions) +from pandas.core.computation.align import _align, _reconstruct_object +from pandas.core.computation.ops import ( + UndefinedVariableError, + _mathops, _reductions) _ne_builtins = frozenset(_mathops + _reductions) 
diff --git a/pandas/computation/eval.py b/pandas/core/computation/eval.py similarity index 97% rename from pandas/computation/eval.py rename to pandas/core/computation/eval.py index 5b21c753a71da..fc3986e317d13 100644 --- a/pandas/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -6,11 +6,11 @@ import warnings import tokenize from pandas.formats.printing import pprint_thing -from pandas.computation import _NUMEXPR_INSTALLED -from pandas.computation.expr import Expr, _parsers, tokenize_string -from pandas.computation.scope import _ensure_scope +from pandas.core.computation import _NUMEXPR_INSTALLED +from pandas.core.computation.expr import Expr, _parsers, tokenize_string +from pandas.core.computation.scope import _ensure_scope from pandas.compat import string_types -from pandas.computation.engines import _engines +from pandas.core.computation.engines import _engines from pandas.util.validators import validate_bool_kwarg diff --git a/pandas/computation/expr.py b/pandas/core/computation/expr.py similarity index 98% rename from pandas/computation/expr.py rename to pandas/core/computation/expr.py index e78806b38c667..01c5d1f6f100c 100644 --- a/pandas/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -14,12 +14,13 @@ from pandas.core import common as com import pandas.formats.printing as printing from pandas.tools.util import compose -from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms, - _arith_ops_syms, _unary_ops_syms, is_term) -from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG -from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div -from pandas.computation.ops import UndefinedVariableError, FuncNode -from pandas.computation.scope import Scope +from pandas.core.computation.ops import ( + _cmp_ops_syms, _bool_ops_syms, + _arith_ops_syms, _unary_ops_syms, is_term) +from pandas.core.computation.ops import _reductions, _mathops, _LOCAL_TAG +from pandas.core.computation.ops import Op, BinOp, 
UnaryOp, Term, Constant, Div +from pandas.core.computation.ops import UndefinedVariableError, FuncNode +from pandas.core.computation.scope import Scope def tokenize_string(source): diff --git a/pandas/computation/expressions.py b/pandas/core/computation/expressions.py similarity index 99% rename from pandas/computation/expressions.py rename to pandas/core/computation/expressions.py index 8fd9ab3477b74..4eeefb183001e 100644 --- a/pandas/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -9,7 +9,7 @@ import warnings import numpy as np from pandas.core.common import _values_from_object -from pandas.computation import _NUMEXPR_INSTALLED +from pandas.core.computation import _NUMEXPR_INSTALLED if _NUMEXPR_INSTALLED: import numexpr as ne diff --git a/pandas/computation/ops.py b/pandas/core/computation/ops.py similarity index 98% rename from pandas/computation/ops.py rename to pandas/core/computation/ops.py index 6ba2a21940d55..91c414bbc0ec1 100644 --- a/pandas/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -7,14 +7,14 @@ import numpy as np -from pandas.types.common import is_list_like, is_scalar +from pandas.core.dtypes.common import is_list_like, is_scalar import pandas as pd from pandas.compat import PY3, string_types, text_type import pandas.core.common as com from pandas.formats.printing import pprint_thing, pprint_thing_encoded from pandas.core.base import StringMixin -from pandas.computation.common import _ensure_decoded, _result_type_many -from pandas.computation.scope import _DEFAULT_GLOBALS +from pandas.core.computation.common import _ensure_decoded, _result_type_many +from pandas.core.computation.scope import _DEFAULT_GLOBALS _reductions = 'sum', 'prod' diff --git a/pandas/computation/pytables.py b/pandas/core/computation/pytables.py similarity index 98% rename from pandas/computation/pytables.py rename to pandas/core/computation/pytables.py index 2a5056963fe8d..8d0f23e28c0a2 100644 --- a/pandas/computation/pytables.py +++ 
b/pandas/core/computation/pytables.py @@ -5,15 +5,15 @@ import numpy as np import pandas as pd -from pandas.types.common import is_list_like +from pandas.core.dtypes.common import is_list_like import pandas.core.common as com from pandas.compat import u, string_types, DeepChainMap from pandas.core.base import StringMixin from pandas.formats.printing import pprint_thing, pprint_thing_encoded -from pandas.computation import expr, ops -from pandas.computation.ops import is_term, UndefinedVariableError -from pandas.computation.expr import BaseExprVisitor -from pandas.computation.common import _ensure_decoded +from pandas.core.computation import expr, ops +from pandas.core.computation.ops import is_term, UndefinedVariableError +from pandas.core.computation.expr import BaseExprVisitor +from pandas.core.computation.common import _ensure_decoded from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type diff --git a/pandas/computation/scope.py b/pandas/core/computation/scope.py similarity index 99% rename from pandas/computation/scope.py rename to pandas/core/computation/scope.py index 9ade755e0ff12..5a589473f64b7 100644 --- a/pandas/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -15,7 +15,7 @@ import pandas as pd # noqa from pandas.compat import DeepChainMap, map, StringIO from pandas.core.base import StringMixin -import pandas.computation as compu +import pandas.core.computation as compu def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(), diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index cf2a653638e90..7307980c8312e 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -415,7 +415,7 @@ def mpl_style_cb(key): def use_inf_as_null_cb(key): - from pandas.types.missing import _use_inf_as_null + from pandas.core.dtypes.missing import _use_inf_as_null _use_inf_as_null(key) diff --git a/pandas/sparse/__init__.py b/pandas/core/dtypes/__init__.py similarity index 100% rename from 
pandas/sparse/__init__.py rename to pandas/core/dtypes/__init__.py diff --git a/pandas/types/api.py b/pandas/core/dtypes/api.py similarity index 100% rename from pandas/types/api.py rename to pandas/core/dtypes/api.py diff --git a/pandas/types/cast.py b/pandas/core/dtypes/cast.py similarity index 100% rename from pandas/types/cast.py rename to pandas/core/dtypes/cast.py diff --git a/pandas/types/common.py b/pandas/core/dtypes/common.py similarity index 100% rename from pandas/types/common.py rename to pandas/core/dtypes/common.py diff --git a/pandas/types/concat.py b/pandas/core/dtypes/concat.py similarity index 95% rename from pandas/types/concat.py rename to pandas/core/dtypes/concat.py index b098bbb75d984..ddff78c9d511f 100644 --- a/pandas/types/concat.py +++ b/pandas/core/dtypes/concat.py @@ -5,20 +5,21 @@ import numpy as np import pandas._libs.tslib as tslib from pandas import compat -from pandas.core.algorithms import take_1d -from .common import (is_categorical_dtype, - is_sparse, - is_datetimetz, - is_datetime64_dtype, - is_timedelta64_dtype, - is_period_dtype, - is_object_dtype, - is_bool_dtype, - is_dtype_equal, - _NS_DTYPE, - _TD_DTYPE) -from pandas.types.generic import (ABCDatetimeIndex, ABCTimedeltaIndex, - ABCPeriodIndex) +from pandas.core.dtypes.common import ( + is_categorical_dtype, + is_sparse, + is_datetimetz, + is_datetime64_dtype, + is_timedelta64_dtype, + is_period_dtype, + is_object_dtype, + is_bool_dtype, + is_dtype_equal, + _NS_DTYPE, + _TD_DTYPE) +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, ABCTimedeltaIndex, + ABCPeriodIndex) def get_dtype_kinds(l): @@ -68,7 +69,7 @@ def _get_series_result_type(result): if isinstance(result, dict): # concat Series with axis 1 if all(is_sparse(c) for c in compat.itervalues(result)): - from pandas.sparse.api import SparseDataFrame + from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: from pandas.core.frame import DataFrame @@ -76,7 +77,7 @@ def 
_get_series_result_type(result): elif is_sparse(result): # concat Series with axis 1 - from pandas.sparse.api import SparseSeries + from pandas.core.sparse.api import SparseSeries return SparseSeries else: from pandas.core.series import Series @@ -90,7 +91,7 @@ def _get_frame_result_type(result, objs): otherwise, return 1st obj """ if any(b.is_sparse for b in result.blocks): - from pandas.sparse.api import SparseDataFrame + from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: return objs[0] @@ -276,6 +277,8 @@ def _maybe_unwrap(x): if sort_categories and not categories.is_monotonic_increasing: categories = categories.sort_values() indexer = categories.get_indexer(first.categories) + + from pandas.core.algorithms import take_1d new_codes = take_1d(indexer, new_codes, fill_value=-1) elif ignore_order or all(not c.ordered for c in to_union): # different categories - union and recode @@ -288,6 +291,8 @@ def _maybe_unwrap(x): for c in to_union: if len(c.categories) > 0: indexer = categories.get_indexer(c.categories) + + from pandas.core.algorithms import take_1d new_codes.append(take_1d(indexer, c.codes, fill_value=-1)) else: # must be all NaN @@ -433,7 +438,7 @@ def _concat_sparse(to_concat, axis=0, typs=None): a single array, preserving the combined dtypes """ - from pandas.sparse.array import SparseArray, _make_index + from pandas.core.sparse.array import SparseArray, _make_index def convert_sparse(x, axis): # coerce to native type diff --git a/pandas/types/dtypes.py b/pandas/core/dtypes/dtypes.py similarity index 97% rename from pandas/types/dtypes.py rename to pandas/core/dtypes/dtypes.py index 7913950a597c9..59c23addd418e 100644 --- a/pandas/types/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -209,8 +209,15 @@ def __new__(cls, unit=None, tz=None): raise ValueError("DatetimeTZDtype constructor must have a tz " "supplied") + # hash with the actual tz if we can + # some cannot be hashed, so stringfy + try: + key = (unit, tz) + hash(key) + 
except TypeError: + key = (unit, str(tz)) + # set/retrieve from cache - key = (unit, str(tz)) try: return cls._cache[key] except KeyError: @@ -410,7 +417,7 @@ def __new__(cls, subtype=None): if m is not None: subtype = m.group('subtype') - from pandas.types.common import pandas_dtype + from pandas.core.dtypes.common import pandas_dtype try: subtype = pandas_dtype(subtype) except TypeError: diff --git a/pandas/types/generic.py b/pandas/core/dtypes/generic.py similarity index 100% rename from pandas/types/generic.py rename to pandas/core/dtypes/generic.py diff --git a/pandas/types/inference.py b/pandas/core/dtypes/inference.py similarity index 100% rename from pandas/types/inference.py rename to pandas/core/dtypes/inference.py diff --git a/pandas/types/missing.py b/pandas/core/dtypes/missing.py similarity index 100% rename from pandas/types/missing.py rename to pandas/core/dtypes/missing.py diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a5256868ce419..3a5a0e7044e79 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -23,38 +23,40 @@ import numpy as np import numpy.ma as ma -from pandas.types.cast import (maybe_upcast, infer_dtype_from_scalar, - maybe_cast_to_datetime, - maybe_infer_to_datetimelike, - maybe_convert_platform, - maybe_downcast_to_dtype, - invalidate_string_dtypes, - coerce_to_dtypes, - maybe_upcast_putmask, - find_common_type) -from pandas.types.common import (is_categorical_dtype, - is_object_dtype, - is_extension_type, - is_datetimetz, - is_datetime64_any_dtype, - is_datetime64tz_dtype, - is_bool_dtype, - is_integer_dtype, - is_float_dtype, - is_integer, - is_scalar, - is_dtype_equal, - needs_i8_conversion, - _get_dtype_from_object, - _ensure_float, - _ensure_float64, - _ensure_int64, - _ensure_platform_int, - is_list_like, - is_iterator, - is_sequence, - is_named_tuple) -from pandas.types.missing import isnull, notnull +from pandas.core.dtypes.cast import ( + maybe_upcast, infer_dtype_from_scalar, + maybe_cast_to_datetime, 
+ maybe_infer_to_datetimelike, + maybe_convert_platform, + maybe_downcast_to_dtype, + invalidate_string_dtypes, + coerce_to_dtypes, + maybe_upcast_putmask, + find_common_type) +from pandas.core.dtypes.common import ( + is_categorical_dtype, + is_object_dtype, + is_extension_type, + is_datetimetz, + is_datetime64_any_dtype, + is_datetime64tz_dtype, + is_bool_dtype, + is_integer_dtype, + is_float_dtype, + is_integer, + is_scalar, + is_dtype_equal, + needs_i8_conversion, + _get_dtype_from_object, + _ensure_float, + _ensure_float64, + _ensure_int64, + _ensure_platform_int, + is_list_like, + is_iterator, + is_sequence, + is_named_tuple) +from pandas.core.dtypes.missing import isnull, notnull from pandas.core.common import (_try_sort, _default_index, @@ -70,9 +72,9 @@ create_block_manager_from_blocks) from pandas.core.series import Series from pandas.core.categorical import Categorical -import pandas.computation.expressions as expressions +import pandas.core.computation.expressions as expressions import pandas.core.algorithms as algorithms -from pandas.computation.eval import eval as _eval +from pandas.core.computation.eval import eval as _eval from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) from pandas import compat @@ -1269,7 +1271,7 @@ def to_sparse(self, fill_value=None, kind='block'): ------- y : SparseDataFrame """ - from pandas.core.sparse import SparseDataFrame + from pandas.core.sparse.frame import SparseDataFrame return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 316c9f5e2ccd8..167af8dfc0d8e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10,22 +10,23 @@ import pandas as pd from pandas._libs import tslib, lib -from pandas.types.common import (_coerce_to_dtype, - _ensure_int64, - needs_i8_conversion, - is_scalar, - is_integer, is_bool, - 
is_bool_dtype, - is_numeric_dtype, - is_datetime64_dtype, - is_timedelta64_dtype, - is_datetime64tz_dtype, - is_list_like, - is_dict_like, - is_re_compilable) -from pandas.types.cast import maybe_promote, maybe_upcast_putmask -from pandas.types.missing import isnull, notnull -from pandas.types.generic import ABCSeries, ABCPanel +from pandas.core.dtypes.common import ( + _coerce_to_dtype, + _ensure_int64, + needs_i8_conversion, + is_scalar, + is_integer, is_bool, + is_bool_dtype, + is_numeric_dtype, + is_datetime64_dtype, + is_timedelta64_dtype, + is_datetime64tz_dtype, + is_list_like, + is_dict_like, + is_re_compilable) +from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.generic import ABCSeries, ABCPanel from pandas.core.common import (_values_from_object, _maybe_box_datetimelike, diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 27e256a8eb572..2cbcb9ef6efec 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -15,26 +15,27 @@ from pandas.compat.numpy import function as nv, _np_version_under1p8 from pandas.compat import set_function_name -from pandas.types.common import (is_numeric_dtype, - is_timedelta64_dtype, is_datetime64_dtype, - is_categorical_dtype, - is_interval_dtype, - is_datetimelike, - is_datetime64_any_dtype, - is_bool, is_integer_dtype, - is_complex_dtype, - is_bool_dtype, - is_scalar, - is_list_like, - needs_i8_conversion, - _ensure_float64, - _ensure_platform_int, - _ensure_int64, - _ensure_object, - _ensure_categorical, - _ensure_float) -from pandas.types.cast import maybe_downcast_to_dtype -from pandas.types.missing import isnull, notnull, _maybe_fill +from pandas.core.dtypes.common import ( + is_numeric_dtype, + is_timedelta64_dtype, is_datetime64_dtype, + is_categorical_dtype, + is_interval_dtype, + is_datetimelike, + is_datetime64_any_dtype, + is_bool, is_integer_dtype, + is_complex_dtype, + is_bool_dtype, + 
is_scalar, + is_list_like, + needs_i8_conversion, + _ensure_float64, + _ensure_platform_int, + _ensure_int64, + _ensure_object, + _ensure_categorical, + _ensure_float) +from pandas.core.dtypes.cast import maybe_downcast_to_dtype +from pandas.core.dtypes.missing import isnull, notnull, _maybe_fill from pandas.core.common import (_values_from_object, AbstractMethodError, _default_index) @@ -4079,7 +4080,7 @@ def _apply_to_column_groupbys(self, func): def count(self): """ Compute count of group, excluding missing values """ from functools import partial - from pandas.types.missing import _isnull_ndarraylike as isnull + from pandas.core.dtypes.missing import _isnull_ndarraylike as isnull data, _ = self._get_data_to_aggregate() ids, _, ngroups = self.grouper.group_info diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index dd8fa2d3ddc81..a01e3dc46dfe9 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -4,17 +4,18 @@ import numpy as np from pandas.compat import range, zip import pandas.compat as compat -from pandas.types.generic import ABCDataFrame, ABCPanel, ABCSeries -from pandas.types.common import (is_integer_dtype, - is_integer, is_float, - is_list_like, - is_sequence, - is_iterator, - is_scalar, - is_sparse, - _is_unorderable_exception, - _ensure_platform_int) -from pandas.types.missing import isnull, _infer_fill_value +from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_integer, is_float, + is_list_like, + is_sequence, + is_iterator, + is_scalar, + is_sparse, + _is_unorderable_exception, + _ensure_platform_int) +from pandas.core.dtypes.missing import isnull, _infer_fill_value from pandas.core.index import Index, MultiIndex diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 57361886eab8c..f7d7efd66f8db 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -9,41 +9,45 @@ from pandas.core.base import 
PandasObject -from pandas.types.dtypes import (ExtensionDtype, DatetimeTZDtype, - CategoricalDtype) -from pandas.types.common import (_TD_DTYPE, _NS_DTYPE, - _ensure_int64, _ensure_platform_int, - is_integer, - is_dtype_equal, - is_timedelta64_dtype, - is_datetime64_dtype, is_datetimetz, is_sparse, - is_categorical, is_categorical_dtype, - is_integer_dtype, - is_datetime64tz_dtype, - is_object_dtype, - is_datetimelike_v_numeric, - is_float_dtype, is_numeric_dtype, - is_numeric_v_string_like, is_extension_type, - is_list_like, - is_re, - is_re_compilable, - is_scalar, - _get_dtype) -from pandas.types.cast import (maybe_downcast_to_dtype, - maybe_convert_string_to_object, - maybe_upcast, - maybe_convert_scalar, maybe_promote, - infer_dtype_from_scalar, - soft_convert_objects, - maybe_convert_objects, - astype_nansafe, - find_common_type) -from pandas.types.missing import (isnull, array_equivalent, - _is_na_compat, - is_null_datelike_scalar) -import pandas.types.concat as _concat - -from pandas.types.generic import ABCSeries +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, DatetimeTZDtype, + CategoricalDtype) +from pandas.core.dtypes.common import ( + _TD_DTYPE, _NS_DTYPE, + _ensure_int64, _ensure_platform_int, + is_integer, + is_dtype_equal, + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, is_sparse, + is_categorical, is_categorical_dtype, + is_integer_dtype, + is_datetime64tz_dtype, + is_object_dtype, + is_datetimelike_v_numeric, + is_float_dtype, is_numeric_dtype, + is_numeric_v_string_like, is_extension_type, + is_list_like, + is_re, + is_re_compilable, + is_scalar, + _get_dtype) +from pandas.core.dtypes.cast import ( + maybe_downcast_to_dtype, + maybe_convert_string_to_object, + maybe_upcast, + maybe_convert_scalar, maybe_promote, + infer_dtype_from_scalar, + soft_convert_objects, + maybe_convert_objects, + astype_nansafe, + find_common_type) +from pandas.core.dtypes.missing import ( + isnull, array_equivalent, + _is_na_compat, + 
is_null_datelike_scalar) +import pandas.core.dtypes.concat as _concat + +from pandas.core.dtypes.generic import ABCSeries from pandas.core.common import is_null_slice import pandas.core.algorithms as algos @@ -54,12 +58,12 @@ from pandas.formats.printing import pprint_thing import pandas.core.missing as missing -from pandas.sparse.array import _maybe_to_sparse, SparseArray +from pandas.core.sparse.array import _maybe_to_sparse, SparseArray from pandas._libs import lib, tslib from pandas._libs.tslib import Timedelta from pandas._libs.lib import BlockPlacement -import pandas.computation.expressions as expressions +import pandas.core.computation.expressions as expressions from pandas.util.decorators import cache_readonly from pandas.util.validators import validate_bool_kwarg diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 91039f3270af2..3010348423340 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -8,18 +8,19 @@ from pandas._libs import algos, lib from pandas.compat import range, string_types -from pandas.types.common import (is_numeric_v_string_like, - is_float_dtype, - is_datetime64_dtype, - is_datetime64tz_dtype, - is_integer_dtype, - is_scalar, - is_integer, - needs_i8_conversion, - _ensure_float64) - -from pandas.types.cast import infer_dtype_from_array -from pandas.types.missing import isnull +from pandas.core.dtypes.common import ( + is_numeric_v_string_like, + is_float_dtype, + is_datetime64_dtype, + is_datetime64tz_dtype, + is_integer_dtype, + is_scalar, + is_integer, + needs_i8_conversion, + _ensure_float64) + +from pandas.core.dtypes.cast import infer_dtype_from_array +from pandas.core.dtypes.missing import isnull def mask_missing(arr, values_to_mask): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 6ec94e69740a2..5ce302967de24 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -11,17 +11,18 @@ from pandas import compat from pandas._libs import tslib, algos, lib -from pandas.types.common 
import (_get_dtype, - is_float, is_scalar, - is_integer, is_complex, is_float_dtype, - is_complex_dtype, is_integer_dtype, - is_bool_dtype, is_object_dtype, - is_numeric_dtype, - is_datetime64_dtype, is_timedelta64_dtype, - is_datetime_or_timedelta_dtype, - is_int_or_datetime_dtype, is_any_int_dtype) -from pandas.types.cast import _int64_max, maybe_upcast_putmask -from pandas.types.missing import isnull, notnull +from pandas.core.dtypes.common import ( + _get_dtype, + is_float, is_scalar, + is_integer, is_complex, is_float_dtype, + is_complex_dtype, is_integer_dtype, + is_bool_dtype, is_object_dtype, + is_numeric_dtype, + is_datetime64_dtype, is_timedelta64_dtype, + is_datetime_or_timedelta_dtype, + is_int_or_datetime_dtype, is_any_int_dtype) +from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask +from pandas.core.dtypes.missing import isnull, notnull from pandas.core.common import _values_from_object diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 9e777fd94de66..50815498f40df 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -16,25 +16,26 @@ from pandas import compat from pandas.util.decorators import Appender -import pandas.computation.expressions as expressions +import pandas.core.computation.expressions as expressions from pandas.compat import bind_method import pandas.core.missing as missing from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_match_name -from pandas.types.missing import notnull, isnull -from pandas.types.common import (needs_i8_conversion, - is_datetimelike_v_numeric, - is_integer_dtype, is_categorical_dtype, - is_object_dtype, is_timedelta64_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, - is_bool_dtype, is_datetimetz, - is_list_like, - is_scalar, - _ensure_object) -from pandas.types.cast import maybe_upcast_putmask, find_common_type -from pandas.types.generic import ABCSeries, ABCIndex, ABCPeriodIndex +from pandas.core.dtypes.missing import notnull, 
isnull +from pandas.core.dtypes.common import ( + needs_i8_conversion, + is_datetimelike_v_numeric, + is_integer_dtype, is_categorical_dtype, + is_object_dtype, is_timedelta64_dtype, + is_datetime64_dtype, is_datetime64tz_dtype, + is_bool_dtype, is_datetimetz, + is_list_like, + is_scalar, + _ensure_object) +from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type +from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCPeriodIndex # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 24f4d219fb9ca..76053b3bdb83d 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -6,13 +6,15 @@ import numpy as np import warnings -from pandas.types.cast import (infer_dtype_from_scalar, - maybe_cast_item) -from pandas.types.common import (is_integer, is_list_like, - is_string_like, is_scalar) -from pandas.types.missing import notnull - -import pandas.computation.expressions as expressions +from pandas.core.dtypes.cast import ( + infer_dtype_from_scalar, + maybe_cast_item) +from pandas.core.dtypes.common import ( + is_integer, is_list_like, + is_string_like, is_scalar) +from pandas.core.dtypes.missing import notnull + +import pandas.core.computation.expressions as expressions import pandas.core.common as com import pandas.core.ops as ops import pandas.core.missing as missing diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index b03c3d77928c7..b3a06d85967f2 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -7,19 +7,20 @@ import numpy as np -from pandas.types.common import (_ensure_platform_int, - is_list_like, is_bool_dtype, - needs_i8_conversion) -from pandas.types.cast import maybe_promote -from pandas.types.missing import notnull -import pandas.types.concat as _concat +from pandas.core.dtypes.common import ( + _ensure_platform_int, + is_list_like, 
is_bool_dtype, + needs_i8_conversion) +from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.missing import notnull +import pandas.core.dtypes.concat as _concat from pandas.core.series import Series from pandas.core.frame import DataFrame -from pandas.core.sparse import SparseDataFrame, SparseSeries -from pandas.sparse.array import SparseArray -from pandas.sparse.libsparse import IntIndex +from pandas.core.sparse.api import SparseDataFrame, SparseSeries +from pandas.core.sparse.array import SparseArray +from pandas.core.sparse.libsparse import IntIndex from pandas.core.categorical import Categorical, _factorize_from_iterable from pandas.core.sorting import (get_group_index, get_compressed_ids, diff --git a/pandas/core/series.py b/pandas/core/series.py index 1cf537cf3c315..596dae4345cb3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -13,26 +13,28 @@ import numpy as np import numpy.ma as ma -from pandas.types.common import (_coerce_to_dtype, is_categorical_dtype, - is_bool, - is_integer, is_integer_dtype, - is_float_dtype, - is_extension_type, is_datetimetz, - is_datetimelike, - is_datetime64tz_dtype, - is_timedelta64_dtype, - is_list_like, - is_hashable, - is_iterator, - is_dict_like, - is_scalar, - _is_unorderable_exception, - _ensure_platform_int) -from pandas.types.generic import ABCSparseArray, ABCDataFrame -from pandas.types.cast import (maybe_upcast, infer_dtype_from_scalar, - maybe_convert_platform, - maybe_cast_to_datetime, maybe_castable) -from pandas.types.missing import isnull, notnull +from pandas.core.dtypes.common import ( + _coerce_to_dtype, is_categorical_dtype, + is_bool, + is_integer, is_integer_dtype, + is_float_dtype, + is_extension_type, is_datetimetz, + is_datetimelike, + is_datetime64tz_dtype, + is_timedelta64_dtype, + is_list_like, + is_hashable, + is_iterator, + is_dict_like, + is_scalar, + _is_unorderable_exception, + _ensure_platform_int) +from pandas.core.dtypes.generic import ABCSparseArray, 
ABCDataFrame +from pandas.core.dtypes.cast import ( + maybe_upcast, infer_dtype_from_scalar, + maybe_convert_platform, + maybe_cast_to_datetime, maybe_castable) +from pandas.core.dtypes.missing import isnull, notnull from pandas.core.common import (is_bool_indexer, _default_index, @@ -255,7 +257,7 @@ def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): # return a sparse series here if isinstance(arr, ABCSparseArray): - from pandas.sparse.series import SparseSeries + from pandas.core.sparse.series import SparseSeries cls = SparseSeries return cls(arr, index=index, name=name, dtype=dtype, copy=copy, @@ -1130,7 +1132,7 @@ def to_sparse(self, kind='block', fill_value=None): ------- sp : SparseSeries """ - from pandas.core.sparse import SparseSeries + from pandas.core.sparse.series import SparseSeries return SparseSeries(self, kind=kind, fill_value=fill_value).__finalize__(self) @@ -2867,8 +2869,6 @@ def _sanitize_index(data, index, copy=False): data = data.asobject elif isinstance(data, DatetimeIndex): data = data._to_embed(keep_tz=True) - if copy: - data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index e56a4f50de134..69b427df981b7 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -3,10 +3,11 @@ import numpy as np from pandas.compat import long from pandas.core.categorical import Categorical -from pandas.types.common import (_ensure_platform_int, - _ensure_int64, - is_categorical_dtype) -from pandas.types.missing import isnull +from pandas.core.dtypes.common import ( + _ensure_platform_int, + _ensure_int64, + is_categorical_dtype) +from pandas.core.dtypes.missing import isnull import pandas.core.algorithms as algorithms from pandas._libs import lib, algos, hashtable from pandas._libs.hashtable import unique_label_indices diff --git a/pandas/core/sparse.py b/pandas/core/sparse.py deleted file mode 100644 index 
4fc329844d616..0000000000000 --- a/pandas/core/sparse.py +++ /dev/null @@ -1,10 +0,0 @@ -""" -Data structures for sparse float data. Life is made simpler by dealing only -with float64 data -""" - -# pylint: disable=W0611 -# flake8: noqa - -from pandas.sparse.series import SparseSeries -from pandas.sparse.frame import SparseDataFrame diff --git a/pandas/tests/computation/__init__.py b/pandas/core/sparse/__init__.py similarity index 100% rename from pandas/tests/computation/__init__.py rename to pandas/core/sparse/__init__.py diff --git a/pandas/core/sparse/api.py b/pandas/core/sparse/api.py new file mode 100644 index 0000000000000..f79bb4886da4b --- /dev/null +++ b/pandas/core/sparse/api.py @@ -0,0 +1,6 @@ +# pylint: disable=W0611 +# flake8: noqa +from pandas.core.sparse.array import SparseArray +from pandas.core.sparse.list import SparseList +from pandas.core.sparse.series import SparseSeries +from pandas.core.sparse.frame import SparseDataFrame diff --git a/pandas/sparse/array.py b/pandas/core/sparse/array.py similarity index 97% rename from pandas/sparse/array.py rename to pandas/core/sparse/array.py index f149e724c19c3..74e9be54ae6df 100644 --- a/pandas/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -14,20 +14,23 @@ from pandas.compat import range from pandas.compat.numpy import function as nv -from pandas.types.generic import ABCSparseArray, ABCSparseSeries -from pandas.types.common import (_ensure_platform_int, - is_float, is_integer, - is_integer_dtype, - is_bool_dtype, - is_list_like, - is_string_dtype, - is_scalar, is_dtype_equal) -from pandas.types.cast import (maybe_convert_platform, maybe_promote, - astype_nansafe, find_common_type) -from pandas.types.missing import isnull, notnull, na_value_for_dtype - -from pandas.sparse import libsparse as splib -from pandas.sparse.libsparse import SparseIndex, BlockIndex, IntIndex +from pandas.core.dtypes.generic import ( + ABCSparseArray, ABCSparseSeries) +from pandas.core.dtypes.common import ( + 
_ensure_platform_int, + is_float, is_integer, + is_integer_dtype, + is_bool_dtype, + is_list_like, + is_string_dtype, + is_scalar, is_dtype_equal) +from pandas.core.dtypes.cast import ( + maybe_convert_platform, maybe_promote, + astype_nansafe, find_common_type) +from pandas.core.dtypes.missing import isnull, notnull, na_value_for_dtype + +from pandas.core.sparse import libsparse as splib +from pandas.core.sparse.libsparse import SparseIndex, BlockIndex, IntIndex from pandas._libs import index as libindex import pandas.core.algorithms as algos import pandas.core.ops as ops diff --git a/pandas/sparse/frame.py b/pandas/core/sparse/frame.py similarity index 99% rename from pandas/sparse/frame.py rename to pandas/core/sparse/frame.py index 455d120cca640..05c97fac4b53a 100644 --- a/pandas/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -10,9 +10,9 @@ from pandas import compat import numpy as np -from pandas.types.missing import isnull, notnull -from pandas.types.cast import maybe_upcast, find_common_type -from pandas.types.common import _ensure_platform_int, is_scipy_sparse +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.cast import maybe_upcast, find_common_type +from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse from pandas.core.common import _try_sort from pandas.compat.numpy import function as nv @@ -24,8 +24,8 @@ from pandas.core.internals import (BlockManager, create_block_manager_from_arrays) import pandas.core.generic as generic -from pandas.sparse.series import SparseSeries, SparseArray -from pandas.sparse.libsparse import BlockIndex, get_blocks +from pandas.core.sparse.series import SparseSeries, SparseArray +from pandas.core.sparse.libsparse import BlockIndex, get_blocks from pandas.util.decorators import Appender import pandas.core.ops as ops diff --git a/pandas/sparse/list.py b/pandas/core/sparse/list.py similarity index 96% rename from pandas/sparse/list.py rename to pandas/core/sparse/list.py 
index 54ebf5e51045d..381a811ac828b 100644 --- a/pandas/sparse/list.py +++ b/pandas/core/sparse/list.py @@ -3,10 +3,10 @@ from pandas.core.base import PandasObject from pandas.formats.printing import pprint_thing -from pandas.types.common import is_scalar -from pandas.sparse.array import SparseArray +from pandas.core.dtypes.common import is_scalar +from pandas.core.sparse.array import SparseArray from pandas.util.validators import validate_bool_kwarg -import pandas.sparse.libsparse as splib +from pandas.core.sparse import libsparse as splib class SparseList(PandasObject): diff --git a/pandas/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py similarity index 100% rename from pandas/sparse/scipy_sparse.py rename to pandas/core/sparse/scipy_sparse.py diff --git a/pandas/sparse/series.py b/pandas/core/sparse/series.py similarity index 98% rename from pandas/sparse/series.py rename to pandas/core/sparse/series.py index 7ec42f02c3998..a77bce8f06783 100644 --- a/pandas/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -8,8 +8,8 @@ import numpy as np import warnings -from pandas.types.missing import isnull, notnull -from pandas.types.common import is_scalar +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.common import is_scalar from pandas.core.common import _values_from_object, _maybe_match_name from pandas.compat.numpy import function as nv @@ -23,13 +23,15 @@ import pandas._libs.index as _index from pandas.util.decorators import Appender -from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray, - _make_index) -from pandas.sparse.libsparse import BlockIndex, IntIndex -import pandas.sparse.libsparse as splib +from pandas.core.sparse.array import ( + make_sparse, _sparse_array_op, SparseArray, + _make_index) +from pandas.core.sparse.libsparse import BlockIndex, IntIndex +import pandas.core.sparse.libsparse as splib -from pandas.sparse.scipy_sparse import (_sparse_series_to_coo, - _coo_to_sparse_series) 
+from pandas.core.sparse.scipy_sparse import ( + _sparse_series_to_coo, + _coo_to_sparse_series) _shared_doc_kwargs = dict(axes='index', klass='SparseSeries', @@ -264,7 +266,7 @@ def _constructor(self): @property def _constructor_expanddim(self): - from pandas.sparse.api import SparseDataFrame + from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame @property diff --git a/pandas/sparse/sparse.pyx b/pandas/core/sparse/sparse.pyx similarity index 100% rename from pandas/sparse/sparse.pyx rename to pandas/core/sparse/sparse.pyx diff --git a/pandas/sparse/sparse_op_helper.pxi.in b/pandas/core/sparse/sparse_op_helper.pxi.in similarity index 100% rename from pandas/sparse/sparse_op_helper.pxi.in rename to pandas/core/sparse/sparse_op_helper.pxi.in diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 504d3dd47cc21..5082ac7f80fbf 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1,16 +1,17 @@ import numpy as np from pandas.compat import zip -from pandas.types.generic import ABCSeries, ABCIndex -from pandas.types.missing import isnull, notnull -from pandas.types.common import (is_bool_dtype, - is_categorical_dtype, - is_object_dtype, - is_string_like, - is_list_like, - is_scalar, - is_integer, - is_re) +from pandas.core.dtypes.generic import ABCSeries, ABCIndex +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_categorical_dtype, + is_object_dtype, + is_string_like, + is_list_like, + is_scalar, + is_integer, + is_re) from pandas.core.common import _values_from_object from pandas.core.algorithms import take_1d diff --git a/pandas/core/window.py b/pandas/core/window.py index 5b84b075ce81a..6fdc05a13b773 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -12,20 +12,22 @@ from collections import defaultdict from datetime import timedelta -from pandas.types.generic import (ABCSeries, - ABCDataFrame, - ABCDatetimeIndex, - ABCTimedeltaIndex, - 
ABCPeriodIndex) -from pandas.types.common import (is_integer, - is_bool, - is_float_dtype, - is_integer_dtype, - needs_i8_conversion, - is_timedelta64_dtype, - is_list_like, - _ensure_float64, - is_scalar) +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCDataFrame, + ABCDatetimeIndex, + ABCTimedeltaIndex, + ABCPeriodIndex) +from pandas.core.dtypes.common import ( + is_integer, + is_bool, + is_float_dtype, + is_integer_dtype, + needs_i8_conversion, + is_timedelta64_dtype, + is_list_like, + _ensure_float64, + is_scalar) import pandas as pd from pandas.core.base import (PandasObject, SelectionMixin, diff --git a/pandas/formats/format.py b/pandas/formats/format.py index 907198d98cf5b..aad6c182416f6 100644 --- a/pandas/formats/format.py +++ b/pandas/formats/format.py @@ -10,20 +10,21 @@ import sys -from pandas.types.missing import isnull, notnull -from pandas.types.common import (is_categorical_dtype, - is_float_dtype, - is_period_arraylike, - is_integer_dtype, - is_interval_dtype, - is_datetimetz, - is_integer, - is_float, - is_numeric_dtype, - is_datetime64_dtype, - is_timedelta64_dtype, - is_list_like) -from pandas.types.generic import ABCSparseArray +from pandas.core.dtypes.missing import isnull, notnull +from pandas.core.dtypes.common import ( + is_categorical_dtype, + is_float_dtype, + is_period_arraylike, + is_integer_dtype, + is_interval_dtype, + is_datetimetz, + is_integer, + is_float, + is_numeric_dtype, + is_datetime64_dtype, + is_timedelta64_dtype, + is_list_like) +from pandas.core.dtypes.generic import ABCSparseArray from pandas.core.base import PandasObject from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat diff --git a/pandas/formats/printing.py b/pandas/formats/printing.py index 37bd4b63d6f7a..5ea47df2c817f 100644 --- a/pandas/formats/printing.py +++ b/pandas/formats/printing.py @@ -2,7 +2,7 @@ printing tools """ -from pandas.types.inference import is_sequence +from pandas.core.dtypes.inference import 
is_sequence from pandas import compat from pandas.compat import u from pandas.core.config import get_option diff --git a/pandas/formats/style.py b/pandas/formats/style.py index af02077bd5b41..3ca1d8259729d 100644 --- a/pandas/formats/style.py +++ b/pandas/formats/style.py @@ -19,7 +19,7 @@ "or `pip install Jinja2`" raise ImportError(msg) -from pandas.types.common import is_float, is_string_like +from pandas.core.dtypes.common import is_float, is_string_like import numpy as np import pandas as pd diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index 00ad4ca71cb9d..b0439e122ea9e 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -13,27 +13,28 @@ from pandas import compat -from pandas.types.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex -from pandas.types.missing import isnull, array_equivalent -from pandas.types.common import (_ensure_int64, - _ensure_object, - _ensure_categorical, - _ensure_platform_int, - is_integer, - is_float, - is_dtype_equal, - is_object_dtype, - is_categorical_dtype, - is_interval_dtype, - is_bool_dtype, - is_signed_integer_dtype, - is_unsigned_integer_dtype, - is_integer_dtype, is_float_dtype, - is_datetime64_any_dtype, - is_timedelta64_dtype, - needs_i8_conversion, - is_iterator, is_list_like, - is_scalar) +from pandas.core.dtypes.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex +from pandas.core.dtypes.missing import isnull, array_equivalent +from pandas.core.dtypes.common import ( + _ensure_int64, + _ensure_object, + _ensure_categorical, + _ensure_platform_int, + is_integer, + is_float, + is_dtype_equal, + is_object_dtype, + is_categorical_dtype, + is_interval_dtype, + is_bool_dtype, + is_signed_integer_dtype, + is_unsigned_integer_dtype, + is_integer_dtype, is_float_dtype, + is_datetime64_any_dtype, + is_timedelta64_dtype, + needs_i8_conversion, + is_iterator, is_list_like, + is_scalar) from pandas.core.common import (is_bool_indexer, _values_from_object, _asarray_tuplesafe) @@ -44,7 +45,7 @@ 
deprecate, deprecate_kwarg) from pandas.indexes.frozen import FrozenList import pandas.core.common as com -import pandas.types.concat as _concat +import pandas.core.dtypes.concat as _concat import pandas.core.missing as missing import pandas.core.algorithms as algos from pandas.formats.printing import pprint_thing diff --git a/pandas/indexes/category.py b/pandas/indexes/category.py index 6c57b2ed83705..5f9d106189767 100644 --- a/pandas/indexes/category.py +++ b/pandas/indexes/category.py @@ -3,14 +3,15 @@ from pandas import compat from pandas.compat.numpy import function as nv -from pandas.types.generic import ABCCategorical, ABCSeries -from pandas.types.common import (is_categorical_dtype, - _ensure_platform_int, - is_list_like, - is_interval_dtype, - is_scalar) +from pandas.core.dtypes.generic import ABCCategorical, ABCSeries +from pandas.core.dtypes.common import ( + is_categorical_dtype, + _ensure_platform_int, + is_list_like, + is_interval_dtype, + is_scalar) from pandas.core.common import _asarray_tuplesafe -from pandas.types.missing import array_equivalent +from pandas.core.dtypes.missing import array_equivalent from pandas.util.decorators import Appender, cache_readonly diff --git a/pandas/indexes/frozen.py b/pandas/indexes/frozen.py index ab1228c008ca8..19b04319b37f9 100644 --- a/pandas/indexes/frozen.py +++ b/pandas/indexes/frozen.py @@ -10,7 +10,7 @@ import numpy as np from pandas.core.base import PandasObject -from pandas.types.cast import coerce_indexer_dtype +from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.formats.printing import pprint_thing diff --git a/pandas/indexes/interval.py b/pandas/indexes/interval.py index 63315ef861d12..88a2b0ff9595b 100644 --- a/pandas/indexes/interval.py +++ b/pandas/indexes/interval.py @@ -2,19 +2,20 @@ import numpy as np -from pandas.types.missing import notnull, isnull -from pandas.types.generic import ABCPeriodIndex -from pandas.types.dtypes import IntervalDtype -from pandas.types.common import 
(_ensure_platform_int, - is_list_like, - is_datetime_or_timedelta_dtype, - is_integer_dtype, - is_object_dtype, - is_categorical_dtype, - is_float_dtype, - is_interval_dtype, - is_scalar, - is_integer) +from pandas.core.dtypes.missing import notnull, isnull +from pandas.core.dtypes.generic import ABCPeriodIndex +from pandas.core.dtypes.dtypes import IntervalDtype +from pandas.core.dtypes.common import ( + _ensure_platform_int, + is_list_like, + is_datetime_or_timedelta_dtype, + is_integer_dtype, + is_object_dtype, + is_categorical_dtype, + is_float_dtype, + is_interval_dtype, + is_scalar, + is_integer) from pandas.indexes.base import (Index, _ensure_index, default_pprint, _index_shared_docs) diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py index d1c8e0ba1cc4e..f410dbddb4428 100644 --- a/pandas/indexes/multi.py +++ b/pandas/indexes/multi.py @@ -12,13 +12,14 @@ from pandas.compat.numpy import function as nv from pandas import compat -from pandas.types.common import (_ensure_int64, - _ensure_platform_int, - is_object_dtype, - is_iterator, - is_list_like, - is_scalar) -from pandas.types.missing import isnull, array_equivalent +from pandas.core.dtypes.common import ( + _ensure_int64, + _ensure_platform_int, + is_object_dtype, + is_iterator, + is_list_like, + is_scalar) +from pandas.core.dtypes.missing import isnull, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError from pandas.core.common import (_values_from_object, is_bool_indexer, diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py index 31258c785d9e8..2f68101520229 100644 --- a/pandas/indexes/numeric.py +++ b/pandas/indexes/numeric.py @@ -1,9 +1,10 @@ import numpy as np from pandas._libs import (index as libindex, algos as libalgos, join as libjoin) -from pandas.types.common import (is_dtype_equal, pandas_dtype, - is_float_dtype, is_object_dtype, - is_integer_dtype, is_scalar) +from pandas.core.dtypes.common import ( + is_dtype_equal, pandas_dtype, + 
is_float_dtype, is_object_dtype, + is_integer_dtype, is_scalar) from pandas.core.common import _asarray_tuplesafe, _values_from_object from pandas import compat diff --git a/pandas/indexes/range.py b/pandas/indexes/range.py index be68c97fb7890..1eedfcc619aec 100644 --- a/pandas/indexes/range.py +++ b/pandas/indexes/range.py @@ -4,9 +4,10 @@ import numpy as np from pandas._libs import index as libindex -from pandas.types.common import (is_integer, - is_scalar, - is_int64_dtype) +from pandas.core.dtypes.common import ( + is_integer, + is_scalar, + is_int64_dtype) from pandas import compat from pandas.compat import lrange, range diff --git a/pandas/io/common.py b/pandas/io/common.py index 8ee6ded67f790..5cd5a9cd3e8dc 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -10,7 +10,7 @@ from pandas import compat from pandas.formats.printing import pprint_thing from pandas.core.common import AbstractMethodError -from pandas.types.common import is_number, is_file_like +from pandas.core.dtypes.common import is_number, is_file_like # compat from pandas.errors import (ParserError, DtypeWarning, # noqa diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 7f2f0cf4943b8..b19837973a94a 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -10,8 +10,9 @@ import abc import numpy as np -from pandas.types.common import (is_integer, is_float, - is_bool, is_list_like) +from pandas.core.dtypes.common import ( + is_integer, is_float, + is_bool, is_list_like) from pandas.core.frame import DataFrame from pandas.io.parsers import TextParser diff --git a/pandas/io/html.py b/pandas/io/html.py index 7b58e612de2df..8e5b8def1ea91 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -12,7 +12,7 @@ import numpy as np -from pandas.types.common import is_list_like +from pandas.core.dtypes.common import is_list_like from pandas.errors import EmptyDataError from pandas.io.common import (_is_url, urlopen, parse_url, _validate_header_arg) diff --git a/pandas/io/json/json.py 
b/pandas/io/json/json.py index 114ec4bb2723e..19e84c04b7ddb 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,7 @@ from pandas.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits from .table_schema import build_table_schema -from pandas.types.common import is_period_dtype +from pandas.core.dtypes.common import is_period_dtype loads = libjson.loads dumps = libjson.dumps diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 48f92d28baf61..d8ef3afc9591f 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -3,7 +3,7 @@ http://specs.frictionlessdata.io/json-table-schema/ """ -from pandas.types.common import ( +from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_categorical_dtype, is_period_dtype, is_string_dtype diff --git a/pandas/io/packers.py b/pandas/io/packers.py index ca5a27ee5b68e..a4b454eda7472 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -48,16 +48,17 @@ from pandas import compat from pandas.compat import u, u_safe -from pandas.types.common import (is_categorical_dtype, is_object_dtype, - needs_i8_conversion, pandas_dtype) +from pandas.core.dtypes.common import ( + is_categorical_dtype, is_object_dtype, + needs_i8_conversion, pandas_dtype) from pandas import (Timestamp, Period, Series, DataFrame, # noqa Index, MultiIndex, Float64Index, Int64Index, Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT, Categorical, CategoricalIndex) from pandas._libs.tslib import NaTType -from pandas.sparse.api import SparseSeries, SparseDataFrame -from pandas.sparse.array import BlockIndex, IntIndex +from pandas.core.sparse.api import SparseSeries, SparseDataFrame +from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.generic import NDFrame from pandas.errors import PerformanceWarning from pandas.io.common import 
get_filepath_or_buffer diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index efbf6d64404c0..f2449e3064867 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -15,13 +15,14 @@ from pandas import compat from pandas.compat import (range, lrange, StringIO, lzip, zip, string_types, map, u) -from pandas.types.common import (is_integer, _ensure_object, - is_list_like, is_integer_dtype, - is_float, is_dtype_equal, - is_object_dtype, is_string_dtype, - is_scalar, is_categorical_dtype) -from pandas.types.missing import isnull -from pandas.types.cast import astype_nansafe +from pandas.core.dtypes.common import ( + is_integer, _ensure_object, + is_list_like, is_integer_dtype, + is_float, is_dtype_equal, + is_object_dtype, is_string_dtype, + is_scalar, is_categorical_dtype) +from pandas.core.dtypes.missing import isnull +from pandas.core.dtypes.cast import astype_nansafe from pandas.core.index import Index, MultiIndex, RangeIndex from pandas.core.series import Series from pandas.core.frame import DataFrame diff --git a/pandas/io/parsers.pyx b/pandas/io/parsers.pyx index 4053e726d0a04..2def4dc9dcf24 100644 --- a/pandas/io/parsers.pyx +++ b/pandas/io/parsers.pyx @@ -39,14 +39,15 @@ cimport util import pandas._libs.lib as lib import pandas.compat as compat -from pandas.types.common import (is_categorical_dtype, CategoricalDtype, - is_integer_dtype, is_float_dtype, - is_bool_dtype, is_object_dtype, - is_string_dtype, is_datetime64_dtype, - pandas_dtype) +from pandas.core.dtypes.common import ( + is_categorical_dtype, CategoricalDtype, + is_integer_dtype, is_float_dtype, + is_bool_dtype, is_object_dtype, + is_string_dtype, is_datetime64_dtype, + pandas_dtype) from pandas.core.categorical import Categorical from pandas.core.algorithms import take_1d -from pandas.types.concat import union_categoricals +from pandas.core.dtypes.concat import union_categoricals from pandas import Index import time diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 
969a2a51cb15d..0f91c407766fb 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -3,7 +3,7 @@ import numpy as np from numpy.lib.format import read_array, write_array from pandas.compat import BytesIO, cPickle as pkl, pickle_compat as pc, PY3 -from pandas.types.common import is_datetime64_dtype, _NS_DTYPE +from pandas.core.dtypes.common import is_datetime64_dtype, _NS_DTYPE from pandas.io.common import _get_handle, _infer_compression diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 802f460ecba07..4771134f3fe5c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -12,15 +12,16 @@ import warnings import os -from pandas.types.common import (is_list_like, - is_categorical_dtype, - is_timedelta64_dtype, - is_datetime64tz_dtype, - is_datetime64_dtype, - _ensure_object, - _ensure_int64, - _ensure_platform_int) -from pandas.types.missing import array_equivalent +from pandas.core.dtypes.common import ( + is_list_like, + is_categorical_dtype, + is_timedelta64_dtype, + is_datetime64tz_dtype, + is_datetime64_dtype, + _ensure_object, + _ensure_int64, + _ensure_platform_int) +from pandas.core.dtypes.missing import array_equivalent import numpy as np from pandas import (Series, DataFrame, Panel, Panel4D, Index, @@ -29,7 +30,7 @@ DatetimeIndex, TimedeltaIndex) from pandas.core import config from pandas.io.common import _stringify_path -from pandas.sparse.array import BlockIndex, IntIndex +from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.base import StringMixin from pandas.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning @@ -43,7 +44,7 @@ from pandas import compat from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter from pandas.core.config import get_option -from pandas.computation.pytables import Expr, maybe_expression +from pandas.core.computation.pytables import Expr, maybe_expression from pandas._libs import tslib, algos, lib diff --git 
a/pandas/io/sql.py b/pandas/io/sql.py index b210baedaaf6d..de47a8ad5401f 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -12,10 +12,11 @@ import numpy as np import pandas._libs.lib as lib -from pandas.types.missing import isnull -from pandas.types.dtypes import DatetimeTZDtype -from pandas.types.common import (is_list_like, is_dict_like, - is_datetime64tz_dtype) +from pandas.core.dtypes.missing import isnull +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.common import ( + is_list_like, is_dict_like, + is_datetime64tz_dtype) from pandas.compat import (map, zip, raise_with_traceback, string_types, text_type) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 1d2951da68086..691582629251a 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -15,8 +15,9 @@ import struct from dateutil.relativedelta import relativedelta -from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype, - _ensure_object) +from pandas.core.dtypes.common import ( + is_categorical_dtype, is_datetime64_dtype, + _ensure_object) from pandas.core.base import StringMixin from pandas.core.categorical import Categorical diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 0aa8cc31646c5..0e51e95057be2 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -11,12 +11,12 @@ from matplotlib.transforms import nonsingular -from pandas.types.common import (is_float, is_integer, - is_integer_dtype, - is_float_dtype, - is_datetime64_ns_dtype, - is_period_arraylike, - ) +from pandas.core.dtypes.common import ( + is_float, is_integer, + is_integer_dtype, + is_float_dtype, + is_datetime64_ns_dtype, + is_period_arraylike) from pandas.compat import lrange import pandas.compat as compat diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3980f5e7f2f61..02f2df4949189 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -11,11 +11,12 @@ from 
pandas.util.decorators import cache_readonly from pandas.core.base import PandasObject -from pandas.types.common import (is_list_like, - is_integer, - is_number, - is_hashable, - is_iterator) +from pandas.core.dtypes.common import ( + is_list_like, + is_integer, + is_number, + is_hashable, + is_iterator) from pandas.core.common import AbstractMethodError, isnull, _try_sort from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 2c32a532dd2e2..f09bcef82b45d 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -5,7 +5,7 @@ import numpy as np from pandas.util.decorators import deprecate_kwarg -from pandas.types.missing import notnull +from pandas.core.dtypes.missing import notnull from pandas.compat import range, lrange, lmap, zip from pandas.formats.printing import pprint_thing diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 5d6dc7cbcdfc6..8cb4e30e0d91c 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -8,7 +8,7 @@ import numpy as np -from pandas.types.common import is_list_like +from pandas.core.dtypes.common import is_list_like from pandas.compat import range, lrange, lmap import pandas.compat as compat from pandas.plotting._compat import _mpl_ge_2_0_0 diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py index 720f776279869..0c2314087525c 100644 --- a/pandas/plotting/_tools.py +++ b/pandas/plotting/_tools.py @@ -7,7 +7,7 @@ import numpy as np -from pandas.types.common import is_list_like +from pandas.core.dtypes.common import is_list_like from pandas.core.index import Index from pandas.core.series import Series from pandas.compat import range diff --git a/pandas/sparse/api.py b/pandas/sparse/api.py deleted file mode 100644 index 90be0a216535f..0000000000000 --- a/pandas/sparse/api.py +++ /dev/null @@ -1,6 +0,0 @@ -# pylint: disable=W0611 -# flake8: noqa -from 
pandas.sparse.array import SparseArray -from pandas.sparse.list import SparseList -from pandas.sparse.series import SparseSeries -from pandas.sparse.frame import SparseDataFrame diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index 914c4c08863a2..f98ffa26e0c2b 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -6,7 +6,7 @@ import warnings import numpy as np -from pandas.types.common import is_scalar +from pandas.core.dtypes.common import is_scalar from pandas.core.api import DataFrame, Series from pandas.util.decorators import Substitution, Appender diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 02734189ca340..221458e629055 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -30,10 +30,10 @@ class TestPDApi(Base, tm.TestCase): ignored = ['tests', 'locale', 'conftest'] # top-level sub-packages - lib = ['api', 'compat', 'computation', 'core', - 'indexes', 'formats', 'errors', 'pandas', 'plotting', - 'test', 'tools', 'tseries', 'sparse', - 'types', 'util', 'options', 'io'] + lib = ['api', 'compat', 'core', + 'indexes', 'formats', 'errors', 'pandas', + 'plotting', 'test', 'tools', 'tseries', + 'util', 'options', 'io'] # these are already deprecated; awaiting removal deprecated_modules = ['stats', 'datetools', 'parser', diff --git a/pandas/tests/api/test_lib.py b/pandas/tests/api/test_lib.py deleted file mode 100644 index db2c68c6197d7..0000000000000 --- a/pandas/tests/api/test_lib.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- - -from warnings import catch_warnings -import pandas # noqa - - -def test_moved_infer_dtype(): - with catch_warnings(record=True): - e = pandas.lib.infer_dtype('foo') - assert e is not None diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index 1d05eda88e265..e0267d2990085 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- +from warnings import 
catch_warnings import numpy as np +import pandas from pandas.core import common as com from pandas.api import types from pandas.util import testing as tm @@ -28,7 +30,7 @@ class TestTypes(Base, tm.TestCase): 'is_dict_like', 'is_iterator', 'is_file_like', 'is_list_like', 'is_hashable', 'is_named_tuple', 'is_sequence', - 'pandas_dtype'] + 'pandas_dtype', 'union_categoricals'] def test_types(self): @@ -61,7 +63,7 @@ def test_deprecation_core_common_array_equivalent(self): def test_deprecation_core_common_moved(self): - # these are in pandas.types.common + # these are in pandas.core.dtypes.common l = ['is_datetime_arraylike', 'is_datetime_or_timedelta_dtype', 'is_datetimelike', @@ -73,7 +75,7 @@ def test_deprecation_core_common_moved(self): 'is_string_like', 'is_string_like_dtype'] - from pandas.types import common as c + from pandas.core.dtypes import common as c for t in l: self.check_deprecation(getattr(com, t), getattr(c, t)) @@ -82,3 +84,10 @@ def test_removed_from_core_common(self): for t in ['is_null_datelike_scalar', 'ensure_float']: self.assertRaises(AttributeError, lambda: getattr(com, t)) + + +def test_moved_infer_dtype(): + + with catch_warnings(record=True): + e = pandas.lib.infer_dtype('foo') + assert e is not None diff --git a/pandas/tests/sparse/__init__.py b/pandas/tests/core/__init__.py similarity index 100% rename from pandas/tests/sparse/__init__.py rename to pandas/tests/core/__init__.py diff --git a/pandas/tests/types/__init__.py b/pandas/tests/core/computation/__init__.py similarity index 100% rename from pandas/tests/types/__init__.py rename to pandas/tests/core/computation/__init__.py diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/core/computation/test_compat.py similarity index 84% rename from pandas/tests/computation/test_compat.py rename to pandas/tests/core/computation/test_compat.py index 56a7cab730f1f..7b6c0f9c4c9aa 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/core/computation/test_compat.py 
@@ -4,15 +4,15 @@ import pandas as pd from pandas.util import testing as tm -from pandas.computation.engines import _engines -import pandas.computation.expr as expr -from pandas.computation import _MIN_NUMEXPR_VERSION +from pandas.core.computation.engines import _engines +import pandas.core.computation.expr as expr +from pandas.core.computation import _MIN_NUMEXPR_VERSION def test_compat(): # test we have compat with our version of nu - from pandas.computation import _NUMEXPR_INSTALLED + from pandas.core.computation import _NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/core/computation/test_eval.py similarity index 99% rename from pandas/tests/computation/test_eval.py rename to pandas/tests/core/computation/test_eval.py index 78aad90cacf94..1f519174ce210 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/core/computation/test_eval.py @@ -8,23 +8,25 @@ from numpy.random import randn, rand, randint import numpy as np -from pandas.types.common import is_list_like, is_scalar +from pandas.core.dtypes.common import is_list_like, is_scalar import pandas as pd from pandas.core import common as com from pandas.errors import PerformanceWarning from pandas import DataFrame, Series, Panel, date_range from pandas.util.testing import makeCustomDataframe as mkdf -from pandas.computation import pytables -from pandas.computation.engines import _engines, NumExprClobberingError -from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor -from pandas.computation.expressions import _USE_NUMEXPR, _NUMEXPR_INSTALLED -from pandas.computation.ops import (_binary_ops_dict, - _special_case_arith_ops_syms, - _arith_ops_syms, _bool_ops_syms, - _unary_math_ops, _binary_math_ops) - -import pandas.computation.expr as expr +from pandas.core.computation import pytables +from pandas.core.computation.engines import _engines, NumExprClobberingError +from pandas.core.computation.expr 
import PythonExprVisitor, PandasExprVisitor +from pandas.core.computation.expressions import ( + _USE_NUMEXPR, _NUMEXPR_INSTALLED) +from pandas.core.computation.ops import ( + _binary_ops_dict, + _special_case_arith_ops_syms, + _arith_ops_syms, _bool_ops_syms, + _unary_math_ops, _binary_math_ops) + +import pandas.core.computation.expr as expr import pandas.util.testing as tm from pandas.util.testing import (assert_frame_equal, randbool, assertRaisesRegexp, assert_numpy_array_equal, diff --git a/pandas/types/__init__.py b/pandas/tests/core/dtypes/__init__.py similarity index 100% rename from pandas/types/__init__.py rename to pandas/tests/core/dtypes/__init__.py diff --git a/pandas/tests/types/test_cast.py b/pandas/tests/core/dtypes/test_cast.py similarity index 95% rename from pandas/tests/types/test_cast.py rename to pandas/tests/core/dtypes/test_cast.py index de6ef7af9d7f9..a1490426ebf9d 100644 --- a/pandas/tests/types/test_cast.py +++ b/pandas/tests/core/dtypes/test_cast.py @@ -10,15 +10,18 @@ import numpy as np from pandas import Timedelta, Timestamp, DatetimeIndex -from pandas.types.cast import (maybe_downcast_to_dtype, - maybe_convert_objects, - infer_dtype_from_scalar, - infer_dtype_from_array, - maybe_convert_string_to_object, - maybe_convert_scalar, - find_common_type) -from pandas.types.dtypes import (CategoricalDtype, - DatetimeTZDtype, PeriodDtype) +from pandas.core.dtypes.cast import ( + maybe_downcast_to_dtype, + maybe_convert_objects, + infer_dtype_from_scalar, + infer_dtype_from_array, + maybe_convert_string_to_object, + maybe_convert_scalar, + find_common_type) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + PeriodDtype) from pandas.util import testing as tm diff --git a/pandas/tests/types/test_common.py b/pandas/tests/core/dtypes/test_common.py similarity index 92% rename from pandas/tests/types/test_common.py rename to pandas/tests/core/dtypes/test_common.py index 21772bab44d01..1017f93b8241c 100644 --- 
a/pandas/tests/types/test_common.py +++ b/pandas/tests/core/dtypes/test_common.py @@ -3,8 +3,10 @@ import pytest import numpy as np -from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype -from pandas.types.common import pandas_dtype, is_dtype_equal +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, PeriodDtype, CategoricalDtype) +from pandas.core.dtypes.common import ( + pandas_dtype, is_dtype_equal) import pandas.util.testing as tm @@ -87,11 +89,11 @@ def test_dtype_equal_strict(): def get_is_dtype_funcs(): """ - Get all functions in pandas.types.common that + Get all functions in pandas.core.dtypes.common that begin with 'is_' and end with 'dtype' """ - import pandas.types.common as com + import pandas.core.dtypes.common as com fnames = [f for f in dir(com) if (f.startswith('is_') and f.endswith('dtype'))] diff --git a/pandas/tests/types/test_concat.py b/pandas/tests/core/dtypes/test_concat.py similarity index 98% rename from pandas/tests/types/test_concat.py rename to pandas/tests/core/dtypes/test_concat.py index f4faab45f4ba2..e8eb042d78f30 100644 --- a/pandas/tests/types/test_concat.py +++ b/pandas/tests/core/dtypes/test_concat.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pandas as pd -import pandas.types.concat as _concat +import pandas.core.dtypes.concat as _concat import pandas.util.testing as tm diff --git a/pandas/tests/types/test_dtypes.py b/pandas/tests/core/dtypes/test_dtypes.py similarity index 96% rename from pandas/tests/types/test_dtypes.py rename to pandas/tests/core/dtypes/test_dtypes.py index 79d9fd84396e7..ec9876df14e3b 100644 --- a/pandas/tests/types/test_dtypes.py +++ b/pandas/tests/core/dtypes/test_dtypes.py @@ -5,15 +5,17 @@ import pandas as pd from pandas import Series, Categorical, IntervalIndex, date_range -from pandas.types.dtypes import (DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype) -from pandas.types.common import (is_categorical_dtype, is_categorical, - is_datetime64tz_dtype, 
is_datetimetz, - is_period_dtype, is_period, - is_dtype_equal, is_datetime64_ns_dtype, - is_datetime64_dtype, is_interval_dtype, - is_datetime64_any_dtype, is_string_dtype, - _coerce_to_dtype) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, PeriodDtype, + IntervalDtype, CategoricalDtype) +from pandas.core.dtypes.common import ( + is_categorical_dtype, is_categorical, + is_datetime64tz_dtype, is_datetimetz, + is_period_dtype, is_period, + is_dtype_equal, is_datetime64_ns_dtype, + is_datetime64_dtype, is_interval_dtype, + is_datetime64_any_dtype, is_string_dtype, + _coerce_to_dtype) import pandas.util.testing as tm diff --git a/pandas/tests/types/test_generic.py b/pandas/tests/core/dtypes/test_generic.py similarity index 97% rename from pandas/tests/types/test_generic.py rename to pandas/tests/core/dtypes/test_generic.py index 7994aa77bb220..d550b5535cea3 100644 --- a/pandas/tests/types/test_generic.py +++ b/pandas/tests/core/dtypes/test_generic.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd import pandas.util.testing as tm -from pandas.types import generic as gt +from pandas.core.dtypes import generic as gt class TestABCClasses(tm.TestCase): diff --git a/pandas/tests/types/test_inference.py b/pandas/tests/core/dtypes/test_inference.py similarity index 97% rename from pandas/tests/types/test_inference.py rename to pandas/tests/core/dtypes/test_inference.py index ec61903d3f20c..94d1d21d59d88 100644 --- a/pandas/tests/types/test_inference.py +++ b/pandas/tests/core/dtypes/test_inference.py @@ -18,26 +18,25 @@ DatetimeIndex, TimedeltaIndex, Timestamp, Panel, Period, Categorical) from pandas.compat import u, PY2, PY3, StringIO, lrange -from pandas.types import inference -from pandas.types.common import (is_timedelta64_dtype, - is_timedelta64_ns_dtype, - is_datetime64_dtype, - is_datetime64_ns_dtype, - is_datetime64_any_dtype, - is_datetime64tz_dtype, - is_number, - is_integer, - is_float, - is_bool, - is_scalar, - is_scipy_sparse, - _ensure_int32, 
- _ensure_categorical) -from pandas.types.missing import isnull +from pandas.core.dtypes import inference +from pandas.core.dtypes.common import ( + is_timedelta64_dtype, + is_timedelta64_ns_dtype, + is_datetime64_dtype, + is_datetime64_ns_dtype, + is_datetime64_any_dtype, + is_datetime64tz_dtype, + is_number, + is_integer, + is_float, + is_bool, + is_scalar, + is_scipy_sparse, + _ensure_int32, + _ensure_categorical) +from pandas.core.dtypes.missing import isnull from pandas.util import testing as tm -from pandas.tests.sparse.test_frame import spmatrix # noqa: F401 - def test_is_sequence(): is_seq = inference.is_sequence diff --git a/pandas/tests/types/test_io.py b/pandas/tests/core/dtypes/test_io.py similarity index 100% rename from pandas/tests/types/test_io.py rename to pandas/tests/core/dtypes/test_io.py diff --git a/pandas/tests/types/test_missing.py b/pandas/tests/core/dtypes/test_missing.py similarity index 98% rename from pandas/tests/types/test_missing.py rename to pandas/tests/core/dtypes/test_missing.py index 31bf2817c8bab..52dec66fe73eb 100644 --- a/pandas/tests/types/test_missing.py +++ b/pandas/tests/core/dtypes/test_missing.py @@ -11,9 +11,10 @@ from pandas._libs.tslib import iNaT from pandas import (NaT, Float64Index, Series, DatetimeIndex, TimedeltaIndex, date_range) -from pandas.types.dtypes import DatetimeTZDtype -from pandas.types.missing import (array_equivalent, isnull, notnull, - na_value_for_dtype) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import ( + array_equivalent, isnull, notnull, + na_value_for_dtype) def test_notnull(): diff --git a/pandas/tests/core/sparse/__init__.py b/pandas/tests/core/sparse/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/core/sparse/common.py b/pandas/tests/core/sparse/common.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/sparse/test_arithmetics.py 
b/pandas/tests/core/sparse/test_arithmetics.py similarity index 100% rename from pandas/tests/sparse/test_arithmetics.py rename to pandas/tests/core/sparse/test_arithmetics.py diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/core/sparse/test_array.py similarity index 99% rename from pandas/tests/sparse/test_array.py rename to pandas/tests/core/sparse/test_array.py index 15531cecfe79b..b7b664e7bfb8a 100644 --- a/pandas/tests/sparse/test_array.py +++ b/pandas/tests/core/sparse/test_array.py @@ -7,8 +7,8 @@ import numpy as np from pandas import _np_version_under1p8 -from pandas.sparse.api import SparseArray, SparseSeries -from pandas.sparse.libsparse import IntIndex +from pandas.core.sparse.api import SparseArray, SparseSeries +from pandas.core.sparse.libsparse import IntIndex from pandas.util.testing import assert_almost_equal, assertRaisesRegexp import pandas.util.testing as tm diff --git a/pandas/tests/sparse/test_combine_concat.py b/pandas/tests/core/sparse/test_combine_concat.py similarity index 100% rename from pandas/tests/sparse/test_combine_concat.py rename to pandas/tests/core/sparse/test_combine_concat.py diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/core/sparse/test_format.py similarity index 100% rename from pandas/tests/sparse/test_format.py rename to pandas/tests/core/sparse/test_format.py diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/core/sparse/test_frame.py similarity index 99% rename from pandas/tests/sparse/test_frame.py rename to pandas/tests/core/sparse/test_frame.py index 075d5efcefbe0..adb813a27e7e9 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/core/sparse/test_frame.py @@ -9,23 +9,22 @@ import pandas as pd from pandas import Series, DataFrame, bdate_range, Panel -from pandas.types.common import (is_bool_dtype, - is_float_dtype, - is_object_dtype, - is_float) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_float_dtype, + is_object_dtype, + is_float) from 
pandas.tseries.index import DatetimeIndex from pandas.tseries.offsets import BDay -import pandas.util.testing as tm +from pandas.util import testing as tm from pandas.compat import lrange from pandas import compat -import pandas.sparse.frame as spf +from pandas.core.sparse import frame as spf -from pandas.sparse.libsparse import BlockIndex, IntIndex -from pandas.sparse.api import SparseSeries, SparseDataFrame, SparseArray +from pandas.core.sparse.libsparse import BlockIndex, IntIndex +from pandas.core.sparse.api import SparseSeries, SparseDataFrame, SparseArray from pandas.tests.frame.test_api import SharedWithSparse -from pandas.tests.sparse.common import spmatrix # noqa: F401 - class TestSparseDataFrame(tm.TestCase, SharedWithSparse): klass = SparseDataFrame diff --git a/pandas/tests/sparse/test_groupby.py b/pandas/tests/core/sparse/test_groupby.py similarity index 100% rename from pandas/tests/sparse/test_groupby.py rename to pandas/tests/core/sparse/test_groupby.py diff --git a/pandas/tests/sparse/test_indexing.py b/pandas/tests/core/sparse/test_indexing.py similarity index 100% rename from pandas/tests/sparse/test_indexing.py rename to pandas/tests/core/sparse/test_indexing.py diff --git a/pandas/tests/sparse/test_libsparse.py b/pandas/tests/core/sparse/test_libsparse.py similarity index 99% rename from pandas/tests/sparse/test_libsparse.py rename to pandas/tests/core/sparse/test_libsparse.py index 696d2cf47f4c0..e4c3d6d3050cb 100644 --- a/pandas/tests/sparse/test_libsparse.py +++ b/pandas/tests/core/sparse/test_libsparse.py @@ -7,8 +7,8 @@ from pandas import compat -from pandas.sparse.array import IntIndex, BlockIndex, _make_index -import pandas.sparse.libsparse as splib +from pandas.core.sparse.array import IntIndex, BlockIndex, _make_index +import pandas.core.sparse.libsparse as splib TEST_LENGTH = 20 diff --git a/pandas/tests/sparse/test_list.py b/pandas/tests/core/sparse/test_list.py similarity index 98% rename from pandas/tests/sparse/test_list.py rename 
to pandas/tests/core/sparse/test_list.py index 8511cd5997368..9f91d73a8228a 100644 --- a/pandas/tests/sparse/test_list.py +++ b/pandas/tests/core/sparse/test_list.py @@ -4,7 +4,7 @@ from numpy import nan import numpy as np -from pandas.sparse.api import SparseList, SparseArray +from pandas.core.sparse.api import SparseList, SparseArray import pandas.util.testing as tm diff --git a/pandas/tests/sparse/test_pivot.py b/pandas/tests/core/sparse/test_pivot.py similarity index 100% rename from pandas/tests/sparse/test_pivot.py rename to pandas/tests/core/sparse/test_pivot.py diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/core/sparse/test_series.py similarity index 99% rename from pandas/tests/sparse/test_series.py rename to pandas/tests/core/sparse/test_series.py index 83f0237841dbd..0b71dffe1782b 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/core/sparse/test_series.py @@ -14,10 +14,10 @@ from pandas import compat from pandas.tools.util import cartesian_product -import pandas.sparse.frame as spf +import pandas.core.sparse.frame as spf -from pandas.sparse.libsparse import BlockIndex, IntIndex -from pandas.sparse.api import SparseSeries +from pandas.core.sparse.libsparse import BlockIndex, IntIndex +from pandas.core.sparse.api import SparseSeries from pandas.tests.series.test_api import SharedWithSparse diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/formats/test_format.py index 83458c82a3d7c..92f6a600a9e2a 100644 --- a/pandas/tests/formats/test_format.py +++ b/pandas/tests/formats/test_format.py @@ -1420,20 +1420,23 @@ def test_repr_html_wide_multiindex_cols(self): assert '...' in wide_repr def test_repr_html_long(self): - max_rows = get_option('display.max_rows') - h = max_rows - 1 - df = DataFrame({'A': np.arange(1, 1 + h), 'B': np.arange(41, 41 + h)}) - reg_repr = df._repr_html_() - assert '..' 
not in reg_repr - assert str(41 + max_rows // 2) in reg_repr + with option_context('display.max_rows', 60): + max_rows = get_option('display.max_rows') + h = max_rows - 1 + df = DataFrame({'A': np.arange(1, 1 + h), + 'B': np.arange(41, 41 + h)}) + reg_repr = df._repr_html_() + assert '..' not in reg_repr + assert str(41 + max_rows // 2) in reg_repr - h = max_rows + 1 - df = DataFrame({'A': np.arange(1, 1 + h), 'B': np.arange(41, 41 + h)}) - long_repr = df._repr_html_() - assert '..' in long_repr - assert str(41 + max_rows // 2) not in long_repr - assert u('%d rows ') % h in long_repr - assert u('2 columns') in long_repr + h = max_rows + 1 + df = DataFrame({'A': np.arange(1, 1 + h), + 'B': np.arange(41, 41 + h)}) + long_repr = df._repr_html_() + assert '..' in long_repr + assert str(41 + max_rows // 2) not in long_repr + assert u('%d rows ') % h in long_repr + assert u('2 columns') in long_repr def test_repr_html_float(self): with option_context('display.max_rows', 60): diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index f05b6fdd6bc23..9add944d2293e 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -9,9 +9,10 @@ from pandas.compat import lrange from pandas import (DataFrame, Series, Index, MultiIndex, RangeIndex, date_range, IntervalIndex) -from pandas.types.common import (is_object_dtype, - is_categorical_dtype, - is_interval_dtype) +from pandas.core.dtypes.common import ( + is_object_dtype, + is_categorical_dtype, + is_interval_dtype) import pandas as pd from pandas.util.testing import (assert_series_equal, diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 157cd1cdf1b22..1afb048ad825a 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -10,7 +10,7 @@ from pandas import (notnull, DataFrame, Series, MultiIndex, date_range, Timestamp, compat) import pandas as pd -from pandas.types.dtypes import 
CategoricalDtype +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.util.testing import (assert_series_equal, assert_frame_equal) import pandas.util.testing as tm diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 6d28d3b4dfcd5..508053a6367fa 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -13,7 +13,7 @@ import numpy.ma as ma import numpy.ma.mrecords as mrecords -from pandas.types.common import is_integer_dtype +from pandas.core.dtypes.common import is_integer_dtype from pandas.compat import (lmap, long, zip, range, lrange, lzip, OrderedDict, is_platform_little_endian) from pandas import compat diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index f7d2c1a654cd5..14334dfbeddb3 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -7,7 +7,7 @@ from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, compat, concat, option_context) from pandas.compat import u -from pandas.types.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, assert_frame_equal, diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index f0dfc4553886b..b624657ca4b4b 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -20,9 +20,10 @@ from pandas._libs.tslib import iNaT from pandas.tseries.offsets import BDay -from pandas.types.common import (is_float_dtype, - is_integer, - is_scalar) +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer, + is_scalar) from pandas.util.testing import (assert_almost_equal, assert_numpy_array_equal, assert_series_equal, @@ -1866,7 +1867,7 @@ def test_iloc_duplicates(self): assert_frame_equal(result, expected) def 
test_iloc_sparse_propegate_fill_value(self): - from pandas.sparse.api import SparseDataFrame + from pandas.core.sparse.api import SparseDataFrame df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999) self.assertTrue(len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)) diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index f90b37b66d200..a531b86699e90 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -19,7 +19,7 @@ makeCustomDataframe as mkdf) import pandas.util.testing as tm -from pandas.computation import _NUMEXPR_INSTALLED +from pandas.core.computation import _NUMEXPR_INSTALLED from pandas.tests.frame.common import TestData @@ -511,7 +511,7 @@ def test_query_syntax_error(self): df.query('i - +', engine=engine, parser=parser) def test_query_scope(self): - from pandas.computation.ops import UndefinedVariableError + from pandas.core.computation.ops import UndefinedVariableError engine, parser = self.engine, self.parser skip_if_no_pandas_parser(parser) @@ -535,7 +535,7 @@ def test_query_scope(self): df.query('@a > b > c', engine=engine, parser=parser) def test_query_doesnt_pickup_local(self): - from pandas.computation.ops import UndefinedVariableError + from pandas.core.computation.ops import UndefinedVariableError engine, parser = self.engine, self.parser n = m = 10 @@ -546,7 +546,7 @@ def test_query_doesnt_pickup_local(self): df.query('sin > 5', engine=engine, parser=parser) def test_query_builtin(self): - from pandas.computation.engines import NumExprClobberingError + from pandas.core.computation.engines import NumExprClobberingError engine, parser = self.engine, self.parser n = m = 10 @@ -624,7 +624,7 @@ def test_nested_scope(self): assert_frame_equal(result, expected) def test_nested_raises_on_local_self_reference(self): - from pandas.computation.ops import UndefinedVariableError + from pandas.core.computation.ops import UndefinedVariableError df = 
DataFrame(np.random.randn(5, 3)) @@ -683,7 +683,7 @@ def test_at_inside_string(self): assert_frame_equal(result, expected) def test_query_undefined_local(self): - from pandas.computation.ops import UndefinedVariableError + from pandas.core.computation.ops import UndefinedVariableError engine, parser = self.engine, self.parser skip_if_no_pandas_parser(parser) df = DataFrame(np.random.rand(10, 2), columns=list('ab')) @@ -803,7 +803,7 @@ def test_date_index_query_with_NaT_duplicates(self): df.query('index < 20130101 < dates3', engine=engine, parser=parser) def test_nested_scope(self): - from pandas.computation.ops import UndefinedVariableError + from pandas.core.computation.ops import UndefinedVariableError engine = self.engine parser = self.parser # smoke test diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 02c7933e020ea..289723ed5667a 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -3,7 +3,7 @@ from numpy import nan import numpy as np -from pandas.types.common import _ensure_int64 +from pandas.core.dtypes.common import _ensure_int64 from pandas import Index, isnull from pandas.util.testing import assert_almost_equal import pandas.util.testing as tm diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 3b85fadda6cfe..541f5d28be421 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -4,7 +4,8 @@ import pandas as pd from pandas.util import testing as tm from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range -from pandas.types.common import _ensure_platform_int, is_timedelta64_dtype +from pandas.core.dtypes.common import ( + _ensure_platform_int, is_timedelta64_dtype) from pandas.compat import StringIO from pandas._libs import groupby from .common import MixIn, assert_fp_equal diff --git a/pandas/tests/indexes/common.py 
b/pandas/tests/indexes/common.py index 54d47d02c5e8e..25214e6b170b5 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -9,7 +9,7 @@ RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, IntervalIndex, notnull, isnull) -from pandas.types.common import needs_i8_conversion +from pandas.core.dtypes.common import needs_i8_conversion from pandas.util.testing import assertRaisesRegexp from pandas._libs.tslib import iNaT diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 16881de6e8c39..d4e672d0584cf 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -12,21 +12,36 @@ class TestDatetimeIndex(tm.TestCase): + def test_construction_caching(self): + + df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3), + 'dttz': pd.date_range('20130101', periods=3, + tz='US/Eastern'), + 'dt_with_null': [pd.Timestamp('20130101'), pd.NaT, + pd.Timestamp('20130103')], + 'dtns': pd.date_range('20130101', periods=3, + freq='ns')}) + assert df.dttz.dtype.tz.zone == 'US/Eastern' + def test_construction_with_alt(self): i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern') i2 = DatetimeIndex(i, dtype=i.dtype) self.assert_index_equal(i, i2) + assert i.tz.zone == 'US/Eastern' i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz) self.assert_index_equal(i, i2) + assert i.tz.zone == 'US/Eastern' i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype) self.assert_index_equal(i, i2) + assert i.tz.zone == 'US/Eastern' i2 = DatetimeIndex( i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz) self.assert_index_equal(i, i2) + assert i.tz.zone == 'US/Eastern' # localize into the provided tz i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC') diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 
1260ee4e5ab07..28fbce43bf983 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -14,7 +14,7 @@ from pandas.tseries.tools import normalize_date from pandas.compat import lmap from pandas.compat.numpy import np_array_datetime64_compat -from pandas.types.common import is_datetime64_ns_dtype +from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.util import testing as tm from pandas.util.testing import assert_series_equal, _skip_if_has_locale from pandas import (isnull, to_datetime, Timestamp, Series, DataFrame, diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 0f8a9573a233b..51c0889a6f091 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -5,7 +5,7 @@ import numpy as np from pandas.compat import lrange -from pandas.types.common import is_scalar +from pandas.core.dtypes.common import is_scalar from pandas import Series, DataFrame, Panel, date_range, UInt64Index from pandas.util import testing as tm from pandas.formats.printing import pprint_thing diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 0d6ca383a1be1..53812feaa8da7 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -6,8 +6,9 @@ from warnings import catch_warnings from datetime import datetime -from pandas.types.common import (is_integer_dtype, - is_float_dtype) +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_float_dtype) from pandas.compat import range, lrange, lzip, StringIO import numpy as np diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index b12d1eb97f88b..6eda8b2b6f631 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -5,7 +5,7 @@ import numpy as np import pandas as pd -from pandas.types.common import is_scalar +from pandas.core.dtypes.common import is_scalar from 
pandas.compat import lrange from pandas import Series, DataFrame, option_context, MultiIndex from pandas.util import testing as tm diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index d1795f2816817..2a785375acaea 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -7,11 +7,14 @@ import pytest from pandas import DataFrame -from pandas.types.dtypes import PeriodDtype, CategoricalDtype, DatetimeTZDtype +from pandas.core.dtypes.dtypes import ( + PeriodDtype, CategoricalDtype, DatetimeTZDtype) import pandas.util.testing as tm from pandas.io.json.table_schema import ( - as_json_table_type, build_table_schema, make_field, set_default_names -) + as_json_table_type, + build_table_schema, + make_field, + set_default_names) class TestBuildSchema(tm.TestCase): diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index 8066718363803..50c9a1bc724fc 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -11,7 +11,7 @@ from pandas import DataFrame, Series, Index, MultiIndex, Categorical from pandas.compat import StringIO -from pandas.types.dtypes import CategoricalDtype +from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.errors import ParserWarning diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 3fad2637ef057..232bb126d9d67 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -11,6 +11,7 @@ from pandas.util.testing import assert_frame_equal, ensure_clean +@pytest.mark.single class TestFeather(object): def check_error_on_write(self, df, exc): @@ -52,6 +53,7 @@ def test_basic(self): 'dtns': pd.date_range('20130101', periods=3, freq='ns')}) + assert df.dttz.dtype.tz.zone == 'US/Eastern' self.check_round_trip(df) def test_strided_data_issues(self): diff --git a/pandas/tests/io/test_sql.py 
b/pandas/tests/io/test_sql.py index b4e8d6a3b972c..ce411bb4d5c4e 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -31,8 +31,9 @@ from datetime import datetime, date, time -from pandas.types.common import (is_object_dtype, is_datetime64_dtype, - is_datetime64tz_dtype) +from pandas.core.dtypes.common import ( + is_object_dtype, is_datetime64_dtype, + is_datetime64tz_dtype) from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat from pandas import date_range, to_datetime, to_timedelta, Timestamp import pandas.compat as compat diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index db594889c91ee..50d3342c56522 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -20,7 +20,7 @@ from pandas.io.stata import (read_stata, StataReader, InvalidColumnName, PossiblePrecisionLoss, StataMissingValue) from pandas._libs.tslib import NaT -from pandas.types.common import is_categorical_dtype +from pandas.core.dtypes.common import is_categorical_dtype class TestStata(tm.TestCase): diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index d81f73e73ae69..0ffd53b149d7a 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -8,7 +8,7 @@ from pandas import DataFrame, Series from pandas.compat import zip, iteritems from pandas.util.decorators import cache_readonly -from pandas.types.api import is_list_like +from pandas.core.dtypes.api import is_list_like import pandas.util.testing as tm from pandas.util.testing import (ensure_clean, assert_is_valid_plot_return_object) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 404752b567f63..fe07f5b9f193e 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -11,7 +11,7 @@ import pandas as pd from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range, bdate_range) -from pandas.types.api import 
is_list_like +from pandas.core.dtypes.api import is_list_like from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3 from pandas.formats.printing import pprint_thing import pandas.util.testing as tm diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f4297208b2e26..24b2a12d70709 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -8,7 +8,9 @@ import numpy.ma as ma import pandas as pd -from pandas.types.common import is_categorical_dtype, is_datetime64tz_dtype +from pandas.core.dtypes.common import ( + is_categorical_dtype, + is_datetime64tz_dtype) from pandas import (Index, Series, isnull, date_range, NaT, period_range, MultiIndex, IntervalIndex) from pandas.tseries.index import Timestamp, DatetimeIndex diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 89f972a33a630..8825ba5607a20 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd -from pandas.types.common import is_integer_dtype, is_list_like +from pandas.core.dtypes.common import is_integer_dtype, is_list_like from pandas import (Index, Series, DataFrame, bdate_range, date_range, period_range, timedelta_range, PeriodIndex, Timestamp, DatetimeIndex, TimedeltaIndex) diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 48410c1c73479..6c1d77acd70d5 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -8,7 +8,7 @@ import pandas as pd import pandas._libs.index as _index -from pandas.types.common import is_integer, is_scalar +from pandas.core.dtypes.common import is_integer, is_scalar from pandas import (Index, Series, DataFrame, isnull, date_range, NaT, MultiIndex, Timestamp, DatetimeIndex, Timedelta) diff --git a/pandas/tests/series/test_quantile.py 
b/pandas/tests/series/test_quantile.py index 5aca34fb86576..339d871b63049 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -7,7 +7,7 @@ from pandas import (Index, Series, _np_version_under1p9) from pandas.tseries.index import Timestamp -from pandas.types.common import is_integer +from pandas.core.dtypes.common import is_integer import pandas.util.testing as tm from .common import TestData diff --git a/pandas/tests/sparse/common.py b/pandas/tests/sparse/common.py deleted file mode 100644 index 3aeef8d436e1a..0000000000000 --- a/pandas/tests/sparse/common.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest - -import pandas.util.testing as tm - - -@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']) -def spmatrix(request): - tm._skip_if_no_scipy() - from scipy import sparse - return getattr(sparse, request.param + '_matrix') diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 4a1cf6314aaed..91c06a2c30e50 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -9,8 +9,9 @@ import pandas as pd import pandas.compat as compat -from pandas.types.common import (is_object_dtype, is_datetimetz, - needs_i8_conversion) +from pandas.core.dtypes.common import ( + is_object_dtype, is_datetimetz, + needs_i8_conversion) import pandas.util.testing as tm from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex, Timedelta, IntervalIndex, Interval) diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index dd370f0a20c2e..3296673e96316 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -9,10 +9,11 @@ import numpy as np -from pandas.types.dtypes import CategoricalDtype -from pandas.types.common import (is_categorical_dtype, - is_float_dtype, - is_integer_dtype) +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.common import ( + is_categorical_dtype, + is_float_dtype, + 
is_integer_dtype) import pandas as pd import pandas.compat as compat diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index dc4787176a0b5..b353f73f4004d 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -12,7 +12,7 @@ import numpy as np from pandas.core.api import DataFrame, Panel -from pandas.computation import expressions as expr +from pandas.core.computation import expressions as expr from pandas import compat, _np_version_under1p11 from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 118039d1f354c..80059277407c3 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -11,7 +11,7 @@ import pandas as pd from distutils.version import LooseVersion -from pandas.types.common import is_scalar +from pandas.core.dtypes.common import is_scalar from pandas import (Index, Series, DataFrame, Panel, isnull, date_range, period_range, Panel4D) from pandas.core.index import MultiIndex diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index af7c584249416..b18214bbef926 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -12,7 +12,7 @@ from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, Series, Categorical) from pandas.compat import OrderedDict, lrange -from pandas.sparse.array import SparseArray +from pandas.core.sparse.array import SparseArray from pandas.core.internals import (BlockPlacement, SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index e3193cddbaaab..648a3b98b245a 100755 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -12,7 +12,7 @@ from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp from 
pandas.core.common import UnsortedIndexError -from pandas.types.common import is_float_dtype, is_integer_dtype +from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import pandas.core.common as com import pandas.util.testing as tm from pandas.compat import (range, lrange, StringIO, lzip, u, product as diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 54de8c1e34031..20a9238310ccf 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -6,7 +6,7 @@ import warnings import numpy as np from pandas import Series, isnull, _np_version_under1p9 -from pandas.types.common import is_integer_dtype +from pandas.core.dtypes.common import is_integer_dtype import pandas.core.nanops as nanops import pandas.util.testing as tm diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index bc7bb8a4dfec1..f0e53046e3552 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -9,7 +9,7 @@ import numpy as np import pandas as pd -from pandas.types.common import is_float_dtype +from pandas.core.dtypes.common import is_float_dtype from pandas import (Series, DataFrame, Index, date_range, isnull, notnull, pivot, MultiIndex) from pandas.core.nanops import nanall, nanany diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index c0511581cd299..3af47a2b408bc 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -6,7 +6,7 @@ from warnings import catch_warnings import numpy as np -from pandas.types.common import is_float_dtype +from pandas.core.dtypes.common import is_float_dtype from pandas import Series, Index, isnull, notnull from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D diff --git a/pandas/tests/tools/test_merge.py b/pandas/tests/tools/test_merge.py index 8011bc4a1cfc2..cc4a97df33801 100644 --- a/pandas/tests/tools/test_merge.py +++ b/pandas/tests/tools/test_merge.py @@ -12,8 +12,8 @@ from pandas.tools.concat import concat 
from pandas.tools.merge import merge, MergeError from pandas.util.testing import assert_frame_equal, assert_series_equal -from pandas.types.dtypes import CategoricalDtype -from pandas.types.common import is_categorical_dtype, is_object_dtype +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype from pandas import DataFrame, Index, MultiIndex, Series, Categorical import pandas.util.testing as tm diff --git a/pandas/tests/tools/test_union_categoricals.py b/pandas/tests/tools/test_union_categoricals.py index 299b60f2a00b0..f9224d0126f6c 100644 --- a/pandas/tests/tools/test_union_categoricals.py +++ b/pandas/tests/tools/test_union_categoricals.py @@ -1,7 +1,7 @@ import numpy as np import pandas as pd from pandas import Categorical, Series, CategoricalIndex -from pandas.types.concat import union_categoricals +from pandas.core.dtypes.concat import union_categoricals from pandas.util import testing as tm diff --git a/pandas/tests/tseries/test_resample.py b/pandas/tests/tseries/test_resample.py index 98664c1ec118c..e81dfd8649e8e 100755 --- a/pandas/tests/tseries/test_resample.py +++ b/pandas/tests/tseries/test_resample.py @@ -12,7 +12,7 @@ from pandas import (Series, DataFrame, Panel, Index, isnull, notnull, Timestamp) -from pandas.types.generic import ABCSeries, ABCDataFrame +from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.compat import range, lrange, zip, product, OrderedDict from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 3e1b29f4c282c..125e031b5e3a2 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -11,7 +11,7 @@ import pandas.tseries.offsets as offsets from pandas.compat import lrange, zip from pandas.tseries.index import bdate_range, date_range -from 
pandas.types.dtypes import DatetimeTZDtype +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib from pandas import (Index, Series, DataFrame, isnull, Timestamp, NaT, DatetimeIndex, to_datetime) diff --git a/pandas/tools/concat.py b/pandas/tools/concat.py index 5df9a5abb78b2..af2eb734a02f6 100644 --- a/pandas/tools/concat.py +++ b/pandas/tools/concat.py @@ -12,7 +12,7 @@ from pandas.core.internals import concatenate_block_managers from pandas.core import common as com from pandas.core.generic import NDFrame -import pandas.types.concat as _concat +import pandas.core.dtypes.concat as _concat # --------------------------------------------------------------------- # Concatenate DataFrame objects diff --git a/pandas/tools/hashing.py b/pandas/tools/hashing.py index 85ceb439435ee..275c1c87ea57a 100644 --- a/pandas/tools/hashing.py +++ b/pandas/tools/hashing.py @@ -7,10 +7,14 @@ from pandas import Series, factorize, Categorical, Index, MultiIndex from pandas.tools import libhashing as _hash from pandas._libs.lib import is_bool_array -from pandas.types.generic import ABCIndexClass, ABCSeries, ABCDataFrame -from pandas.types.common import (is_categorical_dtype, is_numeric_dtype, - is_datetime64_dtype, is_timedelta64_dtype, - is_list_like) +from pandas.core.dtypes.generic import ( + ABCIndexClass, + ABCSeries, + ABCDataFrame) +from pandas.core.dtypes.common import ( + is_categorical_dtype, is_numeric_dtype, + is_datetime64_dtype, is_timedelta64_dtype, + is_list_like) # 16 byte long hashing key _default_hash_key = '0123456789123456' diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 7de2549cadfc7..53208fbdd5529 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -14,24 +14,25 @@ from pandas import (Categorical, Series, DataFrame, Index, MultiIndex, Timedelta) from pandas.core.frame import _merge_doc -from pandas.types.common import (is_datetime64tz_dtype, - is_datetime64_dtype, - needs_i8_conversion, - is_int64_dtype, 
- is_categorical_dtype, - is_integer_dtype, - is_float_dtype, - is_numeric_dtype, - is_integer, - is_int_or_datetime_dtype, - is_dtype_equal, - is_bool, - is_list_like, - _ensure_int64, - _ensure_float64, - _ensure_object, - _get_dtype) -from pandas.types.missing import na_value_for_dtype +from pandas.core.dtypes.common import ( + is_datetime64tz_dtype, + is_datetime64_dtype, + needs_i8_conversion, + is_int64_dtype, + is_categorical_dtype, + is_integer_dtype, + is_float_dtype, + is_numeric_dtype, + is_integer, + is_int_or_datetime_dtype, + is_dtype_equal, + is_bool, + is_list_like, + _ensure_int64, + _ensure_float64, + _ensure_object, + _get_dtype) +from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.internals import (items_overlap_with_suffix, concatenate_block_managers) from pandas.util.decorators import Appender, Substitution diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index e23beb8332fd4..11ca2e548f171 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -1,7 +1,7 @@ # pylint: disable=E1103 -from pandas.types.common import is_list_like, is_scalar +from pandas.core.dtypes.common import is_list_like, is_scalar from pandas import Series, DataFrame, MultiIndex, Index, concat from pandas.core.groupby import Grouper from pandas.tools.util import cartesian_product diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py index 2a258d4a7b7e5..746742f47f2aa 100644 --- a/pandas/tools/tile.py +++ b/pandas/tools/tile.py @@ -2,13 +2,14 @@ Quantilization functions and related stuff """ -from pandas.types.missing import isnull -from pandas.types.common import (is_integer, - is_scalar, - is_categorical_dtype, - is_datetime64_dtype, - is_timedelta64_dtype, - _ensure_int64) +from pandas.core.dtypes.missing import isnull +from pandas.core.dtypes.common import ( + is_integer, + is_scalar, + is_categorical_dtype, + is_datetime64_dtype, + is_timedelta64_dtype, + _ensure_int64) import pandas.core.algorithms as algos import 
pandas.core.nanops as nanops diff --git a/pandas/tools/util.py b/pandas/tools/util.py index 263d2f16a4216..baf968440858d 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -1,15 +1,16 @@ import numpy as np import pandas._libs.lib as lib -from pandas.types.common import (is_number, - is_numeric_dtype, - is_datetime_or_timedelta_dtype, - is_list_like, - _ensure_object, - is_decimal, - is_scalar as isscalar) - -from pandas.types.cast import maybe_downcast_to_dtype +from pandas.core.dtypes.common import ( + is_number, + is_numeric_dtype, + is_datetime_or_timedelta_dtype, + is_list_like, + _ensure_object, + is_decimal, + is_scalar as isscalar) + +from pandas.core.dtypes.cast import maybe_downcast_to_dtype import pandas as pd from pandas.compat import reduce diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index 48d236177b474..cf79cadef78dd 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -9,13 +9,15 @@ from pandas.compat.numpy import function as nv import numpy as np -from pandas.types.common import (is_integer, is_float, - is_bool_dtype, _ensure_int64, - is_scalar, is_dtype_equal, - is_list_like) -from pandas.types.generic import (ABCIndex, ABCSeries, - ABCPeriodIndex, ABCIndexClass) -from pandas.types.missing import isnull +from pandas.core.dtypes.common import ( + is_integer, is_float, + is_bool_dtype, _ensure_int64, + is_scalar, is_dtype_equal, + is_list_like) +from pandas.core.dtypes.generic import ( + ABCIndex, ABCSeries, + ABCPeriodIndex, ABCIndexClass) +from pandas.core.dtypes.missing import isnull from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr from pandas.core.common import AbstractMethodError @@ -28,7 +30,7 @@ from pandas.core.index import Index from pandas.indexes.base import _index_shared_docs from pandas.util.decorators import Appender, cache_readonly -import pandas.types.concat as _concat +import pandas.core.dtypes.concat as _concat import 
pandas.tseries.frequencies as frequencies import pandas.indexes.base as ibase diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py index 955edce2591e6..2154cfd4b2857 100644 --- a/pandas/tseries/common.py +++ b/pandas/tseries/common.py @@ -4,11 +4,12 @@ import numpy as np -from pandas.types.common import (is_period_arraylike, - is_datetime_arraylike, is_integer_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, - is_timedelta64_dtype, is_categorical_dtype, - is_list_like) +from pandas.core.dtypes.common import ( + is_period_arraylike, + is_datetime_arraylike, is_integer_dtype, + is_datetime64_dtype, is_datetime64tz_dtype, + is_timedelta64_dtype, is_categorical_dtype, + is_list_like) from pandas.core.base import PandasDelegate, NoNewAttributesMixin from pandas.tseries.index import DatetimeIndex diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 8013947babc5a..06d70f1456518 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -6,11 +6,12 @@ import numpy as np -from pandas.types.generic import ABCSeries -from pandas.types.common import (is_integer, - is_period_arraylike, - is_timedelta64_dtype, - is_datetime64_dtype) +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.common import ( + is_integer, + is_period_arraylike, + is_timedelta64_dtype, + is_datetime64_dtype) import pandas.core.algorithms as algos from pandas.core.algorithms import unique diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 2c14d4f8ea79e..95594652e3943 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -7,24 +7,25 @@ import numpy as np from pandas.core.base import _shared_docs -from pandas.types.common import (_NS_DTYPE, _INT64_DTYPE, - is_object_dtype, is_datetime64_dtype, - is_datetimetz, is_dtype_equal, - is_integer, is_float, - is_integer_dtype, - is_datetime64_ns_dtype, - is_period_dtype, - is_bool_dtype, - is_string_dtype, - is_list_like, - is_scalar, - 
pandas_dtype, - _ensure_int64) -from pandas.types.generic import ABCSeries -from pandas.types.dtypes import DatetimeTZDtype -from pandas.types.missing import isnull - -import pandas.types.concat as _concat +from pandas.core.dtypes.common import ( + _NS_DTYPE, _INT64_DTYPE, + is_object_dtype, is_datetime64_dtype, + is_datetimetz, is_dtype_equal, + is_integer, is_float, + is_integer_dtype, + is_datetime64_ns_dtype, + is_period_dtype, + is_bool_dtype, + is_string_dtype, + is_list_like, + is_scalar, + pandas_dtype, + _ensure_int64) +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isnull + +import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_box diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 2b6a684fc39dd..a097c56a0ffd3 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -3,7 +3,7 @@ from pandas import compat import numpy as np -from pandas.types.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod +from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod from pandas.tseries.tools import to_datetime, normalize_date from pandas.core.common import AbstractMethodError diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 7f7b3286fd4f8..66275925ff355 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -5,21 +5,22 @@ from pandas.core import common as com -from pandas.types.common import (is_integer, - is_float, - is_object_dtype, - is_integer_dtype, - is_float_dtype, - is_scalar, - is_datetime64_dtype, - is_datetime64tz_dtype, - is_timedelta64_dtype, - is_period_dtype, - is_bool_dtype, - pandas_dtype, - _ensure_object) -from pandas.types.dtypes import PeriodDtype -from pandas.types.generic import ABCSeries +from pandas.core.dtypes.common import ( + is_integer, + is_float, + 
is_object_dtype, + is_integer_dtype, + is_float_dtype, + is_scalar, + is_datetime64_dtype, + is_datetime64tz_dtype, + is_timedelta64_dtype, + is_period_dtype, + is_bool_dtype, + pandas_dtype, + _ensure_object) +from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCSeries import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index d0f373fcc5a45..c26f023ea942a 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -2,18 +2,19 @@ from datetime import timedelta import numpy as np -from pandas.types.common import (_TD_DTYPE, - is_integer, is_float, - is_bool_dtype, - is_list_like, - is_scalar, - is_integer_dtype, - is_object_dtype, - is_timedelta64_dtype, - is_timedelta64_ns_dtype, - _ensure_int64) -from pandas.types.missing import isnull -from pandas.types.generic import ABCSeries +from pandas.core.dtypes.common import ( + _TD_DTYPE, + is_integer, is_float, + is_bool_dtype, + is_list_like, + is_scalar, + is_integer_dtype, + is_object_dtype, + is_timedelta64_dtype, + is_timedelta64_ns_dtype, + _ensure_int64) +from pandas.core.dtypes.missing import isnull +from pandas.core.dtypes.generic import ABCSeries from pandas.core.common import _maybe_box, _values_from_object, is_bool_indexer from pandas.core.index import Index, Int64Index @@ -24,7 +25,7 @@ from pandas.core.base import _shared_docs from pandas.indexes.base import _index_shared_docs import pandas.core.common as com -import pandas.types.concat as _concat +import pandas.core.dtypes.concat as _concat from pandas.util.decorators import Appender, Substitution, deprecate_kwarg from pandas.tseries.base import TimelikeOps, DatetimeIndexOpsMixin from pandas.tseries.timedeltas import (to_timedelta, diff --git a/pandas/tseries/timedeltas.py b/pandas/tseries/timedeltas.py index ead602ee80e32..fe03f89fdb2c5 100644 --- a/pandas/tseries/timedeltas.py +++ 
b/pandas/tseries/timedeltas.py @@ -6,11 +6,12 @@ import pandas as pd import pandas._libs.tslib as tslib -from pandas.types.common import (_ensure_object, - is_integer_dtype, - is_timedelta64_dtype, - is_list_like) -from pandas.types.generic import ABCSeries, ABCIndexClass +from pandas.core.dtypes.common import ( + _ensure_object, + is_integer_dtype, + is_timedelta64_dtype, + is_list_like) +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass def to_timedelta(arg, unit='ns', box=True, errors='raise'): diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index 9d5821d859187..db7aa5974e562 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -4,19 +4,21 @@ from pandas._libs import lib, tslib -from pandas.types.common import (_ensure_object, - is_datetime64_ns_dtype, - is_datetime64_dtype, - is_datetime64tz_dtype, - is_integer_dtype, - is_integer, - is_float, - is_list_like, - is_scalar, - is_numeric_dtype) -from pandas.types.generic import (ABCIndexClass, ABCSeries, - ABCDataFrame) -from pandas.types.missing import notnull +from pandas.core.dtypes.common import ( + _ensure_object, + is_datetime64_ns_dtype, + is_datetime64_dtype, + is_datetime64tz_dtype, + is_integer_dtype, + is_integer, + is_float, + is_list_like, + is_scalar, + is_numeric_dtype) +from pandas.core.dtypes.generic import ( + ABCIndexClass, ABCSeries, + ABCDataFrame) +from pandas.core.dtypes.missing import notnull from pandas.core import algorithms import pandas.compat as compat diff --git a/pandas/tseries/util.py b/pandas/tseries/util.py index da3bb075dd02c..5934f5843736c 100644 --- a/pandas/tseries/util.py +++ b/pandas/tseries/util.py @@ -2,7 +2,7 @@ from pandas.compat import lrange import numpy as np -from pandas.types.common import _ensure_platform_int +from pandas.core.dtypes.common import _ensure_platform_int from pandas.core.frame import DataFrame import pandas.core.algorithms as algorithms diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 
c73cca56f975a..638a190d810a5 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -23,15 +23,16 @@ import numpy as np import pandas as pd -from pandas.types.missing import array_equivalent -from pandas.types.common import (is_datetimelike_v_numeric, - is_datetimelike_v_object, - is_number, is_bool, - needs_i8_conversion, - is_categorical_dtype, - is_interval_dtype, - is_sequence, - is_list_like) +from pandas.core.dtypes.missing import array_equivalent +from pandas.core.dtypes.common import ( + is_datetimelike_v_numeric, + is_datetimelike_v_object, + is_number, is_bool, + needs_i8_conversion, + is_categorical_dtype, + is_interval_dtype, + is_sequence, + is_list_like) from pandas.formats.printing import pprint_thing from pandas.core.algorithms import take_1d @@ -42,7 +43,7 @@ StringIO, PY3 ) -from pandas.computation import expressions as expr +from pandas.core.computation import expressions as expr from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, @@ -401,8 +402,9 @@ def _incompat_bottleneck_version(method): def skip_if_no_ne(engine='numexpr'): - from pandas.computation.expressions import (_USE_NUMEXPR, - _NUMEXPR_INSTALLED) + from pandas.core.computation.expressions import ( + _USE_NUMEXPR, + _NUMEXPR_INSTALLED) if engine == 'numexpr': if not _USE_NUMEXPR: @@ -1539,10 +1541,10 @@ def assert_sp_array_equal(left, right, check_dtype=True): check_dtype=check_dtype) # SparseIndex comparison - assertIsInstance(left.sp_index, - pd.sparse.libsparse.SparseIndex, '[SparseIndex]') - assertIsInstance(right.sp_index, - pd.sparse.libsparse.SparseIndex, '[SparseIndex]') + assertIsInstance( + left.sp_index, pd.core.sparse.libsparse.SparseIndex, '[SparseIndex]') + assertIsInstance( + right.sp_index, pd.core.sparse.libsparse.SparseIndex, '[SparseIndex]') if not left.sp_index.equals(right.sp_index): raise_assert_detail('SparseArray.index', 'index are not equal', diff --git 
a/pandas/util/testing.pyx b/pandas/util/testing.pyx index cda21ba9c4ce1..9495af87f5c31 100644 --- a/pandas/util/testing.pyx +++ b/pandas/util/testing.pyx @@ -1,8 +1,8 @@ import numpy as np from pandas import compat -from pandas.types.missing import isnull, array_equivalent -from pandas.types.common import is_dtype_equal +from pandas.core.dtypes.missing import isnull, array_equivalent +from pandas.core.dtypes.common import is_dtype_equal cdef NUMERIC_TYPES = ( bool, diff --git a/pandas/util/validators.py b/pandas/util/validators.py index f22412a2bcd17..6b19904f4a665 100644 --- a/pandas/util/validators.py +++ b/pandas/util/validators.py @@ -3,7 +3,7 @@ for validating data or function arguments """ -from pandas.types.common import is_bool +from pandas.core.dtypes.common import is_bool def _check_arg_length(fname, args, max_fname_arg_count, compat_args): diff --git a/setup.py b/setup.py index d76c6fa508008..b7c4581c4ecfe 100755 --- a/setup.py +++ b/setup.py @@ -118,7 +118,7 @@ def is_platform_mac(): 'hashtable': ['_libs/hashtable_class_helper.pxi.in', '_libs/hashtable_func_helper.pxi.in'], 'index': ['_libs/index_class_helper.pxi.in'], - 'sparse': ['sparse/sparse_op_helper.pxi.in'], + 'sparse': ['core/sparse/sparse_op_helper.pxi.in'], 'interval': ['_libs/intervaltree.pxi.in'] } @@ -338,7 +338,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/join.pyx', 'pandas/_libs/interval.pyx', 'pandas/core/window.pyx', - 'pandas/sparse/sparse.pyx', + 'pandas/core/sparse/sparse.pyx', 'pandas/util/testing.pyx', 'pandas/tools/hash.pyx', 'pandas/io/parsers.pyx', @@ -523,8 +523,8 @@ def pxd(name): 'pandas/_libs/src/numpy_helper.h'], 'sources': ['pandas/_libs/src/parser/tokenizer.c', 'pandas/_libs/src/parser/io.c']}, - 'sparse.libsparse': {'pyxfile': 'sparse/sparse', - 'depends': (['pandas/sparse/sparse.pyx'] + + 'core.sparse.libsparse': {'pyxfile': 'core/sparse/sparse', + 'depends': (['pandas/core/sparse/sparse.pyx'] + _pxi_dep['sparse'])}, 'util.libtesting': {'pyxfile': 'util/testing', 
'depends': ['pandas/util/testing.pyx']}, @@ -636,11 +636,12 @@ def pxd(name): packages=['pandas', 'pandas.api', 'pandas.api.types', - 'pandas.api.lib', 'pandas.compat', 'pandas.compat.numpy', - 'pandas.computation', 'pandas.core', + 'pandas.core.dtypes', + 'pandas.core.computation', + 'pandas.core.sparse', 'pandas.indexes', 'pandas.errors', 'pandas.io', @@ -650,12 +651,13 @@ def pxd(name): 'pandas._libs', 'pandas.formats', 'pandas.plotting', - 'pandas.sparse', 'pandas.stats', 'pandas.util', 'pandas.tests', 'pandas.tests.api', - 'pandas.tests.computation', + 'pandas.tests.core.dtypes', + 'pandas.tests.core.computation', + 'pandas.tests.core.sparse', 'pandas.tests.frame', 'pandas.tests.indexes', 'pandas.tests.indexes.datetimes', @@ -670,14 +672,11 @@ def pxd(name): 'pandas.tests.series', 'pandas.tests.formats', 'pandas.tests.scalar', - 'pandas.tests.sparse', 'pandas.tests.tseries', 'pandas.tests.tools', - 'pandas.tests.types', 'pandas.tests.plotting', 'pandas.tools', 'pandas.tseries', - 'pandas.types', 'pandas.util.clipboard' ], package_data={'pandas.tests': ['data/*.csv'], diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py index 7c821374a83ab..edc29bf3eec37 100644 --- a/vb_suite/binary_ops.py +++ b/vb_suite/binary_ops.py @@ -21,7 +21,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -32,7 +32,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) @@ -53,7 +53,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.computation.expressions as expr +import 
pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -63,7 +63,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) @@ -129,7 +129,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -139,7 +139,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) diff --git a/vb_suite/eval.py b/vb_suite/eval.py index bf80aad956184..011669256a9bc 100644 --- a/vb_suite/eval.py +++ b/vb_suite/eval.py @@ -10,7 +10,7 @@ """ setup = common_setup + """ -import pandas.computation.expressions as expr +import pandas.core.computation.expressions as expr expr.set_numexpr_threads(1) """ diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index 3d95d52dccd71..ff634bf2a8fc7 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -141,7 +141,7 @@ setup = common_setup + """ try: - import pandas.computation.expressions as expr + import pandas.core.computation.expressions as expr except: expr = None @@ -159,7 +159,7 @@ setup = common_setup + """ try: - import pandas.computation.expressions as expr + import pandas.core.computation.expressions as expr except: expr = None diff --git a/vb_suite/sparse.py b/vb_suite/sparse.py index 53e2778ee0865..b1c1a2f24e41d 100644 --- a/vb_suite/sparse.py +++ 
b/vb_suite/sparse.py @@ -55,11 +55,11 @@ setup = common_setup + """ import scipy.sparse -import pandas.sparse.series +import pandas.core.sparse.series A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100)) """ -stmt = "ss = pandas.sparse.series.SparseSeries.from_coo(A)" +stmt = "ss = pandas.core.sparse.series.SparseSeries.from_coo(A)" sparse_series_from_coo = Benchmark(stmt, setup, name="sparse_series_from_coo", start_date=datetime(2015, 1, 3)) From 3119e909f7902a8d5f6d588e645e8744578afda9 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 16 Apr 2017 00:51:29 +0000 Subject: [PATCH 31/56] COMPAT: use the correct dtype for interval comparisons on 32-bit (#16011) --- pandas/tests/indexes/test_interval.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 79b6ff2e7a2a7..2d0015a5258ed 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -779,8 +779,8 @@ def test_get_loc_closed(self): np.array([0], dtype='int64')) def test_get_indexer_closed(self): - x = np.arange(1000, dtype='intp') - found = x + x = np.arange(1000, dtype='float64') + found = x.astype('intp') not_found = (-1 * np.ones(1000)).astype('intp') for leaf_size in [1, 10, 100, 10000]: for closed in ['left', 'right', 'both', 'neither']: From 39d7b113bc6e44a8883f3bf6a7cb6927cee15a07 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sun, 16 Apr 2017 13:18:45 +0200 Subject: [PATCH 32/56] CLN: updates to benchmarks after repo reorg (#16020) --- asv_bench/benchmarks/binary_ops.py | 5 ++++- asv_bench/benchmarks/categoricals.py | 7 +++++-- asv_bench/benchmarks/eval.py | 5 ++++- asv_bench/benchmarks/indexing.py | 4 ---- asv_bench/benchmarks/plotting.py | 5 ++++- asv_bench/benchmarks/reshape.py | 2 +- asv_bench/benchmarks/sparse.py | 6 ++---- asv_bench/benchmarks/timeseries.py | 5 ++++- 8 files changed, 24 insertions(+), 15 
deletions(-) diff --git a/asv_bench/benchmarks/binary_ops.py b/asv_bench/benchmarks/binary_ops.py index cc869996b49cd..0ca21b929ea17 100644 --- a/asv_bench/benchmarks/binary_ops.py +++ b/asv_bench/benchmarks/binary_ops.py @@ -1,5 +1,8 @@ from .pandas_vb_common import * -import pandas.core.computation.expressions as expr +try: + import pandas.core.computation.expressions as expr +except ImportError: + import pandas.computation.expressions as expr class Ops(object): diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 5b0dd126acdea..6432ccfb19efe 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,8 +1,11 @@ from .pandas_vb_common import * try: - from pandas.core.dtypes.concat import union_categoricals + from pandas.api.types import union_categoricals except ImportError: - pass + try: + from pandas.types.concat import union_categoricals + except ImportError: + pass class Categoricals(object): diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index ee091e57c6403..6f33590ee9e33 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -1,6 +1,9 @@ from .pandas_vb_common import * import pandas as pd -import pandas.core.computation.expressions as expr +try: + import pandas.core.computation.expressions as expr +except ImportError: + import pandas.computation.expressions as expr class Eval(object): diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 79844414f2746..8947a0fdd796c 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,8 +1,4 @@ from .pandas_vb_common import * -try: - import pandas.core.computation.expressions as expr -except: - expr = None class Int64Indexing(object): diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index 757c3e27dd333..dda684b35e301 100644 --- a/asv_bench/benchmarks/plotting.py +++ 
b/asv_bench/benchmarks/plotting.py @@ -4,7 +4,10 @@ except ImportError: def date_range(start=None, end=None, periods=None, freq=None): return DatetimeIndex(start, end, periods=periods, offset=freq) -from pandas.tools.plotting import andrews_curves +try: + from pandas.plotting import andrews_curves +except ImportError: + from pandas.tools.plotting import andrews_curves class TimeseriesPlotting(object): diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index b9346c497b9ef..177e3e7cb87fa 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,5 +1,5 @@ from .pandas_vb_common import * -from pandas.core.reshape import melt, wide_to_long +from pandas import melt, wide_to_long class melt_dataframe(object): diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index 7d424592ed877..500149b89b08b 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -1,8 +1,6 @@ from .pandas_vb_common import * -import pandas.core.sparse.series import scipy.sparse -from pandas.core.sparse import SparseSeries, SparseDataFrame -from pandas.core.sparse import SparseDataFrame +from pandas import SparseSeries, SparseDataFrame class sparse_series_to_frame(object): @@ -37,7 +35,7 @@ def setup(self): self.A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100)) def time_sparse_series_from_coo(self): - self.ss = pandas.core.sparse.series.SparseSeries.from_coo(self.A) + self.ss = SparseSeries.from_coo(self.A) class sparse_series_to_coo(object): diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index dfe3f0ef87c11..b63b3386a7563 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -1,4 +1,7 @@ -from pandas.tseries.converter import DatetimeConverter +try: + from pandas.plotting._converter import DatetimeConverter +except ImportError: + from pandas.tseries.converter import 
DatetimeConverter from .pandas_vb_common import * import pandas as pd from datetime import timedelta From 1f812e37f33bf79beacbbfd2b1e0fa38958006e2 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 16 Apr 2017 13:01:17 +0000 Subject: [PATCH 33/56] CLN: move pandas.formats -> pandas.io.formats (#16013) --- MANIFEST.in | 2 +- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/core/api.py | 2 +- pandas/core/categorical.py | 4 ++-- pandas/core/computation/engines.py | 2 +- pandas/core/computation/eval.py | 2 +- pandas/core/computation/expr.py | 2 +- pandas/core/computation/ops.py | 2 +- pandas/core/computation/pytables.py | 2 +- pandas/core/config.py | 4 ++-- pandas/core/config_init.py | 2 +- pandas/core/frame.py | 10 +++++----- pandas/core/generic.py | 4 ++-- pandas/core/groupby.py | 2 +- pandas/core/internals.py | 6 +++--- pandas/core/panel.py | 2 +- pandas/core/series.py | 2 +- pandas/core/sparse/array.py | 2 +- pandas/core/sparse/list.py | 2 +- pandas/indexes/base.py | 6 +++--- pandas/indexes/frozen.py | 2 +- pandas/indexes/interval.py | 2 +- pandas/indexes/multi.py | 4 ++-- pandas/indexes/numeric.py | 2 +- pandas/io/api.py | 2 +- pandas/io/common.py | 2 +- pandas/io/excel.py | 2 +- pandas/{ => io}/formats/__init__.py | 0 pandas/{ => io}/formats/format.py | 2 +- pandas/{ => io}/formats/printing.py | 0 pandas/{ => io}/formats/style.py | 2 +- pandas/{ => io}/formats/templates/html.tpl | 0 pandas/io/html.py | 2 +- pandas/io/json/json.py | 2 +- pandas/io/pytables.py | 2 +- pandas/plotting/_core.py | 2 +- pandas/plotting/_misc.py | 2 +- pandas/plotting/_timeseries.py | 2 +- pandas/tests/api/test_api.py | 2 +- pandas/tests/frame/test_analytics.py | 2 +- pandas/tests/frame/test_operators.py | 2 +- pandas/tests/frame/test_repr_info.py | 2 +- pandas/tests/groupby/test_aggregate.py | 2 +- pandas/tests/indexing/common.py | 2 +- pandas/tests/{ => io}/formats/__init__.py | 0 pandas/tests/{ => io}/formats/data/unicode_series.csv | 0 pandas/tests/{ => 
io}/formats/test_eng_formatting.py | 2 +- pandas/tests/{ => io}/formats/test_format.py | 6 +++--- pandas/tests/{ => io}/formats/test_printing.py | 4 ++-- pandas/tests/{ => io}/formats/test_style.py | 2 +- pandas/tests/{ => io}/formats/test_to_csv.py | 0 pandas/tests/{ => io}/formats/test_to_html.py | 2 +- pandas/tests/{ => io}/formats/test_to_latex.py | 0 pandas/tests/io/test_excel.py | 4 ++-- pandas/tests/io/test_pytables.py | 2 +- pandas/tests/plotting/test_frame.py | 2 +- pandas/tests/series/test_api.py | 2 +- pandas/tests/test_expressions.py | 2 +- pandas/tests/test_generic.py | 2 +- pandas/tests/test_panel.py | 2 +- pandas/tseries/base.py | 2 +- pandas/tseries/index.py | 6 +++--- pandas/tseries/tdi.py | 4 ++-- pandas/util/testing.py | 2 +- setup.py | 4 ++-- 65 files changed, 78 insertions(+), 77 deletions(-) rename pandas/{ => io}/formats/__init__.py (100%) rename pandas/{ => io}/formats/format.py (99%) rename pandas/{ => io}/formats/printing.py (100%) rename pandas/{ => io}/formats/style.py (99%) rename pandas/{ => io}/formats/templates/html.tpl (100%) rename pandas/tests/{ => io}/formats/__init__.py (100%) rename pandas/tests/{ => io}/formats/data/unicode_series.csv (100%) rename pandas/tests/{ => io}/formats/test_eng_formatting.py (99%) rename pandas/tests/{ => io}/formats/test_format.py (99%) rename pandas/tests/{ => io}/formats/test_printing.py (98%) rename pandas/tests/{ => io}/formats/test_style.py (99%) rename pandas/tests/{ => io}/formats/test_to_csv.py (100%) rename pandas/tests/{ => io}/formats/test_to_html.py (99%) rename pandas/tests/{ => io}/formats/test_to_latex.py (100%) diff --git a/MANIFEST.in b/MANIFEST.in index 31de3466cb357..8bd83a7d56948 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -25,4 +25,4 @@ global-exclude *.png # recursive-include LICENSES * include versioneer.py include pandas/_version.py -include pandas/formats/templates/*.tpl +include pandas/io/formats/templates/*.tpl diff --git a/doc/source/whatsnew/v0.20.0.txt 
b/doc/source/whatsnew/v0.20.0.txt index 33d80f8347b0a..08208973b70d2 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1331,6 +1331,7 @@ If indicated, a deprecation warning will be issued if you reference theses modul "pandas.hashtable", "pandas._libs.hashtable", "" "pandas.json", "pandas.io.json.libjson", "X" "pandas.parser", "pandas.io.libparsers", "X" + "pandas.formats", "pandas.io.formats", "" "pandas.sparse", "pandas.core.sparse", "" "pandas.types", "pandas.core.dtypes", "" "pandas.io.sas.saslib", "pandas.io.sas.libsas", "" diff --git a/pandas/core/api.py b/pandas/core/api.py index 8e8969e1f6b26..3c739d85d0074 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -8,7 +8,7 @@ from pandas.core.dtypes.missing import isnull, notnull from pandas.core.categorical import Categorical from pandas.core.groupby import Grouper -from pandas.formats.format import set_eng_float_format +from pandas.io.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index d1f060113cf1d..50181486d8cf7 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1615,7 +1615,7 @@ def _repr_categories(self): """ return the base repr for the categories """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) - from pandas.formats import format as fmt + from pandas.io.formats import format as fmt if len(self.categories) > max_categories: num = max_categories // 2 head = fmt.format_array(self.categories[:num], None) @@ -1663,7 +1663,7 @@ def _repr_footer(self): return u('Length: %d\n%s') % (len(self), self._repr_categories_info()) def _get_repr(self, length=True, na_rep='NaN', footer=True): - from pandas.formats import format as fmt + from pandas.io.formats import format as fmt formatter = 
fmt.CategoricalFormatter(self, length=length, na_rep=na_rep, footer=footer) result = formatter.to_string() diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 675a3d5eca792..f45d0355e7442 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -6,7 +6,7 @@ from pandas import compat from pandas.compat import map -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing from pandas.core.computation.align import _align, _reconstruct_object from pandas.core.computation.ops import ( UndefinedVariableError, diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index fc3986e317d13..15e13025a7c53 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -5,7 +5,7 @@ import warnings import tokenize -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.computation import _NUMEXPR_INSTALLED from pandas.core.computation.expr import Expr, _parsers, tokenize_string from pandas.core.computation.scope import _ensure_scope diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 01c5d1f6f100c..51785ebcd9ec8 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -12,7 +12,7 @@ from pandas.compat import StringIO, lmap, zip, reduce, string_types from pandas.core.base import StringMixin from pandas.core import common as com -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing from pandas.tools.util import compose from pandas.core.computation.ops import ( _cmp_ops_syms, _bool_ops_syms, diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 91c414bbc0ec1..7ba2c16530cad 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -11,7 +11,7 @@ import pandas as pd from pandas.compat import PY3, string_types, text_type 
import pandas.core.common as com -from pandas.formats.printing import pprint_thing, pprint_thing_encoded +from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded from pandas.core.base import StringMixin from pandas.core.computation.common import _ensure_decoded, _result_type_many from pandas.core.computation.scope import _DEFAULT_GLOBALS diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 8d0f23e28c0a2..285ff346158a0 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -9,7 +9,7 @@ import pandas.core.common as com from pandas.compat import u, string_types, DeepChainMap from pandas.core.base import StringMixin -from pandas.formats.printing import pprint_thing, pprint_thing_encoded +from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded from pandas.core.computation import expr, ops from pandas.core.computation.ops import is_term, UndefinedVariableError from pandas.core.computation.expr import BaseExprVisitor diff --git a/pandas/core/config.py b/pandas/core/config.py index 39ed2f9545266..b406f6724aa6d 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -774,7 +774,7 @@ def is_instance_factory(_type): """ if isinstance(_type, (tuple, list)): _type = tuple(_type) - from pandas.formats.printing import pprint_thing + from pandas.io.formats.printing import pprint_thing type_repr = "|".join(map(pprint_thing, _type)) else: type_repr = "'%s'" % _type @@ -792,7 +792,7 @@ def is_one_of_factory(legal_values): legal_values = [c for c in legal_values if not callable(c)] def inner(x): - from pandas.formats.printing import pprint_thing as pp + from pandas.io.formats.printing import pprint_thing as pp if x not in legal_values: if not any([c(x) for c in callables]): diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 7307980c8312e..f8cbdffa27bb4 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -15,7 +15,7 
@@ from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory, is_one_of_factory, get_default_val, is_callable) -from pandas.formats.format import detect_console_encoding +from pandas.io.formats.format import detect_console_encoding # # options from the "display" namespace diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3a5a0e7044e79..732ce7ce695b0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -90,8 +90,8 @@ import pandas.core.common as com import pandas.core.nanops as nanops import pandas.core.ops as ops -import pandas.formats.format as fmt -from pandas.formats.printing import pprint_thing +import pandas.io.formats.format as fmt +from pandas.io.formats.printing import pprint_thing import pandas.plotting._core as gfx from pandas._libs import lib, algos as libalgos @@ -636,9 +636,9 @@ def style(self): See Also -------- - pandas.formats.style.Styler + pandas.io.formats.style.Styler """ - from pandas.formats.style import Styler + from pandas.io.formats.style import Styler return Styler(self) def iteritems(self): @@ -1724,7 +1724,7 @@ def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, - If False, never show counts. 
""" - from pandas.formats.format import _put_lines + from pandas.io.formats.format import _put_lines if buf is None: # pragma: no cover buf = sys.stdout diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 167af8dfc0d8e..5f0c65ddfb9c3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -43,8 +43,8 @@ import pandas.core.algorithms as algos import pandas.core.common as com import pandas.core.missing as missing -from pandas.formats.printing import pprint_thing -from pandas.formats.format import format_percentiles +from pandas.io.formats.printing import pprint_thing +from pandas.io.formats.format import format_percentiles from pandas.tseries.frequencies import to_offset from pandas import compat from pandas.compat.numpy import function as nv diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 2cbcb9ef6efec..3fd41f3456732 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -55,7 +55,7 @@ decons_obs_group_ids, get_indexer_dict) from pandas.util.decorators import (cache_readonly, Substitution, Appender, make_signature) -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.util.validators import validate_kwargs import pandas.core.algorithms as algorithms diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f7d7efd66f8db..c698bcb9fa5ee 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -55,7 +55,7 @@ from pandas.core.indexing import maybe_convert_indices, length_of_indexer from pandas.core.categorical import Categorical, maybe_to_categorical from pandas.tseries.index import DatetimeIndex -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.core.missing as missing from pandas.core.sparse.array import _maybe_to_sparse, SparseArray @@ -1614,7 +1614,7 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None, values[mask] = na_rep return values 
- from pandas.formats.format import FloatArrayFormatter + from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter(values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, @@ -2328,7 +2328,7 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None, if slicer is not None: values = values[..., slicer] - from pandas.formats.format import _get_format_datetime64_from_values + from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(values, date_format) result = tslib.format_array_from_datetime( diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 76053b3bdb83d..fefe75163d033 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -26,7 +26,7 @@ from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, _get_combined_index) -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.indexing import maybe_droplevels from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, diff --git a/pandas/core/series.py b/pandas/core/series.py index 596dae4345cb3..9022bff092ac3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -67,7 +67,7 @@ import pandas.core.common as com import pandas.core.nanops as nanops -import pandas.formats.format as fmt +import pandas.io.formats.format as fmt from pandas.util.decorators import Appender, deprecate_kwarg, Substitution from pandas.util.validators import validate_bool_kwarg diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 74e9be54ae6df..d3fdfe5533a03 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -34,7 +34,7 @@ from pandas._libs import index as libindex import pandas.core.algorithms as algos import pandas.core.ops as ops -import pandas.formats.printing as printing +import 
pandas.io.formats.printing as printing from pandas.util.decorators import Appender from pandas.indexes.base import _index_shared_docs diff --git a/pandas/core/sparse/list.py b/pandas/core/sparse/list.py index 381a811ac828b..e69ad6d0ab7ad 100644 --- a/pandas/core/sparse/list.py +++ b/pandas/core/sparse/list.py @@ -1,7 +1,7 @@ import warnings import numpy as np from pandas.core.base import PandasObject -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.dtypes.common import is_scalar from pandas.core.sparse.array import SparseArray diff --git a/pandas/indexes/base.py b/pandas/indexes/base.py index b0439e122ea9e..d88e54dcc9521 100644 --- a/pandas/indexes/base.py +++ b/pandas/indexes/base.py @@ -48,7 +48,7 @@ import pandas.core.dtypes.concat as _concat import pandas.core.missing as missing import pandas.core.algorithms as algos -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.ops import _comp_method_OBJECT_ARRAY from pandas.core.strings import StringAccessorMixin from pandas.core.config import get_option @@ -831,7 +831,7 @@ def _format_data(self): """ Return the formatted data as a unicode string """ - from pandas.formats.format import get_console_size, _get_adjustment + from pandas.io.formats.format import get_console_size, _get_adjustment display_width, _ = get_console_size() if display_width is None: display_width = get_option('display.width') or 80 @@ -1842,7 +1842,7 @@ def format(self, name=False, formatter=None, **kwargs): def _format_with_header(self, header, na_rep='NaN', **kwargs): values = self.values - from pandas.formats.format import format_array + from pandas.io.formats.format import format_array if is_categorical_dtype(values.dtype): values = np.array(values) diff --git a/pandas/indexes/frozen.py b/pandas/indexes/frozen.py index 19b04319b37f9..3c6b922178abf 100644 --- a/pandas/indexes/frozen.py +++ 
b/pandas/indexes/frozen.py @@ -11,7 +11,7 @@ import numpy as np from pandas.core.base import PandasObject from pandas.core.dtypes.cast import coerce_indexer_dtype -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing class FrozenList(PandasObject, list): diff --git a/pandas/indexes/interval.py b/pandas/indexes/interval.py index 88a2b0ff9595b..6f68e67d702fe 100644 --- a/pandas/indexes/interval.py +++ b/pandas/indexes/interval.py @@ -921,7 +921,7 @@ def _format_with_header(self, header, **kwargs): def _format_native_types(self, na_rep='', quoting=None, **kwargs): """ actually format my specific types """ - from pandas.formats.format import IntervalArrayFormatter + from pandas.io.formats.format import IntervalArrayFormatter return IntervalArrayFormatter(values=self, na_rep=na_rep, justify='all').get_result() diff --git a/pandas/indexes/multi.py b/pandas/indexes/multi.py index f410dbddb4428..b341bfe7b5215 100644 --- a/pandas/indexes/multi.py +++ b/pandas/indexes/multi.py @@ -31,7 +31,7 @@ import pandas.core.common as com import pandas.core.missing as missing import pandas.core.algorithms as algos -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.config import get_option @@ -935,7 +935,7 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False, sentinel=sentinel) if adjoin: - from pandas.formats.format import _get_adjustment + from pandas.io.formats.format import _get_adjustment adj = _get_adjustment() return adj.adjoin(space, *result_levels).split('\n') else: diff --git a/pandas/indexes/numeric.py b/pandas/indexes/numeric.py index 2f68101520229..6b9999239cd88 100644 --- a/pandas/indexes/numeric.py +++ b/pandas/indexes/numeric.py @@ -302,7 +302,7 @@ def _convert_slice_indexer(self, key, kind=None): def _format_native_types(self, na_rep='', float_format=None, decimal='.', quoting=None, **kwargs): - from pandas.formats.format import 
FloatArrayFormatter + from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter(self.values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, diff --git a/pandas/io/api.py b/pandas/io/api.py index 4744d41472ff1..58c388d306721 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -18,7 +18,7 @@ from pandas.io.packers import read_msgpack, to_msgpack from pandas.io.gbq import read_gbq try: - from pandas.formats.style import Styler + from pandas.io.formats.style import Styler except ImportError: from pandas.compat import add_metaclass as _add_metaclass from pandas.util.importing import _UnSubclassable diff --git a/pandas/io/common.py b/pandas/io/common.py index 5cd5a9cd3e8dc..28f90972f95de 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -8,7 +8,7 @@ from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.common import AbstractMethodError from pandas.core.dtypes.common import is_number, is_file_like diff --git a/pandas/io/excel.py b/pandas/io/excel.py index b19837973a94a..637635a64d4d0 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -24,7 +24,7 @@ from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, string_types, OrderedDict) from pandas.core import config -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.compat as compat import pandas.compat.openpyxl_compat as openpyxl_compat from warnings import warn diff --git a/pandas/formats/__init__.py b/pandas/io/formats/__init__.py similarity index 100% rename from pandas/formats/__init__.py rename to pandas/io/formats/__init__.py diff --git a/pandas/formats/format.py b/pandas/io/formats/format.py similarity index 99% rename from pandas/formats/format.py rename to pandas/io/formats/format.py index 
aad6c182416f6..20df60eb96299 100644 --- a/pandas/formats/format.py +++ b/pandas/io/formats/format.py @@ -33,7 +33,7 @@ from pandas.util.terminal import get_terminal_size from pandas.core.config import get_option, set_option from pandas.io.common import _get_handle, UnicodeWriter, _expand_user -from pandas.formats.printing import adjoin, justify, pprint_thing +from pandas.io.formats.printing import adjoin, justify, pprint_thing import pandas.core.common as com import pandas._libs.lib as lib from pandas._libs.tslib import (iNaT, Timestamp, Timedelta, diff --git a/pandas/formats/printing.py b/pandas/io/formats/printing.py similarity index 100% rename from pandas/formats/printing.py rename to pandas/io/formats/printing.py diff --git a/pandas/formats/style.py b/pandas/io/formats/style.py similarity index 99% rename from pandas/formats/style.py rename to pandas/io/formats/style.py index 3ca1d8259729d..9321c29c99790 100644 --- a/pandas/formats/style.py +++ b/pandas/io/formats/style.py @@ -107,7 +107,7 @@ class Styler(object): -------- pandas.DataFrame.style """ - loader = PackageLoader("pandas", "formats/templates") + loader = PackageLoader("pandas", "io/formats/templates") env = Environment( loader=loader, trim_blocks=True, diff --git a/pandas/formats/templates/html.tpl b/pandas/io/formats/templates/html.tpl similarity index 100% rename from pandas/formats/templates/html.tpl rename to pandas/io/formats/templates/html.tpl diff --git a/pandas/io/html.py b/pandas/io/html.py index 8e5b8def1ea91..2613f26ae5f52 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -21,7 +21,7 @@ raise_with_traceback, binary_type) from pandas import Series from pandas.core.common import AbstractMethodError -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing _IMPORTS = False _HAS_BS4 = False diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 19e84c04b7ddb..7149ab497a00d 100644 --- a/pandas/io/json/json.py +++ 
b/pandas/io/json/json.py @@ -9,7 +9,7 @@ from pandas import Series, DataFrame, to_datetime from pandas.io.common import get_filepath_or_buffer, _get_handle from pandas.core.common import AbstractMethodError -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits from .table_schema import build_table_schema from pandas.core.dtypes.common import is_period_dtype diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 4771134f3fe5c..17bedd016f617 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -32,7 +32,7 @@ from pandas.io.common import _stringify_path from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.base import StringMixin -from pandas.formats.printing import adjoin, pprint_thing +from pandas.io.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning from pandas.core.common import _asarray_tuplesafe from pandas.core.algorithms import match, unique diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 02f2df4949189..374244acfe173 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -24,7 +24,7 @@ from pandas.tseries.period import PeriodIndex from pandas.compat import range, lrange, map, zip, string_types import pandas.compat as compat -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.util.decorators import Appender from pandas.plotting._compat import (_mpl_ge_1_3_1, diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index f09bcef82b45d..93eceba9a3f02 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -7,7 +7,7 @@ from pandas.util.decorators import deprecate_kwarg from pandas.core.dtypes.missing import notnull from pandas.compat import range, lrange, lmap, zip -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import 
pprint_thing from pandas.plotting._style import _get_standard_colors diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 7533e417b0026..f8c7c1ee9ee10 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -9,7 +9,7 @@ from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex from pandas.tseries.tdi import TimedeltaIndex -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.compat as compat from pandas.plotting._converter import (TimeSeries_DateLocator, diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 221458e629055..32ed77d94f637 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -31,7 +31,7 @@ class TestPDApi(Base, tm.TestCase): # top-level sub-packages lib = ['api', 'compat', 'core', - 'indexes', 'formats', 'errors', 'pandas', + 'indexes', 'errors', 'pandas', 'plotting', 'test', 'tools', 'tseries', 'util', 'options', 'io'] diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index dda52bbc536c9..e165e30c59f0f 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -18,7 +18,7 @@ import pandas as pd import pandas.core.nanops as nanops import pandas.core.algorithms as algorithms -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing import pandas.util.testing as tm from pandas.tests.frame.common import TestData diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 268854fe6b62d..afb26143f4540 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -15,7 +15,7 @@ from pandas import (DataFrame, Series, MultiIndex, Timestamp, date_range) import pandas.core.common as com -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing import 
pandas as pd from pandas.util.testing import (assert_numpy_array_equal, diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 918938c1758ed..740a24f38c316 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -11,7 +11,7 @@ from pandas import (DataFrame, compat, option_context) from pandas.compat import StringIO, lrange, u -import pandas.formats.format as fmt +import pandas.io.formats.format as fmt import pandas as pd import pandas.util.testing as tm diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 2abae97b3151b..53a92ece5d6cc 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -18,7 +18,7 @@ from pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.groupby import SpecificationError, DataError from pandas.compat import OrderedDict -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 51c0889a6f091..b555a9c1fd0df 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -8,7 +8,7 @@ from pandas.core.dtypes.common import is_scalar from pandas import Series, DataFrame, Panel, date_range, UInt64Index from pandas.util import testing as tm -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing _verbose = False diff --git a/pandas/tests/formats/__init__.py b/pandas/tests/io/formats/__init__.py similarity index 100% rename from pandas/tests/formats/__init__.py rename to pandas/tests/io/formats/__init__.py diff --git a/pandas/tests/formats/data/unicode_series.csv b/pandas/tests/io/formats/data/unicode_series.csv similarity index 100% rename from pandas/tests/formats/data/unicode_series.csv rename to 
pandas/tests/io/formats/data/unicode_series.csv diff --git a/pandas/tests/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py similarity index 99% rename from pandas/tests/formats/test_eng_formatting.py rename to pandas/tests/io/formats/test_eng_formatting.py index d2badd4fc160a..225a4921961cf 100644 --- a/pandas/tests/formats/test_eng_formatting.py +++ b/pandas/tests/io/formats/test_eng_formatting.py @@ -2,7 +2,7 @@ import pandas as pd from pandas import DataFrame from pandas.compat import u -import pandas.formats.format as fmt +import pandas.io.formats.format as fmt from pandas.util import testing as tm diff --git a/pandas/tests/formats/test_format.py b/pandas/tests/io/formats/test_format.py similarity index 99% rename from pandas/tests/formats/test_format.py rename to pandas/tests/io/formats/test_format.py index 92f6a600a9e2a..7c74f82741e8c 100644 --- a/pandas/tests/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -25,8 +25,8 @@ is_platform_32bit) import pandas.compat as compat -import pandas.formats.format as fmt -import pandas.formats.printing as printing +import pandas.io.formats.format as fmt +import pandas.io.formats.printing as printing import pandas.util.testing as tm from pandas.util.terminal import get_terminal_size @@ -889,7 +889,7 @@ def test_frame_info_encoding(self): fmt.set_option('display.max_rows', 200) def test_pprint_thing(self): - from pandas.formats.printing import pprint_thing as pp_t + from pandas.io.formats.printing import pprint_thing as pp_t if PY3: pytest.skip("doesn't work on Python 3") diff --git a/pandas/tests/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py similarity index 98% rename from pandas/tests/formats/test_printing.py rename to pandas/tests/io/formats/test_printing.py index cacba2ad3f3ba..23aaf472316ec 100644 --- a/pandas/tests/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -2,8 +2,8 @@ import pytest from pandas import compat 
import pandas as pd -import pandas.formats.printing as printing -import pandas.formats.format as fmt +import pandas.io.formats.printing as printing +import pandas.io.formats.format as fmt import pandas.util.testing as tm import pandas.core.config as cf diff --git a/pandas/tests/formats/test_style.py b/pandas/tests/io/formats/test_style.py similarity index 99% rename from pandas/tests/formats/test_style.py rename to pandas/tests/io/formats/test_style.py index 08f8f2f32763d..c02d94d8918b3 100644 --- a/pandas/tests/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -9,7 +9,7 @@ import pandas.util.testing as tm jinja2 = pytest.importorskip('jinja2') -from pandas.formats.style import Styler, _get_level_lengths # noqa +from pandas.io.formats.style import Styler, _get_level_lengths # noqa class TestStyler(TestCase): diff --git a/pandas/tests/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py similarity index 100% rename from pandas/tests/formats/test_to_csv.py rename to pandas/tests/io/formats/test_to_csv.py diff --git a/pandas/tests/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py similarity index 99% rename from pandas/tests/formats/test_to_html.py rename to pandas/tests/io/formats/test_to_html.py index 771c66e84037c..e90224dcb705a 100644 --- a/pandas/tests/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -11,7 +11,7 @@ from pandas import compat, DataFrame, MultiIndex, option_context, Index from pandas.compat import u, lrange, StringIO from pandas.util import testing as tm -import pandas.formats.format as fmt +import pandas.io.formats.format as fmt div_style = '' try: diff --git a/pandas/tests/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py similarity index 100% rename from pandas/tests/formats/test_to_latex.py rename to pandas/tests/io/formats/test_to_latex.py diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index d83e26995020c..2fada4e1dc2cc 100644 --- 
a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -2006,7 +2006,7 @@ def test_to_excel_styleconverter(self): self.assertEqual(kw['protection'], protection) def test_write_cells_merge_styled(self): - from pandas.formats.format import ExcelCell + from pandas.io.formats.format import ExcelCell from openpyxl import styles sheet_name = 'merge_styled' @@ -2119,7 +2119,7 @@ def test_write_cells_merge_styled(self): if not openpyxl_compat.is_compat(major_ver=2): pytest.skip('incompatible openpyxl version') - from pandas.formats.format import ExcelCell + from pandas.io.formats.format import ExcelCell sheet_name = 'merge_styled' diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 2df8872e23616..c6a54211e73ad 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -17,7 +17,7 @@ isnull) from pandas.compat import is_platform_windows, PY3, PY35 -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing tables = pytest.importorskip('tables') from pandas.io.pytables import TableIterator diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index fe07f5b9f193e..e8ff77f9cc0eb 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -13,7 +13,7 @@ bdate_range) from pandas.core.dtypes.api import is_list_like from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3 -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm from pandas.util.testing import slow diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 2facbaf1fe31e..3cb1e29bde7d9 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -9,7 +9,7 @@ from pandas.compat import range from pandas import compat -import pandas.formats.printing as printing +import pandas.io.formats.printing 
as printing from pandas.util.testing import (assert_series_equal, ensure_clean) import pandas.util.testing as tm diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index b353f73f4004d..ddbaedc3ef919 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -17,7 +17,7 @@ from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, assert_panel_equal, assert_panel4d_equal, slow) -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 80059277407c3..a2ded195d9343 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -16,7 +16,7 @@ date_range, period_range, Panel4D) from pandas.core.index import MultiIndex -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing from pandas.compat import range, zip, PY3 from pandas import compat diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index f0e53046e3552..60173dda57e04 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -16,7 +16,7 @@ from pandas.core.panel import Panel from pandas.core.series import remove_na -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas import compat from pandas.compat import range, lrange, StringIO, OrderedDict, signature diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index cf79cadef78dd..b419aae709683 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -22,7 +22,7 @@ from pandas.core.algorithms import checked_add_with_arr from pandas.core.common import AbstractMethodError -import pandas.formats.printing as printing +import pandas.io.formats.printing as printing from pandas._libs import (tslib as libts, lib, Timedelta, Timestamp, iNaT, NaT) from 
pandas._libs.period import Period diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 95594652e3943..a964b6d9e09d3 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -686,12 +686,12 @@ def _mpl_repr(self): @cache_readonly def _is_dates_only(self): - from pandas.formats.format import _is_dates_only + from pandas.io.formats.format import _is_dates_only return _is_dates_only(self.values) @property def _formatter_func(self): - from pandas.formats.format import _get_format_datetime64 + from pandas.io.formats.format import _get_format_datetime64 formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) return lambda x: "'%s'" % formatter(x, tz=self.tz) @@ -830,7 +830,7 @@ def _add_offset(self, offset): return self.astype('O') + offset def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): - from pandas.formats.format import _get_format_datetime64_from_values + from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(self, date_format) return libts.format_array_from_datetime(self.asi8, diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index c26f023ea942a..020b7328238b7 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -290,7 +290,7 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): @property def _formatter_func(self): - from pandas.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import _get_format_timedelta64 return _get_format_timedelta64(self, box=True) def __setstate__(self, state): @@ -366,7 +366,7 @@ def _sub_datelike(self, other): def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): - from pandas.formats.format import Timedelta64Formatter + from pandas.io.formats.format import Timedelta64Formatter return Timedelta64Formatter(values=self, nat_rep=na_rep, justify='all').get_result() diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 
638a190d810a5..08323fc0c9050 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -33,7 +33,7 @@ is_interval_dtype, is_sequence, is_list_like) -from pandas.formats.printing import pprint_thing +from pandas.io.formats.printing import pprint_thing from pandas.core.algorithms import take_1d import pandas.compat as compat diff --git a/setup.py b/setup.py index b7c4581c4ecfe..5a7efa15452ac 100755 --- a/setup.py +++ b/setup.py @@ -648,8 +648,8 @@ def pxd(name): 'pandas.io.json', 'pandas.io.sas', 'pandas.io.msgpack', + 'pandas.io.formats', 'pandas._libs', - 'pandas.formats', 'pandas.plotting', 'pandas.stats', 'pandas.util', @@ -668,9 +668,9 @@ def pxd(name): 'pandas.tests.io.parser', 'pandas.tests.io.sas', 'pandas.tests.io.msgpack', + 'pandas.tests.io.formats', 'pandas.tests.groupby', 'pandas.tests.series', - 'pandas.tests.formats', 'pandas.tests.scalar', 'pandas.tests.tseries', 'pandas.tests.tools', From 8daf9a7e45344dc5a247410e037dccb41b97a3db Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sun, 16 Apr 2017 16:05:17 +0200 Subject: [PATCH 34/56] API: expose public testing functions in pandas.testing (GH9895) (#16003) --- doc/source/api.rst | 10 ++++++++++ doc/source/whatsnew/v0.20.0.txt | 19 +++++++++++++++++-- pandas/__init__.py | 1 + pandas/testing.py | 8 ++++++++ pandas/tests/api/test_api.py | 13 ++++++++++++- pandas/util/testing.py | 2 +- 6 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 pandas/testing.py diff --git a/doc/source/api.rst b/doc/source/api.rst index 6d1765ce65714..d0f548cc3d0b1 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1886,3 +1886,13 @@ Working with options get_option set_option option_context + +Testing functions +~~~~~~~~~~~~~~~~~ + +.. 
autosummary:: + :toctree: generated/ + + testing.assert_frame_equal + testing.assert_series_equal + testing.assert_index_equal diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 08208973b70d2..de33b7d4e3371 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -150,8 +150,8 @@ Commonly called 'unix epoch' or POSIX time. This was the previous default, so th .. _whatsnew_0200.enhancements.errors: -pandas errors -^^^^^^^^^^^^^ +``pandas.errors`` +^^^^^^^^^^^^^^^^^ We are adding a standard public module for all pandas exceptions & warnings ``pandas.errors``. (:issue:`14800`). Previously these exceptions & warnings could be imported from ``pandas.core.common`` or ``pandas.io.common``. These exceptions and warnings @@ -170,6 +170,21 @@ The following are now part of this API: 'UnsortedIndexError', 'UnsupportedFunctionCall'] + +.. _whatsnew_0200.enhancements.testing: + +``pandas.testing`` +^^^^^^^^^^^^^^^^^^ + +We are adding a standard module that exposes the public testing functions in ``pandas.testing`` (:issue:`9895`). Those functions can be used when writing tests for functionality using pandas objects. + +The following testing functions are now part of this API: + +- :func:`testing.assert_frame_equal` +- :func:`testing.assert_series_equal` +- :func:`testing.assert_index_equal` + + .. 
_whatsnew_0200.enhancements.groupby_access: Groupby Enhancements diff --git a/pandas/__init__.py b/pandas/__init__.py index 4e1bcbd613965..b221f9e43876b 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -64,6 +64,7 @@ from pandas.util.print_versions import show_versions from pandas.io.api import * from pandas.util._tester import test +import pandas.testing # extension module deprecations from pandas.util.depr_module import _DeprecatedModule diff --git a/pandas/testing.py b/pandas/testing.py new file mode 100644 index 0000000000000..3baf99957cb33 --- /dev/null +++ b/pandas/testing.py @@ -0,0 +1,8 @@ +# flake8: noqa + +""" +Public testing utility functions. +""" + +from pandas.util.testing import ( + assert_frame_equal, assert_series_equal, assert_index_equal) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 32ed77d94f637..ec9e6039c6ee4 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -32,7 +32,7 @@ class TestPDApi(Base, tm.TestCase): # top-level sub-packages lib = ['api', 'compat', 'core', 'indexes', 'errors', 'pandas', - 'plotting', 'test', 'tools', 'tseries', + 'plotting', 'test', 'testing', 'tools', 'tseries', 'util', 'options', 'io'] # these are already deprecated; awaiting removal @@ -128,6 +128,17 @@ def test_api(self): self.check(api, self.allowed) +class TestTesting(Base): + + funcs = ['assert_frame_equal', 'assert_series_equal', + 'assert_index_equal'] + + def test_testing(self): + + from pandas import testing + self.check(testing, self.funcs) + + class TestDatetoolsDeprecation(tm.TestCase): def test_deprecation_access_func(self): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 08323fc0c9050..d1f88c7041e05 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -861,7 +861,7 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, right : Index exact : bool / string {'equiv'}, default False Whether to check the Index class, dtype and 
inferred_type - are identical. If 'equiv', then RangeIndex can be substitued for + are identical. If 'equiv', then RangeIndex can be substituted for Int64Index as well check_names : bool, default True Whether to check the names attribute. From c4ede001c9c2853967d6c541ecbcda9e745f7686 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 16 Apr 2017 14:22:54 -0400 Subject: [PATCH 35/56] TST: fix location of formats data & templates --- doc/source/whatsnew/v0.19.0.txt | 2 +- setup.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt index 9b003034aa94a..0c9bb029b9b68 100644 --- a/doc/source/whatsnew/v0.19.0.txt +++ b/doc/source/whatsnew/v0.19.0.txt @@ -268,7 +268,7 @@ Categorical Concatenation .. ipython:: python - from pandas.types.concat import union_categoricals + from pandas.api.types import union_categoricals a = pd.Categorical(["b", "c"]) b = pd.Categorical(["a", "b"]) union_categoricals([a, b]) diff --git a/setup.py b/setup.py index 5a7efa15452ac..5e474153d0ee1 100755 --- a/setup.py +++ b/setup.py @@ -680,7 +680,6 @@ def pxd(name): 'pandas.util.clipboard' ], package_data={'pandas.tests': ['data/*.csv'], - 'pandas.tests.formats': ['data/*.csv'], 'pandas.tests.indexes': ['data/*.pickle'], 'pandas.tests.io': ['data/legacy_hdf/*.h5', 'data/legacy_pickle/*/*.pickle', @@ -703,9 +702,10 @@ def pxd(name): 'data/*.html', 'data/html_encoding/*.html', 'json/data/*.json'], + 'pandas.tests.io.formats': ['data/*.csv'], 'pandas.tests.tools': ['data/*.csv'], 'pandas.tests.tseries': ['data/*.pickle'], - 'pandas.formats': ['templates/*.tpl'] + 'pandas.io.formats': ['templates/*.tpl'] }, ext_modules=extensions, maintainer_email=EMAIL, From c364e1d10839bc3514a98fff26a658e5a6b9b336 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 16 Apr 2017 14:27:07 -0400 Subject: [PATCH 36/56] MAINT: Remove assert_equal from testing.py (#16017) --- pandas/tests/core/computation/test_compat.py | 2 +- 
pandas/tests/core/computation/test_eval.py | 24 ++++++++++---------- pandas/tests/core/sparse/test_frame.py | 10 ++++---- pandas/tests/frame/test_constructors.py | 4 ++-- pandas/tests/frame/test_replace.py | 4 ++-- pandas/tests/frame/test_subclass.py | 6 ++--- pandas/tests/groupby/test_groupby.py | 4 ++-- pandas/tests/indexes/datetimes/test_tools.py | 2 +- pandas/tests/io/formats/test_format.py | 4 ++-- pandas/tests/io/formats/test_printing.py | 4 ++-- pandas/tests/io/json/test_pandas.py | 2 +- pandas/tests/io/parser/common.py | 6 ++--- pandas/tests/io/test_excel.py | 2 +- pandas/tests/io/test_pickle.py | 22 +++++++++--------- pandas/tests/io/test_pytables.py | 6 ++--- pandas/tests/io/test_stata.py | 4 ++-- pandas/tests/plotting/test_converter.py | 3 +-- pandas/tests/series/test_api.py | 4 ++-- pandas/tests/test_common.py | 6 ++--- pandas/tests/test_generic.py | 4 ++-- pandas/tests/test_util.py | 4 ++-- pandas/tests/tools/test_util.py | 4 ++-- pandas/util/testing.py | 19 +--------------- 23 files changed, 66 insertions(+), 84 deletions(-) diff --git a/pandas/tests/core/computation/test_compat.py b/pandas/tests/core/computation/test_compat.py index 7b6c0f9c4c9aa..9ee9f674a1ddd 100644 --- a/pandas/tests/core/computation/test_compat.py +++ b/pandas/tests/core/computation/test_compat.py @@ -30,7 +30,7 @@ def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa res = pd.eval('a + b', engine=engine, parser=parser) - tm.assert_equal(res, 3) + assert res == 3 if engine == 'numexpr': try: diff --git a/pandas/tests/core/computation/test_eval.py b/pandas/tests/core/computation/test_eval.py index 1f519174ce210..38a310a17a9ab 100644 --- a/pandas/tests/core/computation/test_eval.py +++ b/pandas/tests/core/computation/test_eval.py @@ -1020,7 +1020,7 @@ def test_complex_series_frame_alignment(self, engine, parser): parser=parser) else: res = pd.eval('df2 + s + df', engine=engine, parser=parser) - tm.assert_equal(res.shape, expected.shape) + assert 
res.shape == expected.shape assert_frame_equal(res, expected) def test_performance_warning_for_poor_alignment(self, engine, parser): @@ -1057,13 +1057,13 @@ def test_performance_warning_for_poor_alignment(self, engine, parser): pd.eval('df + s', engine=engine, parser=parser) if not is_python_engine: - tm.assert_equal(len(w), 1) + assert len(w) == 1 msg = str(w[0].message) expected = ("Alignment difference on axis {0} is larger" " than an order of magnitude on term {1!r}, " "by more than {2:.4g}; performance may suffer" "".format(1, 'df', np.log10(s.size - df.shape[1]))) - tm.assert_equal(msg, expected) + assert msg == expected # ------------------------------------ @@ -1104,17 +1104,17 @@ def test_simple_arith_ops(self): else: expec = _eval_single_bin(1, op, 1, self.engine) x = self.eval(ex, engine=self.engine, parser=self.parser) - tm.assert_equal(x, expec) + assert x == expec expec = _eval_single_bin(x, op, 1, self.engine) y = self.eval(ex2, local_dict={'x': x}, engine=self.engine, parser=self.parser) - tm.assert_equal(y, expec) + assert y == expec expec = _eval_single_bin(1, op, x + 1, self.engine) y = self.eval(ex3, local_dict={'x': x}, engine=self.engine, parser=self.parser) - tm.assert_equal(y, expec) + assert y == expec def test_simple_bool_ops(self): for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), @@ -1149,7 +1149,7 @@ def test_4d_ndarray_fails(self): def test_constant(self): x = self.eval('1') - tm.assert_equal(x, 1) + assert x == 1 def test_single_variable(self): df = DataFrame(randn(10, 2)) @@ -1508,7 +1508,7 @@ def test_check_many_exprs(self): expr = ' * '.join('a' * 33) expected = 1 res = pd.eval(expr, engine=self.engine, parser=self.parser) - tm.assert_equal(res, expected) + assert res == expected def test_fails_and(self): df = DataFrame(np.random.randn(5, 3)) @@ -1736,14 +1736,14 @@ def test_no_new_locals(self, engine, parser): pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser) lcls2 = locals().copy() lcls2.pop('lcls') - 
tm.assert_equal(lcls, lcls2) + assert lcls == lcls2 def test_no_new_globals(self, engine, parser): x = 1 # noqa gbls = globals().copy() pd.eval('x + 1', engine=engine, parser=parser) gbls2 = globals().copy() - tm.assert_equal(gbls, gbls2) + assert gbls == gbls2 def test_invalid_engine(): @@ -1810,7 +1810,7 @@ def test_numexpr_builtin_raises(engine, parser): pd.eval('sin + dotted_line', engine=engine, parser=parser) else: res = pd.eval('sin + dotted_line', engine=engine, parser=parser) - tm.assert_equal(res, sin + dotted_line) + assert res == sin + dotted_line def test_bad_resolver_raises(engine, parser): @@ -1854,7 +1854,7 @@ def test_inf(engine, parser): s = 'inf + 1' expected = np.inf result = pd.eval(s, engine=engine, parser=parser) - tm.assert_equal(result, expected) + assert result == expected def test_negate_lt_eq_le(engine, parser): diff --git a/pandas/tests/core/sparse/test_frame.py b/pandas/tests/core/sparse/test_frame.py index adb813a27e7e9..5774a74c6290e 100644 --- a/pandas/tests/core/sparse/test_frame.py +++ b/pandas/tests/core/sparse/test_frame.py @@ -1183,7 +1183,7 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) # Assert spmatrices equal - tm.assert_equal(dict(sdf.to_coo().todok()), dict(spm.todok())) + assert dict(sdf.to_coo().todok()) == dict(spm.todok()) # Ensure dtype is preserved if possible was_upcast = ((fill_value is None or is_float(fill_value)) and @@ -1193,11 +1193,11 @@ def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype): float if was_upcast else dtype) tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) - tm.assert_equal(sdf.to_coo().dtype, res_dtype) + assert sdf.to_coo().dtype == res_dtype # However, adding a str column results in an upcast to object sdf['strings'] = np.arange(len(sdf)).astype(str) - tm.assert_equal(sdf.to_coo().dtype, np.object_) + assert sdf.to_coo().dtype == np.object_ @pytest.mark.parametrize('fill_value', 
[None, 0, np.nan]) # noqa: F811 @@ -1237,12 +1237,12 @@ def test_from_to_scipy_object(spmatrix, fill_value): tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense()) # Assert spmatrices equal - tm.assert_equal(dict(sdf.to_coo().todok()), dict(spm.todok())) + assert dict(sdf.to_coo().todok()) == dict(spm.todok()) # Ensure dtype is preserved if possible res_dtype = object tm.assert_contains_all(sdf.dtypes, {np.dtype(res_dtype)}) - tm.assert_equal(sdf.to_coo().dtype, res_dtype) + assert sdf.to_coo().dtype == res_dtype class TestSparseDataFrameArithmetic(tm.TestCase): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 508053a6367fa..b1d31aee53b6a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -259,8 +259,8 @@ def test_constructor_dict(self): # Dict with None value frame_none = DataFrame(dict(a=None), index=[0]) frame_none_list = DataFrame(dict(a=[None]), index=[0]) - tm.assert_equal(frame_none.get_value(0, 'a'), None) - tm.assert_equal(frame_none_list.get_value(0, 'a'), None) + assert frame_none.get_value(0, 'a') is None + assert frame_none_list.get_value(0, 'a') is None tm.assert_frame_equal(frame_none, frame_none_list) # GH10856 diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index fce59e10bf4bd..f8e411c30fe38 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -974,7 +974,7 @@ def test_replace_period(self): 'out_augmented_MAY_2011.json', 'out_augmented_AUG_2011.json', 'out_augmented_JAN_2011.json'], columns=['fname']) - tm.assert_equal(set(df.fname.values), set(d['fname'].keys())) + assert set(df.fname.values) == set(d['fname'].keys()) expected = DataFrame({'fname': [d['fname'][k] for k in df.fname.values]}) result = df.replace(d) @@ -997,7 +997,7 @@ def test_replace_datetime(self): 'out_augmented_MAY_2011.json', 'out_augmented_AUG_2011.json', 'out_augmented_JAN_2011.json'], 
columns=['fname']) - tm.assert_equal(set(df.fname.values), set(d['fname'].keys())) + assert set(df.fname.values) == set(d['fname'].keys()) expected = DataFrame({'fname': [d['fname'][k] for k in df.fname.values]}) result = df.replace(d) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 7444490d18373..dbb2e04173faf 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -229,9 +229,9 @@ def test_subclass_sparse_slice(self): tm.SubclassedSparseDataFrame(rows[:2])) tm.assert_sp_frame_equal(ssdf[:2], tm.SubclassedSparseDataFrame(rows[:2])) - tm.assert_equal(ssdf.loc[:2].testattr, "testattr") - tm.assert_equal(ssdf.iloc[:2].testattr, "testattr") - tm.assert_equal(ssdf[:2].testattr, "testattr") + assert ssdf.loc[:2].testattr == "testattr" + assert ssdf.iloc[:2].testattr == "testattr" + assert ssdf[:2].testattr == "testattr" tm.assert_sp_series_equal(ssdf.loc[1], tm.SubclassedSparseSeries(rows[1]), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 25f89b29021ce..f486c70d86f9d 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -3275,7 +3275,7 @@ def f(group): # we expect 2 zeros because we call ``f`` once to see if a faster route # can be used. expected_names = [0, 0, 1, 2] - tm.assert_equal(names, expected_names) + assert names == expected_names def test_no_dummy_key_names(self): # GH #1291 @@ -3987,7 +3987,7 @@ def test_grouping_string_repr(self): result = gr.grouper.groupings[0].__repr__() expected = "Grouping(('A', 'a'))" - tm.assert_equal(result, expected) + assert result == expected def test_group_shift_with_null_key(self): # This test is designed to replicate the segfault in issue #13813. 
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 28fbce43bf983..f8eb923d51f75 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1295,7 +1295,7 @@ def test_parsers_time(self): res = tools.to_time(np.array(arg)) self.assertIsInstance(res, list) - self.assert_equal(res, expected_arr) + assert res == expected_arr def test_parsers_monthfreq(self): cases = {'201101': datetime(2011, 1, 1, 0, 0), diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 7c74f82741e8c..b880ba8b182e9 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2536,11 +2536,11 @@ def test_nat_representations(self): def test_format_percentiles(): result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] - tm.assert_equal(result, expected) + assert result == expected result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] - tm.assert_equal(result, expected) + assert result == expected tm.assertRaises(ValueError, fmt.format_percentiles, [0.1, np.nan, 0.5]) tm.assertRaises(ValueError, fmt.format_percentiles, [-0.001, 0.1, 0.5]) diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index 23aaf472316ec..0df35da05578a 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -27,9 +27,9 @@ def test_repr_binary_type(): raw = btype(letters) b = compat.text_type(compat.bytes_to_str(raw)) res = printing.pprint_thing(b, quote_strings=True) - tm.assert_equal(res, repr(b)) + assert res == repr(b) res = printing.pprint_thing(b, quote_strings=False) - tm.assert_equal(res, b) + assert res == b class TestFormattBase(tm.TestCase): diff --git 
a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index a24e8cdaf0273..45ce87bf069aa 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -420,7 +420,7 @@ def test_frame_empty(self): # GH 7445 result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns') expected = '{"test":{}}' - tm.assert_equal(result, expected) + assert result == expected def test_frame_empty_mixedtype(self): # mixed type diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 6eadf2c61c974..120bb005fb3ff 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -1332,9 +1332,9 @@ def test_1000_sep_with_decimal(self): 'C': [5, 10.] }) - tm.assert_equal(expected.A.dtype, 'int64') - tm.assert_equal(expected.B.dtype, 'float') - tm.assert_equal(expected.C.dtype, 'float') + assert expected.A.dtype == 'int64' + assert expected.B.dtype == 'float' + assert expected.C.dtype == 'float' df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.') tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 2fada4e1dc2cc..d9e036481d0c2 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -412,7 +412,7 @@ def test_reading_all_sheets(self): tm.assert_contains_all(expected_keys, dfs.keys()) # Issue 9930 # Ensure sheet order is preserved - tm.assert_equal(expected_keys, list(dfs.keys())) + assert expected_keys == list(dfs.keys()) def test_reading_multiple_specific_sheets(self): # Test reading specific sheetnames by specifying a mixed list diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 0746cacb01bb9..e14c39d1de228 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -50,8 +50,8 @@ def compare_element(result, expected, typ, version=None): if expected is pd.NaT: assert result is pd.NaT else: - tm.assert_equal(result, 
expected) - tm.assert_equal(result.freq, expected.freq) + assert result == expected + assert result.freq == expected.freq else: comparator = getattr(tm, "assert_%s_equal" % typ, tm.assert_almost_equal) @@ -102,21 +102,21 @@ def compare_sp_series_ts(res, exp, typ, version): def compare_series_ts(result, expected, typ, version): # GH 7748 tm.assert_series_equal(result, expected) - tm.assert_equal(result.index.freq, expected.index.freq) - tm.assert_equal(result.index.freq.normalize, False) + assert result.index.freq == expected.index.freq + assert not result.index.freq.normalize tm.assert_series_equal(result > 0, expected > 0) # GH 9291 freq = result.index.freq - tm.assert_equal(freq + Day(1), Day(2)) + assert freq + Day(1) == Day(2) res = freq + pandas.Timedelta(hours=1) - tm.assert_equal(isinstance(res, pandas.Timedelta), True) - tm.assert_equal(res, pandas.Timedelta(days=1, hours=1)) + assert isinstance(res, pandas.Timedelta) + assert res == pandas.Timedelta(days=1, hours=1) res = freq + pandas.Timedelta(nanoseconds=1) - tm.assert_equal(isinstance(res, pandas.Timedelta), True) - tm.assert_equal(res, pandas.Timedelta(days=1, nanoseconds=1)) + assert isinstance(res, pandas.Timedelta) + assert res == pandas.Timedelta(days=1, nanoseconds=1) def compare_series_dt_tz(result, expected, typ, version): @@ -170,8 +170,8 @@ def compare_frame_cat_and_float(result, expected, typ, version): def compare_index_period(result, expected, typ, version): tm.assert_index_equal(result, expected) tm.assertIsInstance(result.freq, MonthEnd) - tm.assert_equal(result.freq, MonthEnd()) - tm.assert_equal(result.freqstr, 'M') + assert result.freq == MonthEnd() + assert result.freqstr == 'M' tm.assert_index_equal(result.shift(2), expected.shift(2)) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index c6a54211e73ad..f28b2a0231433 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -3516,7 +3516,7 @@ def 
test_select_iterator_many_empty_frames(self): results = [s for s in store.select( 'df', where=where, chunksize=chunksize)] - tm.assert_equal(1, len(results)) + assert len(results) == 1 result = concat(results) rexpected = expected[expected.index <= end_dt] tm.assert_frame_equal(rexpected, result) @@ -3527,7 +3527,7 @@ def test_select_iterator_many_empty_frames(self): 'df', where=where, chunksize=chunksize)] # should be 1, is 10 - tm.assert_equal(1, len(results)) + assert len(results) == 1 result = concat(results) rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)] @@ -3545,7 +3545,7 @@ def test_select_iterator_many_empty_frames(self): 'df', where=where, chunksize=chunksize)] # should be [] - tm.assert_equal(0, len(results)) + assert len(results) == 0 def test_retain_index_attributes(self): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 50d3342c56522..9ddd81ae53062 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -1129,14 +1129,14 @@ def test_write_variable_labels(self): 'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'} - tm.assert_equal(read_labels, expected_labels) + assert read_labels == expected_labels variable_labels['index'] = 'The Index' with tm.ensure_clean() as path: original.to_stata(path, variable_labels=variable_labels) with StataReader(path) as sr: read_labels = sr.variable_labels() - tm.assert_equal(read_labels, variable_labels) + assert read_labels == variable_labels def test_write_variable_label_errors(self): original = pd.DataFrame({'a': [1, 2, 3, 4], diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 4629103d033f5..683f4ee89687f 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -153,8 +153,7 @@ class Axis(object): def test_convert_accepts_unicode(self): r1 = self.pc.convert("2012-1-1", None, self.axis) r2 = self.pc.convert(u("2012-1-1"), None, self.axis) 
- self.assert_equal(r1, r2, - "PeriodConverter.convert should accept unicode") + assert r1 == r2 def test_conversion(self): rs = self.pc.convert(['2012-1-1'], None, self.axis)[0] diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 3cb1e29bde7d9..faf987c9b3820 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -344,7 +344,7 @@ def test_str_attribute(self): def test_empty_method(self): s_empty = pd.Series() - tm.assert_equal(s_empty.empty, True) + assert s_empty.empty for full_series in [pd.Series([1]), pd.Series(index=[1])]: - tm.assert_equal(full_series.empty, False) + assert not full_series.empty diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 90b1157572be1..5222f8fc18520 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -142,12 +142,12 @@ def test_random_state(): import numpy.random as npr # Check with seed state = com._random_state(5) - tm.assert_equal(state.uniform(), npr.RandomState(5).uniform()) + assert state.uniform() == npr.RandomState(5).uniform() # Check with random state object state2 = npr.RandomState(10) - tm.assert_equal( - com._random_state(state2).uniform(), npr.RandomState(10).uniform()) + assert (com._random_state(state2).uniform() == + npr.RandomState(10).uniform()) # check with no arg random state assert com._random_state() is np.random diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index a2ded195d9343..8706a05cfe8a2 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -1810,12 +1810,12 @@ def test_squeeze(self): # axis argument df = tm.makeTimeDataFrame(nper=1).iloc[:, :1] - tm.assert_equal(df.shape, (1, 1)) + assert df.shape == (1, 1) tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0]) tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 
0]) - tm.assert_equal(df.squeeze(), df.iloc[0, 0]) + assert df.squeeze() == df.iloc[0, 0] tm.assertRaises(ValueError, df.squeeze, axis=2) tm.assertRaises(ValueError, df.squeeze, axis='x') diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py index 1bf9f4da45bff..2793cc14df19a 100644 --- a/pandas/tests/test_util.py +++ b/pandas/tests/test_util.py @@ -213,7 +213,7 @@ def test_validate_bool_kwarg(self): validate_bool_kwarg(value, name) for value in valid_values: - tm.assert_equal(validate_bool_kwarg(value, name), value) + assert validate_bool_kwarg(value, name) == value class TestValidateKwargsAndArgs(tm.TestCase): @@ -400,4 +400,4 @@ def test_numpy_errstate_is_default(): import numpy as np from pandas.compat import numpy # noqa # The errstate should be unchanged after that import. - tm.assert_equal(np.geterr(), expected) + assert np.geterr() == expected diff --git a/pandas/tests/tools/test_util.py b/pandas/tests/tools/test_util.py index ed64e8f42d84b..3ac7d8b32516e 100644 --- a/pandas/tests/tools/test_util.py +++ b/pandas/tests/tools/test_util.py @@ -50,7 +50,7 @@ def test_empty(self): # empty product (empty input): result = cartesian_product([]) expected = [] - tm.assert_equal(result, expected) + assert result == expected def test_invalid_input(self): invalid_inputs = [1, [1], [1, 2], [[1], 2], @@ -482,4 +482,4 @@ def test_downcast_limits(self): for dtype, downcast, min_max in dtype_downcast_min_max: series = pd.to_numeric(pd.Series(min_max), downcast=downcast) - tm.assert_equal(series.dtype, dtype) + assert series.dtype == dtype diff --git a/pandas/util/testing.py b/pandas/util/testing.py index d1f88c7041e05..47ed762b3e561 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -833,23 +833,6 @@ def equalContents(arr1, arr2): return frozenset(arr1) == frozenset(arr2) -def assert_equal(a, b, msg=""): - """asserts that a equals b, like nose's assert_equal, - but allows custom message to start. Passes a and b to - format string as well. 
So you can use '{0}' and '{1}' - to display a and b. - - Examples - -------- - >>> assert_equal(2, 2, "apples") - >>> assert_equal(5.2, 1.2, "{0} was really a dead parrot") - Traceback (most recent call last): - ... - AssertionError: 5.2 was really a dead parrot: 5.2 != 1.2 - """ - assert a == b, "%s: %r != %r" % (msg.format(a, b), a, b) - - def assert_index_equal(left, right, exact='equiv', check_names=True, check_less_precise=False, check_exact=True, check_categorical=True, obj='Index'): @@ -862,7 +845,7 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, exact : bool / string {'equiv'}, default False Whether to check the Index class, dtype and inferred_type are identical. If 'equiv', then RangeIndex can be substituted for - Int64Index as well + Int64Index as well. check_names : bool, default True Whether to check the names attribute. check_less_precise : bool or int, default False From d7913621195424f538748915e82c5c2fcc4164bd Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 16 Apr 2017 14:27:46 -0400 Subject: [PATCH 37/56] MAINT: Strip internals from TestCase class (#16016) * MAINT: Move reset_display_options outside of TestCase * MAINT: Move round_trip_pickle outside of TestCase * MAINT: Remove all deprecated aliases from TestCase * DOC: Add doc explaining TestCase usage under pytest --- pandas/tests/core/dtypes/test_dtypes.py | 2 +- pandas/tests/core/sparse/test_array.py | 2 +- pandas/tests/core/sparse/test_frame.py | 2 +- pandas/tests/core/sparse/test_series.py | 2 +- pandas/tests/frame/test_block_internals.py | 6 +-- pandas/tests/frame/test_repr_info.py | 2 +- pandas/tests/frame/test_subclass.py | 2 +- pandas/tests/indexes/common.py | 2 +- .../tests/indexes/datetimes/test_datetime.py | 2 +- pandas/tests/indexes/datetimes/test_ops.py | 4 +- pandas/tests/indexes/period/test_period.py | 4 +- pandas/tests/indexes/test_multi.py | 4 +- .../indexes/timedeltas/test_timedelta.py | 2 +- .../tests/io/formats/test_eng_formatting.py | 4 +- 
pandas/tests/io/formats/test_format.py | 16 +++--- pandas/tests/scalar/test_period.py | 2 +- pandas/tests/scalar/test_timedelta.py | 2 +- pandas/tests/series/test_io.py | 2 +- pandas/tests/series/test_timeseries.py | 6 +-- pandas/tests/test_multilevel.py | 2 +- pandas/tests/test_panel.py | 2 +- pandas/tests/test_testing.py | 20 -------- pandas/tests/tseries/test_offsets.py | 4 +- pandas/util/decorators.py | 2 +- pandas/util/testing.py | 51 ++++++++++--------- 25 files changed, 66 insertions(+), 83 deletions(-) diff --git a/pandas/tests/core/dtypes/test_dtypes.py b/pandas/tests/core/dtypes/test_dtypes.py index ec9876df14e3b..46569fecf553f 100644 --- a/pandas/tests/core/dtypes/test_dtypes.py +++ b/pandas/tests/core/dtypes/test_dtypes.py @@ -40,7 +40,7 @@ def f(): self.assertNotEqual(np.str_, self.dtype) def test_pickle(self): - result = self.round_trip_pickle(self.dtype) + result = tm.round_trip_pickle(self.dtype) self.assertEqual(result, self.dtype) diff --git a/pandas/tests/core/sparse/test_array.py b/pandas/tests/core/sparse/test_array.py index b7b664e7bfb8a..9a1346430175d 100644 --- a/pandas/tests/core/sparse/test_array.py +++ b/pandas/tests/core/sparse/test_array.py @@ -562,7 +562,7 @@ def _check_inplace_op(op): def test_pickle(self): def _check_roundtrip(obj): - unpickled = self.round_trip_pickle(obj) + unpickled = tm.round_trip_pickle(obj) tm.assert_sp_array_equal(unpickled, obj) _check_roundtrip(self.arr) diff --git a/pandas/tests/core/sparse/test_frame.py b/pandas/tests/core/sparse/test_frame.py index 5774a74c6290e..279fe9ea75e53 100644 --- a/pandas/tests/core/sparse/test_frame.py +++ b/pandas/tests/core/sparse/test_frame.py @@ -278,7 +278,7 @@ def test_array_interface(self): def test_pickle(self): def _test_roundtrip(frame, orig): - result = self.round_trip_pickle(frame) + result = tm.round_trip_pickle(frame) tm.assert_sp_frame_equal(frame, result) tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False) diff --git 
a/pandas/tests/core/sparse/test_series.py b/pandas/tests/core/sparse/test_series.py index 0b71dffe1782b..52032b618cd1d 100644 --- a/pandas/tests/core/sparse/test_series.py +++ b/pandas/tests/core/sparse/test_series.py @@ -390,7 +390,7 @@ def test_to_frame(self): def test_pickle(self): def _test_roundtrip(series): - unpickled = self.round_trip_pickle(series) + unpickled = tm.round_trip_pickle(series) tm.assert_sp_series_equal(series, unpickled) tm.assert_series_equal(series.to_dense(), unpickled.to_dense()) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index accd3ddeb03d7..bfe1b0aae90b1 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -350,18 +350,18 @@ def test_copy(self): self.assertIsNot(copy._data, self.mixed_frame._data) def test_pickle(self): - unpickled = self.round_trip_pickle(self.mixed_frame) + unpickled = tm.round_trip_pickle(self.mixed_frame) assert_frame_equal(self.mixed_frame, unpickled) # buglet self.mixed_frame._data.ndim # empty - unpickled = self.round_trip_pickle(self.empty) + unpickled = tm.round_trip_pickle(self.empty) repr(unpickled) # tz frame - unpickled = self.round_trip_pickle(self.tzframe) + unpickled = tm.round_trip_pickle(self.tzframe) assert_frame_equal(self.tzframe, unpickled) def test_consolidate_datetime64(self): diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index 740a24f38c316..be55efac2992b 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -118,7 +118,7 @@ def test_repr_unsortable(self): fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000) repr(self.frame) - self.reset_display_options() + tm.reset_display_options() warnings.filters = warn_filters diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index dbb2e04173faf..1899df74c60ab 100644 --- a/pandas/tests/frame/test_subclass.py 
+++ b/pandas/tests/frame/test_subclass.py @@ -85,7 +85,7 @@ def test_dataframe_metadata(self): self.assertEqual(df.iloc[0:1, :].testattr, 'XXX') # GH10553 - unpickled = self.round_trip_pickle(df) + unpickled = tm.round_trip_pickle(df) tm.assert_frame_equal(df, unpickled) self.assertEqual(df._metadata, unpickled._metadata) self.assertEqual(df.testattr, unpickled.testattr) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 25214e6b170b5..15eceac6b00c9 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -28,7 +28,7 @@ def setup_indices(self): setattr(self, name, idx) def verify_pickle(self, index): - unpickled = self.round_trip_pickle(index) + unpickled = tm.round_trip_pickle(index) self.assertTrue(index.equals(unpickled)) def test_pickle_compat_construction(self): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 78c37f773547a..feedde77ebdd2 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -106,7 +106,7 @@ def test_roundtrip_pickle_with_tz(self): # GH 8367 # round-trip of timezone index = date_range('20130101', periods=3, tz='US/Eastern', name='foo') - unpickled = self.round_trip_pickle(index) + unpickled = tm.round_trip_pickle(index) self.assert_index_equal(index, unpickled) def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 4be9999982f12..6e6d6bf190291 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -1121,7 +1121,7 @@ def test_comparison(self): self.assertFalse(comp[9]) def test_pickle_unpickle(self): - unpickled = self.round_trip_pickle(self.rng) + unpickled = tm.round_trip_pickle(self.rng) self.assertIsNotNone(unpickled.offset) def test_copy(self): @@ -1272,7 +1272,7 @@ def 
test_shift(self): self.assertEqual(shifted[0], rng[0] + CDay()) def test_pickle_unpickle(self): - unpickled = self.round_trip_pickle(self.rng) + unpickled = tm.round_trip_pickle(self.rng) self.assertIsNotNone(unpickled.offset) def test_summary(self): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 6639fcd985ac4..fcbb1c10426bc 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -58,7 +58,7 @@ def test_pickle_compat_construction(self): def test_pickle_round_trip(self): for freq in ['D', 'M', 'Y']: idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D') - result = self.round_trip_pickle(idx) + result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) def test_get_loc(self): @@ -761,7 +761,7 @@ def test_append_concat(self): def test_pickle_freq(self): # GH2891 prng = period_range('1/1/2011', '1/1/2012', freq='M') - new_prng = self.round_trip_pickle(prng) + new_prng = tm.round_trip_pickle(prng) self.assertEqual(new_prng.freq, offsets.MonthEnd()) self.assertEqual(new_prng.freqstr, 'M') diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index e93319a30d5d8..b33a317eefd44 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -1044,7 +1044,7 @@ def test_roundtrip_pickle_with_tz(self): [[1, 2], ['a', 'b'], date_range('20130101', periods=3, tz='US/Eastern') ], names=['one', 'two', 'three']) - unpickled = self.round_trip_pickle(index) + unpickled = tm.round_trip_pickle(index) self.assertTrue(index.equal_levels(unpickled)) def test_from_tuples_index_values(self): @@ -1392,7 +1392,7 @@ def test_format_sparse_config(self): result = self.index.format() self.assertEqual(result[1], 'foo two') - self.reset_display_options() + tm.reset_display_options() warnings.filters = warn_filters diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py 
b/pandas/tests/indexes/timedeltas/test_timedelta.py index 3abc2d8422fd3..f434938a6e803 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -454,7 +454,7 @@ def test_pass_TimedeltaIndex_to_index(self): def test_pickle(self): rng = timedelta_range('1 days', periods=10) - rng_p = self.round_trip_pickle(rng) + rng_p = tm.round_trip_pickle(rng) tm.assert_index_equal(rng, rng_p) def test_hash_error(self): diff --git a/pandas/tests/io/formats/test_eng_formatting.py b/pandas/tests/io/formats/test_eng_formatting.py index 225a4921961cf..8eb4ed576fff1 100644 --- a/pandas/tests/io/formats/test_eng_formatting.py +++ b/pandas/tests/io/formats/test_eng_formatting.py @@ -38,7 +38,7 @@ def test_eng_float_formatter(self): '3 1E+06') self.assertEqual(result, expected) - self.reset_display_options() + tm.reset_display_options() def compare(self, formatter, input, output): formatted_input = formatter(input) @@ -185,7 +185,7 @@ def test_nan(self): fmt.set_eng_float_format(accuracy=1) result = pt.to_string() self.assertTrue('NaN' in result) - self.reset_display_options() + tm.reset_display_options() def test_inf(self): # Issue #11981 diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index b880ba8b182e9..bb766ae389a10 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -138,7 +138,7 @@ def test_eng_float_formatter(self): fmt.set_eng_float_format(accuracy=0) repr(self.frame) - self.reset_display_options() + tm.reset_display_options() def test_show_null_counts(self): @@ -1197,7 +1197,7 @@ def test_to_string_line_width_no_index(self): self.assertEqual(df_s, expected) def test_to_string_float_formatting(self): - self.reset_display_options() + tm.reset_display_options() fmt.set_option('display.precision', 5, 'display.column_space', 12, 'display.notebook_repr_html', False) @@ -1226,7 +1226,7 @@ def test_to_string_float_formatting(self): 
expected = (' x\n' '0 3234.000\n' '1 0.253') self.assertEqual(df_s, expected) - self.reset_display_options() + tm.reset_display_options() self.assertEqual(get_option("display.precision"), 6) df = DataFrame({'x': [1e9, 0.2512]}) @@ -1310,14 +1310,14 @@ def test_to_string_index_formatter(self): self.assertEqual(rs, xp) def test_to_string_left_justify_cols(self): - self.reset_display_options() + tm.reset_display_options() df = DataFrame({'x': [3234, 0.253]}) df_s = df.to_string(justify='left') expected = (' x \n' '0 3234.000\n' '1 0.253') self.assertEqual(df_s, expected) def test_to_string_format_na(self): - self.reset_display_options() + tm.reset_display_options() df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4], 'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']}) result = df.to_string() @@ -1380,7 +1380,7 @@ def test_repr_html(self): fmt.set_option('display.notebook_repr_html', False) self.frame._repr_html_() - self.reset_display_options() + tm.reset_display_options() df = DataFrame([[1, 2], [3, 4]]) fmt.set_option('display.show_dimensions', True) @@ -1388,7 +1388,7 @@ def test_repr_html(self): fmt.set_option('display.show_dimensions', False) self.assertFalse('2 rows' in df._repr_html_()) - self.reset_display_options() + tm.reset_display_options() def test_repr_html_wide(self): max_cols = get_option('display.max_columns') @@ -1552,7 +1552,7 @@ def get_ipython(): repstr = self.frame._repr_html_() self.assertIn('class', repstr) # info fallback - self.reset_display_options() + tm.reset_display_options() def test_pprint_pathological_object(self): """ diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index 7a15600d6041e..98af0028469bf 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -909,7 +909,7 @@ def test_multiples(self): def test_round_trip(self): p = Period('2000Q1') - new_p = self.round_trip_pickle(p) + new_p = tm.round_trip_pickle(p) self.assertEqual(new_p, p) diff --git 
a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index c22d1d2329fba..abdbf29008b7e 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -559,7 +559,7 @@ def test_overflow(self): def test_pickle(self): v = Timedelta('1 days 10:11:12.0123456') - v_p = self.round_trip_pickle(v) + v_p = tm.round_trip_pickle(v) self.assertEqual(v, v_p) def test_timedelta_hash_equality(self): diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index d514fbfc142f0..a86ca880e75e4 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -134,7 +134,7 @@ def test_timeseries_periodindex(self): from pandas import period_range prng = period_range('1/1/2011', '1/1/2012', freq='M') ts = Series(np.random.randn(len(prng)), prng) - new_ts = self.round_trip_pickle(ts) + new_ts = tm.round_trip_pickle(ts) self.assertEqual(new_ts.index.freq, 'M') def test_pickle_preserve_name(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index ce7d5a573bfab..5a88b5bf98699 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -827,11 +827,11 @@ def test_asfreq_resample_set_correct_freq(self): def test_pickle(self): # GH4606 - p = self.round_trip_pickle(NaT) + p = tm.round_trip_pickle(NaT) self.assertTrue(p is NaT) idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06']) - idx_p = self.round_trip_pickle(idx) + idx_p = tm.round_trip_pickle(idx) self.assertTrue(idx_p[0] == idx[0]) self.assertTrue(idx_p[1] is NaT) self.assertTrue(idx_p[2] == idx[2]) @@ -839,7 +839,7 @@ def test_pickle(self): # GH11002 # don't infer freq idx = date_range('1750-1-1', '2050-1-1', freq='7D') - idx_p = self.round_trip_pickle(idx) + idx_p = tm.round_trip_pickle(idx) tm.assert_index_equal(idx, idx_p) def test_setops_preserve_freq(self): diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 
648a3b98b245a..24bbf895508d7 100755 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -179,7 +179,7 @@ def _check_op(opname): def test_pickle(self): def _test_roundtrip(frame): - unpickled = self.round_trip_pickle(frame) + unpickled = tm.round_trip_pickle(frame) tm.assert_frame_equal(frame, unpickled) _test_roundtrip(self.frame) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 60173dda57e04..4e9805ca9d5a6 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -43,7 +43,7 @@ class PanelTests(object): def test_pickle(self): with catch_warnings(record=True): - unpickled = self.round_trip_pickle(self.panel) + unpickled = tm.round_trip_pickle(self.panel) assert_frame_equal(unpickled['ItemA'], self.panel['ItemA']) def test_rank(self): diff --git a/pandas/tests/test_testing.py b/pandas/tests/test_testing.py index e5cb953cb35a5..fe4149583182d 100644 --- a/pandas/tests/test_testing.py +++ b/pandas/tests/test_testing.py @@ -746,26 +746,6 @@ def test_RNGContext(self): self.assertEqual(np.random.randn(), expected0) -class TestDeprecatedTests(tm.TestCase): - - def test_warning(self): - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - self.assertEquals(1, 1) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - self.assertNotEquals(1, 2) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - self.assert_(True) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - self.assertAlmostEquals(1.0, 1.0000000001) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - self.assertNotAlmostEquals(1, 2) - - class TestLocale(tm.TestCase): def test_locale(self): diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py index f644c353982f6..2dc2485550bc5 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/test_offsets.py @@ -1906,7 +1906,7 @@ 
def test_calendar(self): def test_roundtrip_pickle(self): def _check_roundtrip(obj): - unpickled = self.round_trip_pickle(obj) + unpickled = tm.round_trip_pickle(obj) self.assertEqual(unpickled, obj) _check_roundtrip(self.offset) @@ -1967,7 +1967,7 @@ def test_offsets_compare_equal(self): def test_roundtrip_pickle(self): def _check_roundtrip(obj): - unpickled = self.round_trip_pickle(obj) + unpickled = tm.round_trip_pickle(obj) self.assertEqual(unpickled, obj) _check_roundtrip(self._object()) diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py index ca588e2a0432e..772b206f82e69 100644 --- a/pandas/util/decorators.py +++ b/pandas/util/decorators.py @@ -24,7 +24,7 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): old_arg_name : str Name of argument in function to deprecate new_arg_name : str - Name of prefered argument in function + Name of preferred argument in function mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 47ed762b3e561..e9ec9d553d3e4 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -50,7 +50,6 @@ Index, MultiIndex, Series, DataFrame, Panel, Panel4D) -from pandas.util.decorators import deprecate from pandas.util import libtesting from pandas.io.common import urlopen slow = pytest.mark.slow @@ -83,6 +82,14 @@ def reset_testing_mode(): class TestCase(unittest.TestCase): + """ + The test case class that we originally used when using the + nosetests framework. Under the new pytest framework, we are + moving away from this class. + + Do not create new test classes derived from this one. Rather, + they should inherit from object directly. 
+ """ @classmethod def setUpClass(cls): @@ -92,36 +99,32 @@ def setUpClass(cls): def tearDownClass(cls): pass - def reset_display_options(self): - # reset the display options - pd.reset_option('^display.', silent=True) - - def round_trip_pickle(self, obj, path=None): - return round_trip_pickle(obj, path=path) - # https://docs.python.org/3/library/unittest.html#deprecated-aliases - def assertEquals(self, *args, **kwargs): - return deprecate('assertEquals', - self.assertEqual)(*args, **kwargs) +def reset_display_options(): + """ + Reset the display options for printing and representing objects. + """ - def assertNotEquals(self, *args, **kwargs): - return deprecate('assertNotEquals', - self.assertNotEqual)(*args, **kwargs) + pd.reset_option('^display.', silent=True) - def assert_(self, *args, **kwargs): - return deprecate('assert_', - self.assertTrue)(*args, **kwargs) - def assertAlmostEquals(self, *args, **kwargs): - return deprecate('assertAlmostEquals', - self.assertAlmostEqual)(*args, **kwargs) +def round_trip_pickle(obj, path=None): + """ + Pickle an object and then read it again. - def assertNotAlmostEquals(self, *args, **kwargs): - return deprecate('assertNotAlmostEquals', - self.assertNotAlmostEqual)(*args, **kwargs) + Parameters + ---------- + obj : pandas object + The object to pickle and then re-read. + path : str, default None + The path where the pickled object is written and then read. + Returns + ------- + round_trip_pickled_object : pandas object + The original object that was pickled and then re-read. 
+ """ -def round_trip_pickle(obj, path=None): if path is None: path = u('__%s__.pickle' % rands(10)) with ensure_clean(path) as path: From a25272ba72741c3e2862f5b0e06279242ffef2b2 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sun, 16 Apr 2017 20:28:33 +0200 Subject: [PATCH 38/56] CLN: update pandas.lib deprecation messages (GH15936) (#16021) --- pandas/__init__.py | 7 +++++-- pandas/util/depr_module.py | 25 ++++++++++++++++--------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/pandas/__init__.py b/pandas/__init__.py index b221f9e43876b..01bf22bcc5e73 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -75,8 +75,11 @@ parser = _DeprecatedModule(deprmod='pandas.parser', removals=['na_values'], moved={'CParserError': 'pandas.errors.ParserError'}) -lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto='pandas._libs.lib', - moved={'infer_dtype': 'pandas.api.lib.infer_dtype'}) +lib = _DeprecatedModule(deprmod='pandas.lib', deprmodto=False, + moved={'Timestamp': 'pandas.Timestamp', + 'Timedelta': 'pandas.Timedelta', + 'NaT': 'pandas.NaT', + 'infer_dtype': 'pandas.api.lib.infer_dtype'}) tslib = _DeprecatedModule(deprmod='pandas.tslib', moved={'Timestamp': 'pandas.Timestamp', 'Timedelta': 'pandas.Timedelta', diff --git a/pandas/util/depr_module.py b/pandas/util/depr_module.py index 1f428198c19f3..b438c91d980af 100644 --- a/pandas/util/depr_module.py +++ b/pandas/util/depr_module.py @@ -75,15 +75,22 @@ def __getattr__(self, name): FutureWarning, stacklevel=2) else: deprmodto = self.deprmodto - if deprmodto is None: - deprmodto = "{modname}.{name}".format( - modname=obj.__module__, name=name) - # The object is actually located in another module. - warnings.warn( - "{deprmod}.{name} is deprecated. 
Please use " - "{deprmodto}.{name} instead.".format( - deprmod=self.deprmod, name=name, deprmodto=deprmodto), - FutureWarning, stacklevel=2) + if deprmodto is False: + warnings.warn( + "{deprmod}.{name} is deprecated and will be removed in " + "a future version.".format( + deprmod=self.deprmod, name=name), + FutureWarning, stacklevel=2) + else: + if deprmodto is None: + deprmodto = "{modname}.{name}".format( + modname=obj.__module__, name=name) + # The object is actually located in another module. + warnings.warn( + "{deprmod}.{name} is deprecated. Please use " + "{deprmodto}.{name} instead.".format( + deprmod=self.deprmod, name=name, deprmodto=deprmodto), + FutureWarning, stacklevel=2) return obj From f60b914e2100d44740df377d55f4d43b3709478c Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Sun, 16 Apr 2017 21:31:38 +0000 Subject: [PATCH 39/56] CLN: move infer_dtype to pandas.api.types (#16023) --- pandas/__init__.py | 2 +- pandas/api/lib/__init__.py | 5 ----- pandas/api/types/__init__.py | 1 + pandas/tests/api/test_types.py | 2 +- 4 files changed, 3 insertions(+), 7 deletions(-) delete mode 100644 pandas/api/lib/__init__.py diff --git a/pandas/__init__.py b/pandas/__init__.py index 01bf22bcc5e73..5f6d54fd904b1 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -79,7 +79,7 @@ moved={'Timestamp': 'pandas.Timestamp', 'Timedelta': 'pandas.Timedelta', 'NaT': 'pandas.NaT', - 'infer_dtype': 'pandas.api.lib.infer_dtype'}) + 'infer_dtype': 'pandas.api.types.infer_dtype'}) tslib = _DeprecatedModule(deprmod='pandas.tslib', moved={'Timestamp': 'pandas.Timestamp', 'Timedelta': 'pandas.Timedelta', diff --git a/pandas/api/lib/__init__.py b/pandas/api/lib/__init__.py deleted file mode 100644 index c86bfc6148655..0000000000000 --- a/pandas/api/lib/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# flake8: noqa - -""" public toolkit API """ - -from pandas._libs.lib import infer_dtype diff --git a/pandas/api/types/__init__.py b/pandas/api/types/__init__.py index 
06fb5742ba067..dcf010dcf4bc2 100644 --- a/pandas/api/types/__init__.py +++ b/pandas/api/types/__init__.py @@ -2,4 +2,5 @@ from pandas.core.dtypes.api import * # noqa from pandas.core.dtypes.concat import union_categoricals # noqa +from pandas._libs.lib import infer_dtype # noqa del np # noqa diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index e0267d2990085..057f7d8f3e286 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -30,7 +30,7 @@ class TestTypes(Base, tm.TestCase): 'is_dict_like', 'is_iterator', 'is_file_like', 'is_list_like', 'is_hashable', 'is_named_tuple', 'is_sequence', - 'pandas_dtype', 'union_categoricals'] + 'pandas_dtype', 'union_categoricals', 'infer_dtype'] def test_types(self): From 5146b5971324009873cf1a54c8f0828de63874ae Mon Sep 17 00:00:00 2001 From: gfyoung Date: Sun, 16 Apr 2017 18:07:25 -0400 Subject: [PATCH 40/56] MAINT: Remove tm.assertIsNot from testing (#16024) --- pandas/tests/core/sparse/test_frame.py | 18 +++--- pandas/tests/core/sparse/test_indexing.py | 6 +- pandas/tests/core/sparse/test_series.py | 18 +++--- .../tests/frame/test_axis_select_reindex.py | 56 ++++++++++--------- pandas/tests/frame/test_block_internals.py | 15 ++--- pandas/tests/frame/test_indexing.py | 8 +-- pandas/tests/frame/test_missing.py | 8 +-- pandas/tests/frame/test_timeseries.py | 7 +-- pandas/tests/indexes/common.py | 28 ++++++---- pandas/tests/indexes/test_base.py | 20 +++---- pandas/tests/indexes/test_category.py | 12 ++-- pandas/tests/indexes/test_frozen.py | 27 +++++---- pandas/tests/indexes/test_interval.py | 6 +- pandas/tests/indexes/test_multi.py | 23 ++++---- pandas/tests/series/test_apply.py | 14 ++--- pandas/tests/series/test_timeseries.py | 12 ++-- pandas/tests/test_generic.py | 2 +- pandas/tests/test_internals.py | 2 +- pandas/tests/test_panel.py | 18 +++--- pandas/tests/test_panel4d.py | 26 ++++----- pandas/tests/tools/test_concat.py | 32 +++++------ pandas/util/testing.py | 19 
++++--- 22 files changed, 194 insertions(+), 183 deletions(-) diff --git a/pandas/tests/core/sparse/test_frame.py b/pandas/tests/core/sparse/test_frame.py index 279fe9ea75e53..0a58713125a30 100644 --- a/pandas/tests/core/sparse/test_frame.py +++ b/pandas/tests/core/sparse/test_frame.py @@ -422,24 +422,24 @@ def test_iloc(self): def test_set_value(self): - # ok as the index gets conver to object + # ok, as the index gets converted to object frame = self.frame.copy() res = frame.set_value('foobar', 'B', 1.5) - self.assertEqual(res.index.dtype, 'object') + assert res.index.dtype == 'object' res = self.frame res.index = res.index.astype(object) res = self.frame.set_value('foobar', 'B', 1.5) - self.assertIsNot(res, self.frame) - self.assertEqual(res.index[-1], 'foobar') - self.assertEqual(res.get_value('foobar', 'B'), 1.5) + assert res is not self.frame + assert res.index[-1] == 'foobar' + assert res.get_value('foobar', 'B') == 1.5 res2 = res.set_value('foobar', 'qux', 1.5) - self.assertIsNot(res2, res) - self.assert_index_equal(res2.columns, - pd.Index(list(self.frame.columns) + ['qux'])) - self.assertEqual(res2.get_value('foobar', 'qux'), 1.5) + assert res2 is not res + tm.assert_index_equal(res2.columns, + pd.Index(list(self.frame.columns) + ['qux'])) + assert res2.get_value('foobar', 'qux') == 1.5 def test_fancy_index_misc(self): # axis = 0 diff --git a/pandas/tests/core/sparse/test_indexing.py b/pandas/tests/core/sparse/test_indexing.py index 1a0782c0a3db9..4a9bea798be36 100644 --- a/pandas/tests/core/sparse/test_indexing.py +++ b/pandas/tests/core/sparse/test_indexing.py @@ -1,6 +1,6 @@ # pylint: disable-msg=E1101,W0612 -import pytest # noqa +import pytest import numpy as np import pandas as pd import pandas.util.testing as tm @@ -578,7 +578,7 @@ def test_reindex(self): exp = orig.reindex(['A'], level=0).to_sparse() tm.assert_sp_series_equal(res, exp) - with tm.assertRaises(TypeError): + with pytest.raises(TypeError): # Incomplete keys are not accepted for 
reindexing: sparse.reindex(['A', 'C']) @@ -586,7 +586,7 @@ def test_reindex(self): res = sparse.reindex(sparse.index, copy=True) exp = orig.reindex(orig.index, copy=True).to_sparse() tm.assert_sp_series_equal(res, exp) - self.assertIsNot(sparse, res) + assert sparse is not res class TestSparseDataFrameIndexing(tm.TestCase): diff --git a/pandas/tests/core/sparse/test_series.py b/pandas/tests/core/sparse/test_series.py index 52032b618cd1d..f5a27a8161909 100644 --- a/pandas/tests/core/sparse/test_series.py +++ b/pandas/tests/core/sparse/test_series.py @@ -314,9 +314,9 @@ def test_constructor_empty(self): def test_copy_astype(self): cop = self.bseries.astype(np.float64) - self.assertIsNot(cop, self.bseries) - self.assertIs(cop.sp_index, self.bseries.sp_index) - self.assertEqual(cop.dtype, np.float64) + assert cop is not self.bseries + assert cop.sp_index is self.bseries.sp_index + assert cop.dtype == np.float64 cop2 = self.iseries.copy() @@ -325,8 +325,8 @@ def test_copy_astype(self): # test that data is copied cop[:5] = 97 - self.assertEqual(cop.sp_values[0], 97) - self.assertNotEqual(self.bseries.sp_values[0], 97) + assert cop.sp_values[0] == 97 + assert self.bseries.sp_values[0] != 97 # correct fill value zbcop = self.zbseries.copy() @@ -338,7 +338,7 @@ def test_copy_astype(self): # no deep copy view = self.bseries.copy(deep=False) view.sp_values[:5] = 5 - self.assertTrue((self.bseries.sp_values[:5] == 5).all()) + assert (self.bseries.sp_values[:5] == 5).all() def test_shape(self): # GH 10452 @@ -639,7 +639,7 @@ def _compare_with_series(sps, new_index): # special cases same_index = self.bseries.reindex(self.bseries.index) tm.assert_sp_series_equal(self.bseries, same_index) - self.assertIsNot(same_index, self.bseries) + assert same_index is not self.bseries # corner cases sp = SparseSeries([], index=[]) @@ -650,7 +650,7 @@ def _compare_with_series(sps, new_index): # with copy=False reindexed = self.bseries.reindex(self.bseries.index, copy=True) reindexed.sp_values[:] 
= 1. - self.assertTrue((self.bseries.sp_values != 1.).all()) + assert (self.bseries.sp_values != 1.).all() reindexed = self.bseries.reindex(self.bseries.index, copy=False) reindexed.sp_values[:] = 1. @@ -824,7 +824,7 @@ def test_shift(self): series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6)) shifted = series.shift(0) - self.assertIsNot(shifted, series) + assert shifted is not series tm.assert_sp_series_equal(shifted, series) f = lambda s: s.shift(1) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 7ed2bfb601eb8..c814b6ad0e30a 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -2,6 +2,8 @@ from __future__ import print_function +import pytest + from datetime import datetime from numpy import random @@ -409,33 +411,35 @@ def test_reindex_dups(self): def test_align(self): af, bf = self.frame.align(self.frame) - self.assertIsNot(af._data, self.frame._data) + assert af._data is not self.frame._data af, bf = self.frame.align(self.frame, copy=False) - self.assertIs(af._data, self.frame._data) + assert af._data is self.frame._data # axis = 0 other = self.frame.iloc[:-5, :3] af, bf = self.frame.align(other, axis=0, fill_value=-1) - self.assert_index_equal(bf.columns, other.columns) + + tm.assert_index_equal(bf.columns, other.columns) + # test fill value join_idx = self.frame.index.join(other.index) diff_a = self.frame.index.difference(join_idx) diff_b = other.index.difference(join_idx) diff_a_vals = af.reindex(diff_a).values diff_b_vals = bf.reindex(diff_b).values - self.assertTrue((diff_a_vals == -1).all()) + assert (diff_a_vals == -1).all() af, bf = self.frame.align(other, join='right', axis=0) - self.assert_index_equal(bf.columns, other.columns) - self.assert_index_equal(bf.index, other.index) - self.assert_index_equal(af.index, other.index) + tm.assert_index_equal(bf.columns, other.columns) + 
tm.assert_index_equal(bf.index, other.index) + tm.assert_index_equal(af.index, other.index) # axis = 1 other = self.frame.iloc[:-5, :3].copy() af, bf = self.frame.align(other, axis=1) - self.assert_index_equal(bf.columns, self.frame.columns) - self.assert_index_equal(bf.index, other.index) + tm.assert_index_equal(bf.columns, self.frame.columns) + tm.assert_index_equal(bf.index, other.index) # test fill value join_idx = self.frame.index.join(other.index) @@ -446,42 +450,42 @@ def test_align(self): # TODO(wesm): unused? diff_b_vals = bf.reindex(diff_b).values # noqa - self.assertTrue((diff_a_vals == -1).all()) + assert (diff_a_vals == -1).all() af, bf = self.frame.align(other, join='inner', axis=1) - self.assert_index_equal(bf.columns, other.columns) + tm.assert_index_equal(bf.columns, other.columns) af, bf = self.frame.align(other, join='inner', axis=1, method='pad') - self.assert_index_equal(bf.columns, other.columns) + tm.assert_index_equal(bf.columns, other.columns) # test other non-float types af, bf = self.intframe.align(other, join='inner', axis=1, method='pad') - self.assert_index_equal(bf.columns, other.columns) + tm.assert_index_equal(bf.columns, other.columns) af, bf = self.mixed_frame.align(self.mixed_frame, join='inner', axis=1, method='pad') - self.assert_index_equal(bf.columns, self.mixed_frame.columns) + tm.assert_index_equal(bf.columns, self.mixed_frame.columns) af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=None) - self.assert_index_equal(bf.index, Index([])) + tm.assert_index_equal(bf.index, Index([])) af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) - self.assert_index_equal(bf.index, Index([])) + tm.assert_index_equal(bf.index, Index([])) # mixed floats/ints af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) - self.assert_index_equal(bf.index, Index([])) + tm.assert_index_equal(bf.index, Index([])) af, bf = 
self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1, method=None, fill_value=0) - self.assert_index_equal(bf.index, Index([])) + tm.assert_index_equal(bf.index, Index([])) - # try to align dataframe to series along bad axis - self.assertRaises(ValueError, self.frame.align, af.iloc[0, :3], - join='inner', axis=2) + # Try to align DataFrame to Series along bad axis + with pytest.raises(ValueError): + self.frame.align(af.iloc[0, :3], join='inner', axis=2) # align dataframe to series with broadcast or not idx = self.frame.index @@ -490,7 +494,7 @@ def test_align(self): left, right = self.frame.align(s, axis=0) tm.assert_index_equal(left.index, self.frame.index) tm.assert_index_equal(right.index, self.frame.index) - self.assertTrue(isinstance(right, Series)) + assert isinstance(right, Series) left, right = self.frame.align(s, broadcast_axis=1) tm.assert_index_equal(left.index, self.frame.index) @@ -499,17 +503,17 @@ def test_align(self): expected[c] = s expected = DataFrame(expected, index=self.frame.index, columns=self.frame.columns) - assert_frame_equal(right, expected) + tm.assert_frame_equal(right, expected) - # GH 9558 + # see gh-9558 df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) result = df[df['a'] == 2] expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b']) - assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) result = df.where(df['a'] == 2, 0) expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]}) - assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) def _check_align(self, a, b, axis, fill_axis, how, method, limit=None): aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit, diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index bfe1b0aae90b1..74ae89a876294 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -41,17 +41,18 @@ def test_cast_internals(self): def 
test_consolidate(self): self.frame['E'] = 7. consolidated = self.frame._consolidate() - self.assertEqual(len(consolidated._data.blocks), 1) + assert len(consolidated._data.blocks) == 1 # Ensure copy, do I want this? recons = consolidated._consolidate() - self.assertIsNot(recons, consolidated) - assert_frame_equal(recons, consolidated) + assert recons is not consolidated + tm.assert_frame_equal(recons, consolidated) self.frame['F'] = 8. - self.assertEqual(len(self.frame._data.blocks), 3) + assert len(self.frame._data.blocks) == 3 + self.frame._consolidate(inplace=True) - self.assertEqual(len(self.frame._data.blocks), 1) + assert len(self.frame._data.blocks) == 1 def test_consolidate_deprecation(self): self.frame['E'] = 7 @@ -343,11 +344,11 @@ def test_no_copy_blocks(self): def test_copy(self): cop = self.frame.copy() cop['E'] = cop['A'] - self.assertNotIn('E', self.frame) + assert 'E' not in self.frame # copy objects copy = self.mixed_frame.copy() - self.assertIsNot(copy._data, self.mixed_frame._data) + assert copy._data is not self.mixed_frame._data def test_pickle(self): unpickled = tm.round_trip_pickle(self.mixed_frame) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index b624657ca4b4b..a1705084c0edf 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -672,19 +672,19 @@ def test_setitem_ambig(self): self.assertEqual(dm[2].dtype, np.object_) def test_setitem_clear_caches(self): - # GH #304 + # see gh-304 df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3]) df.insert(2, 'z', np.nan) # cache it foo = df['z'] - df.loc[df.index[2:], 'z'] = 42 expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z') - self.assertIsNot(df['z'], foo) - assert_series_equal(df['z'], expected) + + assert df['z'] is not foo + tm.assert_series_equal(df['z'], expected) def test_setitem_None(self): # GH #766 diff --git a/pandas/tests/frame/test_missing.py 
b/pandas/tests/frame/test_missing.py index eacf032bbcc85..9bb77a57f0f37 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -403,18 +403,18 @@ def test_fillna_inplace(self): df[3][-4:] = np.nan expected = df.fillna(value=0) - self.assertIsNot(expected, df) + assert expected is not df df.fillna(value=0, inplace=True) - assert_frame_equal(df, expected) + tm.assert_frame_equal(df, expected) df[1][:4] = np.nan df[3][-4:] = np.nan expected = df.fillna(method='ffill') - self.assertIsNot(expected, df) + assert expected is not df df.fillna(method='ffill', inplace=True) - assert_frame_equal(df, expected) + tm.assert_frame_equal(df, expected) def test_fillna_dict_series(self): df = DataFrame({'a': [nan, 1, 2, nan, nan], diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 862f76b4ecc05..37b6f0c261789 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -14,8 +14,7 @@ import pandas as pd import pandas.tseries.offsets as offsets -from pandas.util.testing import (assert_almost_equal, - assert_series_equal, +from pandas.util.testing import (assert_series_equal, assert_frame_equal, assertRaisesRegexp) @@ -355,7 +354,7 @@ def test_asfreq(self): offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd()) rule_monthly = self.tsframe.asfreq('BM') - assert_almost_equal(offset_monthly['A'], rule_monthly['A']) + tm.assert_almost_equal(offset_monthly['A'], rule_monthly['A']) filled = rule_monthly.asfreq('B', method='pad') # noqa # TODO: actually check that this worked. 
@@ -366,7 +365,7 @@ def test_asfreq(self): # test does not blow up on length-0 DataFrame zero_length = self.tsframe.reindex([]) result = zero_length.asfreq('BM') - self.assertIsNot(result, zero_length) + assert result is not zero_length def test_asfreq_datetimeindex(self): df = DataFrame({'A': [1, 2, 3]}, diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 15eceac6b00c9..bec55083829b6 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -214,8 +214,9 @@ def test_hash_error(self): hash(ind) def test_copy_name(self): - # Check that "name" argument passed at initialization is honoured - # GH12309 + # gh-12309: Check that the "name" argument + # passed at initialization is honored. + for name, index in compat.iteritems(self.indices): if isinstance(index, MultiIndex): continue @@ -224,18 +225,21 @@ def test_copy_name(self): second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. - self.assertIsNot(first, second) - # Not using tm.assert_index_equal() since names differ: - self.assertTrue(index.equals(first)) + assert first is not second - self.assertEqual(first.name, 'mario') - self.assertEqual(second.name, 'mario') + # Not using tm.assert_index_equal() since names differ. 
+ assert index.equals(first) + + assert first.name == 'mario' + assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) - if not isinstance(index, CategoricalIndex): # See GH13365 + + if not isinstance(index, CategoricalIndex): + # See gh-13365 s3 = s1 * s2 - self.assertEqual(s3.index.name, 'mario') + assert s3.index.name == 'mario' def test_ensure_copied_data(self): # Check the "copy" argument of each Index.__new__ is honoured @@ -283,11 +287,11 @@ def test_copy_and_deepcopy(self): for func in (copy, deepcopy): idx_copy = func(ind) - self.assertIsNot(idx_copy, ind) - self.assertTrue(idx_copy.equals(ind)) + assert idx_copy is not ind + assert idx_copy.equals(ind) new_copy = ind.copy(deep=True, name="banana") - self.assertEqual(new_copy.name, "banana") + assert new_copy.name == "banana" def test_duplicates(self): for ind in self.indices.values(): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index cc819ff83b1dd..165ad91086d0a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1851,22 +1851,22 @@ def test_copy_name(self): second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. 
- self.assertIsNot(first, second) + assert first is not second # Not using tm.assert_index_equal() since names differ: - self.assertTrue(idx.equals(first)) + assert idx.equals(first) - self.assertEqual(first.name, 'mario') - self.assertEqual(second.name, 'mario') + assert first.name == 'mario' + assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) - if PY3: - with tm.assert_produces_warning(RuntimeWarning): - # unorderable types - s3 = s1 * s2 - else: + + warning_type = RuntimeWarning if PY3 else None + with tm.assert_produces_warning(warning_type): + # Python 3: Unorderable types s3 = s1 * s2 - self.assertEqual(s3.index.name, 'mario') + + assert s3.index.name == 'mario' def test_copy_name2(self): # Check that adding a "name" parameter to the copy is honored diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index f2e409deb2ce4..e714bbd4f9d44 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -536,18 +536,20 @@ def test_identical(self): self.assertFalse(ci1.identical(ci2)) def test_ensure_copied_data(self): - # Check the "copy" argument of each Index.__new__ is honoured - # GH12309 + # gh-12309: Check the "copy" argument of each + # Index.__new__ is honored. + # # Must be tested separately from other indexes because - # self.value is not an ndarray + # self.value is not an ndarray. 
_base = lambda ar: ar if ar.base is None else ar.base + for index in self.indices.values(): result = CategoricalIndex(index.values, copy=True) tm.assert_index_equal(index, result) - self.assertIsNot(_base(index.values), _base(result.values)) + assert _base(index.values) is not _base(result.values) result = CategoricalIndex(index.values, copy=False) - self.assertIs(_base(index.values), _base(result.values)) + assert _base(index.values) is _base(result.values) def test_equals_categorical(self): ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True) diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py index a82409fbf9513..cb90beb6a5bfb 100644 --- a/pandas/tests/indexes/test_frozen.py +++ b/pandas/tests/indexes/test_frozen.py @@ -42,13 +42,13 @@ def setUp(self): def test_shallow_copying(self): original = self.container.copy() - self.assertIsInstance(self.container.view(), FrozenNDArray) - self.assertFalse(isinstance( - self.container.view(np.ndarray), FrozenNDArray)) - self.assertIsNot(self.container.view(), self.container) - self.assert_numpy_array_equal(self.container, original) - # shallow copy should be the same too - self.assertIsInstance(self.container._shallow_copy(), FrozenNDArray) + assert isinstance(self.container.view(), FrozenNDArray) + assert not isinstance(self.container.view(np.ndarray), FrozenNDArray) + assert self.container.view() is not self.container + tm.assert_numpy_array_equal(self.container, original) + + # Shallow copy should be the same too + assert isinstance(self.container._shallow_copy(), FrozenNDArray) # setting should not be allowed def testit(container): @@ -59,10 +59,13 @@ def testit(container): def test_values(self): original = self.container.view(np.ndarray).copy() n = original[0] + 15 + vals = self.container.values() - self.assert_numpy_array_equal(original, vals) - self.assertIsNot(original, vals) + tm.assert_numpy_array_equal(original, vals) + + assert original is not vals vals[0] = 
n - self.assertIsInstance(self.container, FrozenNDArray) - self.assert_numpy_array_equal(self.container.values(), original) - self.assertEqual(vals[0], n) + + assert isinstance(self.container, FrozenNDArray) + tm.assert_numpy_array_equal(self.container.values(), original) + assert vals[0] == n diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index 2d0015a5258ed..d99ef9538c5b1 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -165,11 +165,11 @@ def test_with_nans(self): def test_copy(self): actual = self.index.copy() - self.assertTrue(actual.equals(self.index)) + assert actual.equals(self.index) actual = self.index.copy(deep=True) - self.assertTrue(actual.equals(self.index)) - self.assertIsNot(actual.left, self.index.left) + assert actual.equals(self.index) + assert actual.left is not self.index.left def test_ensure_copied_data(self): # exercise the copy flag in the constructor diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index b33a317eefd44..75ced9439c398 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -584,21 +584,20 @@ def test_constructor_mismatched_label_levels(self): self.index.copy().labels = [[0, 0, 0, 0], [0, 0]] def assert_multiindex_copied(self, copy, original): - # levels should be (at least, shallow copied) - assert_copy(copy.levels, original.levels) + # Levels should be (at least, shallow copied) + tm.assert_copy(copy.levels, original.levels) + tm.assert_almost_equal(copy.labels, original.labels) - assert_almost_equal(copy.labels, original.labels) + # Labels doesn't matter which way copied + tm.assert_almost_equal(copy.labels, original.labels) + assert copy.labels is not original.labels - # labels doesn't matter which way copied - assert_almost_equal(copy.labels, original.labels) - self.assertIsNot(copy.labels, original.labels) + # Names doesn't matter which way copied + assert 
copy.names == original.names + assert copy.names is not original.names - # names doesn't matter which way copied - self.assertEqual(copy.names, original.names) - self.assertIsNot(copy.names, original.names) - - # sort order should be copied - self.assertEqual(copy.sortorder, original.sortorder) + # Sort order should be copied + assert copy.sortorder == original.sortorder def test_copy(self): i_copy = self.index.copy() diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index a4a49e3aeb826..d2116c71048ef 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -18,11 +18,11 @@ class TestSeriesApply(TestData, tm.TestCase): def test_apply(self): with np.errstate(all='ignore'): - assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts)) + tm.assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts)) - # elementwise-apply + # element-wise apply import math - assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts)) + tm.assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts)) # empty series s = Series(dtype=object, name='foo', index=pd.Index([], name='bar')) @@ -30,10 +30,10 @@ def test_apply(self): tm.assert_series_equal(s, rs) # check all metadata (GH 9322) - self.assertIsNot(s, rs) - self.assertIs(s.index, rs.index) - self.assertEqual(s.dtype, rs.dtype) - self.assertEqual(s.name, rs.name) + assert s is not rs + assert s.index is rs.index + assert s.dtype == rs.dtype + assert s.name == rs.name # index but no data s = Series(index=[1, 2, 3]) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 5a88b5bf98699..431e26ae4fdf9 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -240,25 +240,25 @@ def test_asfreq(self): daily_ts = ts.asfreq('B') monthly_ts = daily_ts.asfreq('BM') - assert_series_equal(monthly_ts, ts) + tm.assert_series_equal(monthly_ts, ts) daily_ts = ts.asfreq('B', method='pad') 
monthly_ts = daily_ts.asfreq('BM') - assert_series_equal(monthly_ts, ts) + tm.assert_series_equal(monthly_ts, ts) daily_ts = ts.asfreq(BDay()) monthly_ts = daily_ts.asfreq(BMonthEnd()) - assert_series_equal(monthly_ts, ts) + tm.assert_series_equal(monthly_ts, ts) result = ts[:0].asfreq('M') - self.assertEqual(len(result), 0) - self.assertIsNot(result, ts) + assert len(result) == 0 + assert result is not ts daily_ts = ts.asfreq('D', fill_value=-1) result = daily_ts.value_counts().sort_index() expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index() - assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_asfreq_datetimeindex_empty_series(self): # GH 14320 diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py index 8706a05cfe8a2..d740d8bd26581 100644 --- a/pandas/tests/test_generic.py +++ b/pandas/tests/test_generic.py @@ -687,7 +687,7 @@ def test_copy_and_deepcopy(self): lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)]: obj_copy = func(obj) - self.assertIsNot(obj_copy, obj) + assert obj_copy is not obj self._compare(obj_copy, obj) diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py index b18214bbef926..adca47488413d 100644 --- a/pandas/tests/test_internals.py +++ b/pandas/tests/test_internals.py @@ -248,7 +248,7 @@ def test_merge(self): def test_copy(self): cop = self.fblock.copy() - self.assertIsNot(cop, self.fblock) + assert cop is not self.fblock assert_block_equal(self.fblock, cop) def test_reindex_index(self): diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 4e9805ca9d5a6..184052741aa11 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -883,20 +883,20 @@ def test_set_value(self): for mjr in self.panel.major_axis[::2]: for mnr in self.panel.minor_axis: self.panel.set_value(item, mjr, mnr, 1.) - assert_almost_equal(self.panel[item][mnr][mjr], 1.) + tm.assert_almost_equal(self.panel[item][mnr][mjr], 1.) 
# resize res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5) - tm.assertIsInstance(res, Panel) - self.assertIsNot(res, self.panel) - self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5) + assert isinstance(res, Panel) + assert res is not self.panel + assert res.get_value('ItemE', 'foo', 'bar') == 1.5 res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5) - self.assertTrue(is_float_dtype(res3['ItemE'].values)) - with tm.assertRaisesRegexp(TypeError, - "There must be an argument " - "for each axis" - " plus the value provided"): + assert is_float_dtype(res3['ItemE'].values) + + msg = ("There must be an argument for each " + "axis plus the value provided") + with tm.assertRaisesRegexp(TypeError, msg): self.panel.set_value('a') diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index 3af47a2b408bc..f704c94cff9f0 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -587,20 +587,20 @@ def test_set_value(self): for mjr in self.panel4d.major_axis[::2]: for mnr in self.panel4d.minor_axis: self.panel4d.set_value(label, item, mjr, mnr, 1.) - assert_almost_equal( + tm.assert_almost_equal( self.panel4d[label][item][mnr][mjr], 1.) 
res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5) - self.assertTrue(is_float_dtype(res3['l4'].values)) + assert is_float_dtype(res3['l4'].values) # resize res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5) - tm.assertIsInstance(res, Panel4D) - self.assertIsNot(res, self.panel4d) - self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5) + assert isinstance(res, Panel4D) + assert res is not self.panel4d + assert res.get_value('l4', 'ItemE', 'foo', 'bar') == 1.5 res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5) - self.assertTrue(is_float_dtype(res3['l4'].values)) + assert is_float_dtype(res3['l4'].values) class TestPanel4d(tm.TestCase, CheckIndexing, SafeForSparse, @@ -619,21 +619,21 @@ def test_constructor(self): with catch_warnings(record=True): panel4d = Panel4D(self.panel4d._data) - self.assertIs(panel4d._data, self.panel4d._data) + assert panel4d._data is self.panel4d._data panel4d = Panel4D(self.panel4d._data, copy=True) - self.assertIsNot(panel4d._data, self.panel4d._data) - assert_panel4d_equal(panel4d, self.panel4d) + assert panel4d._data is not self.panel4d._data + tm.assert_panel4d_equal(panel4d, self.panel4d) vals = self.panel4d.values # no copy panel4d = Panel4D(vals) - self.assertIs(panel4d.values, vals) + assert panel4d.values is vals # copy panel4d = Panel4D(vals, copy=True) - self.assertIsNot(panel4d.values, vals) + assert panel4d.values is not vals # GH #8285, test when scalar data is used to construct a Panel4D # if dtype is not passed, it should be inferred @@ -645,7 +645,7 @@ def test_constructor(self): vals = np.empty((2, 3, 4, 5), dtype=dtype) vals.fill(val) expected = Panel4D(vals, dtype=dtype) - assert_panel4d_equal(panel4d, expected) + tm.assert_panel4d_equal(panel4d, expected) # test the case when dtype is passed panel4d = Panel4D(1, labels=range(2), items=range( @@ -654,7 +654,7 @@ def test_constructor(self): vals.fill(1) expected = Panel4D(vals, dtype='float32') - 
assert_panel4d_equal(panel4d, expected) + tm.assert_panel4d_equal(panel4d, expected) def test_constructor_cast(self): with catch_warnings(record=True): diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py index 2ff287acc4c47..bcfa3351ce181 100644 --- a/pandas/tests/tools/test_concat.py +++ b/pandas/tests/tools/test_concat.py @@ -12,8 +12,7 @@ DatetimeIndex) from pandas.util import testing as tm from pandas.util.testing import (assert_frame_equal, - makeCustomDataframe as mkdf, - assert_almost_equal) + makeCustomDataframe as mkdf) import pytest @@ -708,25 +707,25 @@ def test_append(self): end_frame = self.frame.reindex(end_index) appended = begin_frame.append(end_frame) - assert_almost_equal(appended['A'], self.frame['A']) + tm.assert_almost_equal(appended['A'], self.frame['A']) del end_frame['A'] partial_appended = begin_frame.append(end_frame) - self.assertIn('A', partial_appended) + assert 'A' in partial_appended partial_appended = end_frame.append(begin_frame) - self.assertIn('A', partial_appended) + assert 'A' in partial_appended # mixed type handling appended = self.mixed_frame[:5].append(self.mixed_frame[5:]) - assert_frame_equal(appended, self.mixed_frame) + tm.assert_frame_equal(appended, self.mixed_frame) # what to test here mixed_appended = self.mixed_frame[:5].append(self.frame[5:]) mixed_appended2 = self.frame[:5].append(self.mixed_frame[5:]) # all equal except 'foo' column - assert_frame_equal( + tm.assert_frame_equal( mixed_appended.reindex(columns=['A', 'B', 'C', 'D']), mixed_appended2.reindex(columns=['A', 'B', 'C', 'D'])) @@ -734,25 +733,24 @@ def test_append(self): empty = DataFrame({}) appended = self.frame.append(empty) - assert_frame_equal(self.frame, appended) - self.assertIsNot(appended, self.frame) + tm.assert_frame_equal(self.frame, appended) + assert appended is not self.frame appended = empty.append(self.frame) - assert_frame_equal(self.frame, appended) - self.assertIsNot(appended, self.frame) + 
tm.assert_frame_equal(self.frame, appended) + assert appended is not self.frame - # overlap - self.assertRaises(ValueError, self.frame.append, self.frame, - verify_integrity=True) + # Overlap + with pytest.raises(ValueError): + self.frame.append(self.frame, verify_integrity=True) - # new columns - # GH 6129 + # see gh-6129: new columns df = DataFrame({'a': {'x': 1, 'y': 2}, 'b': {'x': 3, 'y': 4}}) row = Series([5, 6, 7], index=['a', 'b', 'c'], name='z') expected = DataFrame({'a': {'x': 1, 'y': 2, 'z': 5}, 'b': { 'x': 3, 'y': 4, 'z': 6}, 'c': {'z': 7}}) result = df.append(row) - assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected) def test_append_length0_frame(self): df = DataFrame(columns=['A', 'B', 'C']) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index e9ec9d553d3e4..45c66627ad4d6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1043,12 +1043,6 @@ def assertIs(first, second, msg=''): assert a is b, "%s: %r is not %r" % (msg.format(a, b), a, b) -def assertIsNot(first, second, msg=''): - """Checks that 'first' is not 'second'""" - a, b = first, second - assert a is not b, "%s: %r is %r" % (msg.format(a, b), a, b) - - def assertIn(first, second, msg=''): """Checks that 'first' is in 'second'""" a, b = first, second @@ -1068,7 +1062,7 @@ def assertIsNone(expr, msg=''): def assertIsNotNone(expr, msg=''): """Checks that 'expr' is not None""" - return assertIsNot(expr, None, msg) + assert expr is not None, msg def assertIsInstance(obj, cls, msg=''): @@ -1178,10 +1172,17 @@ def assert_numpy_array_equal(left, right, strict_nan=False, def _get_base(obj): return obj.base if getattr(obj, 'base', None) is not None else obj + left_base = _get_base(left) + right_base = _get_base(right) + if check_same == 'same': - assertIs(_get_base(left), _get_base(right)) + if left_base is not right_base: + msg = "%r is not %r" % (left_base, right_base) + raise AssertionError(msg) elif check_same == 'copy': - 
assertIsNot(_get_base(left), _get_base(right)) + if left_base is right_base: + msg = "%r is %r" % (left_base, right_base) + raise AssertionError(msg) def _raise(left, right, err_msg): if err_msg is None: From 0e2bbcf95624e5312c9ba0f9de48e9b2a1f2ede0 Mon Sep 17 00:00:00 2001 From: gfyoung Date: Mon, 17 Apr 2017 03:52:52 -0400 Subject: [PATCH 41/56] MAINT: Remove assertIsNotNone from testing (#16027) Follow-up to gh-16024. Also removes some vestigial assertIsNot calls missed in #16024. Partially addresses #15990. --- pandas/tests/core/computation/test_eval.py | 9 +- pandas/tests/frame/test_indexing.py | 41 ++++--- .../indexes/datetimes/test_construction.py | 98 +++++++-------- pandas/tests/indexes/datetimes/test_ops.py | 4 +- pandas/tests/indexes/test_multi.py | 43 ++++--- .../indexing/test_chaining_and_caching.py | 114 ++++++++---------- pandas/tests/io/formats/test_format.py | 4 +- pandas/tests/io/formats/test_printing.py | 6 +- pandas/tests/io/test_pytables.py | 36 +++--- pandas/tests/plotting/test_datetimelike.py | 6 +- pandas/tests/series/test_indexing.py | 12 +- pandas/tests/series/test_timeseries.py | 4 +- pandas/tests/test_base.py | 26 ++-- pandas/tests/test_panel.py | 29 +++-- pandas/tests/test_panel4d.py | 11 +- pandas/tests/tools/test_concat.py | 26 ++-- pandas/util/testing.py | 5 - 17 files changed, 237 insertions(+), 237 deletions(-) diff --git a/pandas/tests/core/computation/test_eval.py b/pandas/tests/core/computation/test_eval.py index 38a310a17a9ab..0ba4fe61ae78f 100644 --- a/pandas/tests/core/computation/test_eval.py +++ b/pandas/tests/core/computation/test_eval.py @@ -1308,16 +1308,17 @@ def test_column_in(self): assert_series_equal(result, expected) def assignment_not_inplace(self): - # GH 9297 + # see gh-9297 df = DataFrame(np.random.randn(5, 2), columns=list('ab')) actual = df.eval('c = a + b', inplace=False) - self.assertIsNotNone(actual) + assert actual is not None + expected = df.copy() expected['c'] = expected['a'] + expected['b'] - 
assert_frame_equal(df, expected) + tm.assert_frame_equal(df, expected) - # default for inplace will change + # Default for inplace will change with tm.assert_produces_warnings(FutureWarning): df.eval('c = a + b') diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index a1705084c0edf..fcd972cb7e09b 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -10,6 +10,8 @@ from numpy import nan from numpy.random import randn + +import pytest import numpy as np import pandas.core.common as com @@ -25,7 +27,6 @@ is_integer, is_scalar) from pandas.util.testing import (assert_almost_equal, - assert_numpy_array_equal, assert_series_equal, assert_frame_equal, assertRaisesRegexp, @@ -40,30 +41,33 @@ class TestDataFrameIndexing(tm.TestCase, TestData): def test_getitem(self): - # slicing + # Slicing sl = self.frame[:20] - self.assertEqual(20, len(sl.index)) - - # column access + assert len(sl.index) == 20 + # Column access for _, series in compat.iteritems(sl): - self.assertEqual(20, len(series.index)) - self.assertTrue(tm.equalContents(series.index, sl.index)) + assert len(series.index) == 20 + assert tm.equalContents(series.index, sl.index) for key, _ in compat.iteritems(self.frame._series): - self.assertIsNotNone(self.frame[key]) + assert self.frame[key] is not None - self.assertNotIn('random', self.frame) + assert 'random' not in self.frame with assertRaisesRegexp(KeyError, 'random'): self.frame['random'] df = self.frame.copy() df['$10'] = randn(len(df)) + ad = randn(len(df)) df['@awesome_domain'] = ad - self.assertRaises(KeyError, df.__getitem__, 'df["$10"]') + + with pytest.raises(KeyError): + df.__getitem__('df["$10"]') + res = df['@awesome_domain'] - assert_numpy_array_equal(ad, res.values) + tm.assert_numpy_array_equal(ad, res.values) def test_getitem_dupe_cols(self): df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) @@ -648,10 +652,10 @@ def test_setitem_corner2(self): 
self.assertEqual(df.loc[1, 'cruft'], 0) def test_setitem_ambig(self): - # difficulties with mixed-type data + # Difficulties with mixed-type data from decimal import Decimal - # created as float type + # Created as float type dm = DataFrame(index=lrange(3), columns=lrange(3)) coercable_series = Series([Decimal(1) for _ in range(3)], @@ -659,17 +663,14 @@ def test_setitem_ambig(self): uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3)) dm[0] = np.ones(3) - self.assertEqual(len(dm.columns), 3) - # self.assertIsNone(dm.objects) + assert len(dm.columns) == 3 dm[1] = coercable_series - self.assertEqual(len(dm.columns), 3) - # self.assertIsNone(dm.objects) + assert len(dm.columns) == 3 dm[2] = uncoercable_series - self.assertEqual(len(dm.columns), 3) - # self.assertIsNotNone(dm.objects) - self.assertEqual(dm[2].dtype, np.object_) + assert len(dm.columns) == 3 + assert dm[2].dtype == np.object_ def test_setitem_clear_caches(self): # see gh-304 diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index d4e672d0584cf..a7c33dd2e00e9 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -53,15 +53,14 @@ def test_construction_with_alt(self): i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific')) def test_construction_index_with_mixed_timezones(self): - # GH 11488 - # no tz results in DatetimeIndex + # gh-11488: no tz results in DatetimeIndex result = Index([Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNone(result.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None # same tz results in DatetimeIndex result = Index([Timestamp('2011-01-01 
10:00', tz='Asia/Tokyo'), @@ -70,10 +69,10 @@ def test_construction_index_with_mixed_timezones(self): exp = DatetimeIndex( [Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00') ], tz='Asia/Tokyo', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz # same tz results in DatetimeIndex (DST) result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'), @@ -82,20 +81,20 @@ def test_construction_index_with_mixed_timezones(self): exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz - # different tz results in Index(dtype=object) + # Different tz results in Index(dtype=object) result = Index([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], name='idx') exp = Index([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertFalse(isinstance(result, DatetimeIndex)) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], @@ -103,37 +102,37 @@ def test_construction_index_with_mixed_timezones(self): exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), Timestamp('2011-01-02 10:00', tz='US/Eastern')], 
dtype='object', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertFalse(isinstance(result, DatetimeIndex)) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) # length = 1 result = Index([Timestamp('2011-01-01')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNone(result.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None # length = 1 with tz result = Index( [Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx') exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz def test_construction_index_with_mixed_timezones_with_NaT(self): - # GH 11488 + # see gh-11488 result = Index([pd.NaT, Timestamp('2011-01-01'), pd.NaT, Timestamp('2011-01-02')], name='idx') exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'), pd.NaT, Timestamp('2011-01-02')], name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNone(result.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None - # same tz results in DatetimeIndex + # Same tz results in DatetimeIndex result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')], @@ -141,10 +140,10 @@ def test_construction_index_with_mixed_timezones_with_NaT(self): exp = DatetimeIndex([pd.NaT, 
Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00')], tz='Asia/Tokyo', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz # same tz results in DatetimeIndex (DST) result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'), @@ -154,10 +153,10 @@ def test_construction_index_with_mixed_timezones_with_NaT(self): exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-08-01 10:00')], tz='US/Eastern', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz # different tz results in Index(dtype=object) result = Index([pd.NaT, Timestamp('2011-01-01 10:00'), @@ -167,8 +166,8 @@ def test_construction_index_with_mixed_timezones_with_NaT(self): exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertFalse(isinstance(result, DatetimeIndex)) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', @@ -176,23 +175,24 @@ def test_construction_index_with_mixed_timezones_with_NaT(self): exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'), pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')], dtype='object', name='idx') - self.assert_index_equal(result, exp, exact=True) - 
self.assertFalse(isinstance(result, DatetimeIndex)) + tm.assert_index_equal(result, exp, exact=True) + assert not isinstance(result, DatetimeIndex) # all NaT result = Index([pd.NaT, pd.NaT], name='idx') exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNone(result.tz) + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is None # all NaT with tz result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx') exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx') - self.assert_index_equal(result, exp, exact=True) - self.assertTrue(isinstance(result, DatetimeIndex)) - self.assertIsNotNone(result.tz) - self.assertEqual(result.tz, exp.tz) + + tm.assert_index_equal(result, exp, exact=True) + assert isinstance(result, DatetimeIndex) + assert result.tz is not None + assert result.tz == exp.tz def test_construction_dti_with_mixed_timezones(self): # GH 11488 (not changed, added explicit tests) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 6e6d6bf190291..8ab29c0c0b6f2 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -1122,7 +1122,7 @@ def test_comparison(self): def test_pickle_unpickle(self): unpickled = tm.round_trip_pickle(self.rng) - self.assertIsNotNone(unpickled.offset) + assert unpickled.offset is not None def test_copy(self): cp = self.rng.copy() @@ -1273,7 +1273,7 @@ def test_shift(self): def test_pickle_unpickle(self): unpickled = tm.round_trip_pickle(self.rng) - self.assertIsNotNone(unpickled.offset) + assert unpickled.offset is not None def test_summary(self): self.rng.summary() diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 75ced9439c398..f907741950b01 100644 --- a/pandas/tests/indexes/test_multi.py +++ 
b/pandas/tests/indexes/test_multi.py @@ -393,39 +393,46 @@ def test_inplace_mutation_resets_values(self): levels = [['a', 'b', 'c'], [4]] levels2 = [[1, 2, 3], ['a']] labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] + mi1 = MultiIndex(levels=levels, labels=labels) mi2 = MultiIndex(levels=levels2, labels=labels) vals = mi1.values.copy() vals2 = mi2.values.copy() - self.assertIsNotNone(mi1._tuples) - # make sure level setting works + assert mi1._tuples is not None + + # Make sure level setting works new_vals = mi1.set_levels(levels2).values - assert_almost_equal(vals2, new_vals) - # non-inplace doesn't kill _tuples [implementation detail] - assert_almost_equal(mi1._tuples, vals) - # and values is still same too - assert_almost_equal(mi1.values, vals) + tm.assert_almost_equal(vals2, new_vals) + + # Non-inplace doesn't kill _tuples [implementation detail] + tm.assert_almost_equal(mi1._tuples, vals) + + # ...and values is still same too + tm.assert_almost_equal(mi1.values, vals) - # inplace should kill _tuples + # Inplace should kill _tuples mi1.set_levels(levels2, inplace=True) - assert_almost_equal(mi1.values, vals2) + tm.assert_almost_equal(mi1.values, vals2) - # make sure label setting works too + # Make sure label setting works too labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] exp_values = np.empty((6, ), dtype=object) exp_values[:] = [(long(1), 'a')] * 6 - # must be 1d array of tuples - self.assertEqual(exp_values.shape, (6, )) + + # Must be 1d array of tuples + assert exp_values.shape == (6, ) new_values = mi2.set_labels(labels2).values - # not inplace shouldn't change - assert_almost_equal(mi2._tuples, vals2) - # should have correct values - assert_almost_equal(exp_values, new_values) - # and again setting inplace should kill _tuples, etc + # Not inplace shouldn't change + tm.assert_almost_equal(mi2._tuples, vals2) + + # Should have correct values + tm.assert_almost_equal(exp_values, new_values) + + # ...and again setting inplace should kill _tuples, etc 
mi2.set_labels(labels2, inplace=True) - assert_almost_equal(mi2.values, new_values) + tm.assert_almost_equal(mi2.values, new_values) def test_copy_in_constructor(self): levels = np.array(["a", "b", "c"]) diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 72e704537ba3f..725de7ce20f5c 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -1,5 +1,7 @@ from warnings import catch_warnings +import pytest + import numpy as np import pandas as pd from pandas.core import common as com @@ -134,7 +136,8 @@ def test_detect_chained_assignment(self): expected = DataFrame([[-5, 1], [-6, 3]], columns=list('AB')) df = DataFrame(np.arange(4).reshape(2, 2), columns=list('AB'), dtype='int64') - self.assertIsNone(df.is_copy) + assert df.is_copy is None + df['A'][0] = -5 df['A'][1] = -6 tm.assert_frame_equal(df, expected) @@ -142,71 +145,56 @@ def test_detect_chained_assignment(self): # test with the chaining df = DataFrame({'A': Series(range(2), dtype='int64'), 'B': np.array(np.arange(2, 4), dtype=np.float64)}) - self.assertIsNone(df.is_copy) + assert df.is_copy is None - def f(): + with pytest.raises(com.SettingWithCopyError): df['A'][0] = -5 - self.assertRaises(com.SettingWithCopyError, f) - - def f(): + with pytest.raises(com.SettingWithCopyError): df['A'][1] = np.nan - self.assertRaises(com.SettingWithCopyError, f) - self.assertIsNone(df['A'].is_copy) + assert df['A'].is_copy is None - # using a copy (the chain), fails + # Using a copy (the chain), fails df = DataFrame({'A': Series(range(2), dtype='int64'), 'B': np.array(np.arange(2, 4), dtype=np.float64)}) - def f(): + with pytest.raises(com.SettingWithCopyError): df.loc[0]['A'] = -5 - self.assertRaises(com.SettingWithCopyError, f) - - # doc example + # Doc example df = DataFrame({'a': ['one', 'one', 'two', 'three', 'two', 'one', 'six'], 'c': Series(range(7), dtype='int64')}) - 
self.assertIsNone(df.is_copy) - expected = DataFrame({'a': ['one', 'one', 'two', 'three', - 'two', 'one', 'six'], - 'c': [42, 42, 2, 3, 4, 42, 6]}) + assert df.is_copy is None - def f(): + with pytest.raises(com.SettingWithCopyError): indexer = df.a.str.startswith('o') df[indexer]['c'] = 42 - self.assertRaises(com.SettingWithCopyError, f) - expected = DataFrame({'A': [111, 'bbb', 'ccc'], 'B': [1, 2, 3]}) df = DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]}) - def f(): + with pytest.raises(com.SettingWithCopyError): df['A'][0] = 111 - self.assertRaises(com.SettingWithCopyError, f) - - def f(): + with pytest.raises(com.SettingWithCopyError): df.loc[0]['A'] = 111 - self.assertRaises(com.SettingWithCopyError, f) - df.loc[0, 'A'] = 111 tm.assert_frame_equal(df, expected) - # make sure that is_copy is picked up reconstruction - # GH5475 + # gh-5475: Make sure that is_copy is picked up reconstruction df = DataFrame({"A": [1, 2]}) - self.assertIsNone(df.is_copy) + assert df.is_copy is None + with tm.ensure_clean('__tmp__pickle') as path: df.to_pickle(path) df2 = pd.read_pickle(path) df2["B"] = df2["A"] df2["B"] = df2["A"] - # a suprious raise as we are setting the entire column here - # GH5597 + # gh-5597: a spurious raise as we are setting the entire column here from string import ascii_letters as letters def random_text(nobs=100): @@ -214,42 +202,48 @@ def random_text(nobs=100): for i in range(nobs): idx = np.random.randint(len(letters), size=2) idx.sort() + df.append([letters[idx[0]:idx[1]]]) return DataFrame(df, columns=['letters']) df = random_text(100000) - # always a copy + # Always a copy x = df.iloc[[0, 1, 2]] - self.assertIsNotNone(x.is_copy) + assert x.is_copy is not None + x = df.iloc[[0, 1, 2, 4]] - self.assertIsNotNone(x.is_copy) + assert x.is_copy is not None - # explicity copy + # Explicitly copy indexer = df.letters.apply(lambda x: len(x) > 10) df = df.loc[indexer].copy() - self.assertIsNone(df.is_copy) + + assert df.is_copy is None df['letters'] = 
df['letters'].apply(str.lower) - # implicity take + # Implicitly take df = random_text(100000) indexer = df.letters.apply(lambda x: len(x) > 10) df = df.loc[indexer] - self.assertIsNotNone(df.is_copy) + + assert df.is_copy is not None df['letters'] = df['letters'].apply(str.lower) - # implicity take 2 + # Implicitly take 2 df = random_text(100000) indexer = df.letters.apply(lambda x: len(x) > 10) + df = df.loc[indexer] - self.assertIsNotNone(df.is_copy) + assert df.is_copy is not None df.loc[:, 'letters'] = df['letters'].apply(str.lower) - # should be ok even though it's a copy! - self.assertIsNone(df.is_copy) + # Should be ok even though it's a copy! + assert df.is_copy is None + df['letters'] = df['letters'].apply(str.lower) - self.assertIsNone(df.is_copy) + assert df.is_copy is None df = random_text(100000) indexer = df.letters.apply(lambda x: len(x) > 10) @@ -258,11 +252,10 @@ def random_text(nobs=100): # an identical take, so no copy df = DataFrame({'a': [1]}).dropna() - self.assertIsNone(df.is_copy) + assert df.is_copy is None df['a'] += 1 - # inplace ops - # original from: + # Inplace ops, originally from: # http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug a = [12, 23] b = [123, None] @@ -277,23 +270,25 @@ def random_text(nobs=100): multiind = MultiIndex.from_tuples(tuples, names=['part', 'side']) zed = DataFrame(events, index=['a', 'b'], columns=multiind) - def f(): + with pytest.raises(com.SettingWithCopyError): zed['eyes']['right'].fillna(value=555, inplace=True) - self.assertRaises(com.SettingWithCopyError, f) - df = DataFrame(np.random.randn(10, 4)) s = df.iloc[:, 0].sort_values() + tm.assert_series_equal(s, df.iloc[:, 0].sort_values()) tm.assert_series_equal(s, df[0].sort_values()) - # false positives GH6025 + # see gh-6025: false positives df = DataFrame({'column1': ['a', 'a', 'a'], 'column2': [4, 8, 9]}) str(df) + df['column1'] = df['column1'] + 'b' str(df) + df = df[df['column2'] != 8] 
str(df) + df['column1'] = df['column1'] + 'c' str(df) @@ -302,33 +297,24 @@ def f(): df = DataFrame(np.arange(0, 9), columns=['count']) df['group'] = 'b' - def f(): + with pytest.raises(com.SettingWithCopyError): df.iloc[0:5]['group'] = 'a' - self.assertRaises(com.SettingWithCopyError, f) - - # mixed type setting - # same dtype & changing dtype + # Mixed type setting but same dtype & changing dtype df = DataFrame(dict(A=date_range('20130101', periods=5), B=np.random.randn(5), C=np.arange(5, dtype='int64'), D=list('abcde'))) - def f(): + with pytest.raises(com.SettingWithCopyError): df.loc[2]['D'] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) - - def f(): + with pytest.raises(com.SettingWithCopyError): df.loc[2]['C'] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) - - def f(): + with pytest.raises(com.SettingWithCopyError): df['C'][2] = 'foo' - self.assertRaises(com.SettingWithCopyError, f) - def test_setting_with_copy_bug(self): # operating on a copy diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index bb766ae389a10..354ce99f567ea 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1546,12 +1546,12 @@ def get_ipython(): {'parent_appname': 'ipython-qtconsole'}}} repstr = self.frame._repr_html_() - self.assertIsNotNone(repstr) + assert repstr is not None fmt.set_option('display.max_rows', 5, 'display.max_columns', 2) repstr = self.frame._repr_html_() - self.assertIn('class', repstr) # info fallback + assert 'class' in repstr # info fallback tm.reset_display_options() def test_pprint_pathological_object(self): diff --git a/pandas/tests/io/formats/test_printing.py b/pandas/tests/io/formats/test_printing.py index 0df35da05578a..d2c3b47aba042 100644 --- a/pandas/tests/io/formats/test_printing.py +++ b/pandas/tests/io/formats/test_printing.py @@ -170,13 +170,15 @@ def test_config_on(self): df = pd.DataFrame({"A": [1, 2]}) with 
pd.option_context("display.html.table_schema", True): result = df._repr_table_schema_() - self.assertIsNotNone(result) + + assert result is not None def test_config_default_off(self): df = pd.DataFrame({"A": [1, 2]}) with pd.option_context("display.html.table_schema", False): result = df._repr_table_schema_() - self.assertIsNone(result) + + assert result is None # TODO: fix this broken test diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index f28b2a0231433..57effd8163be5 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4667,7 +4667,7 @@ def test_categorical(self): with ensure_clean_store(self.path) as store: - # basic + # Basic _maybe_remove(store, 's') s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ 'a', 'b', 'c', 'd'], ordered=False)) @@ -4683,12 +4683,13 @@ def test_categorical(self): tm.assert_series_equal(s, result) _maybe_remove(store, 'df') + df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]}) store.append('df', df, format='table') result = store.select('df') tm.assert_frame_equal(result, df) - # dtypes + # Dtypes s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category') store.append('si', s) result = store.select('si') @@ -4699,17 +4700,17 @@ def test_categorical(self): result = store.select('si2') tm.assert_series_equal(result, s) - # multiple + # Multiple df2 = df.copy() df2['s2'] = Series(list('abcdefg')).astype('category') store.append('df2', df2) result = store.select('df2') tm.assert_frame_equal(result, df2) - # make sure the metadata is ok - self.assertTrue('/df2 ' in str(store)) - self.assertTrue('/df2/meta/values_block_0/meta' in str(store)) - self.assertTrue('/df2/meta/values_block_1/meta' in str(store)) + # Make sure the metadata is OK + assert '/df2 ' in str(store) + assert '/df2/meta/values_block_0/meta' in str(store) + assert '/df2/meta/values_block_1/meta' in str(store) # unordered s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ @@ 
-4718,7 +4719,7 @@ def test_categorical(self): result = store.select('s2') tm.assert_series_equal(result, s) - # query + # Query store.append('df3', df, data_columns=['s']) expected = df[df.s.isin(['b', 'c'])] result = store.select('df3', where=['s in ["b","c"]']) @@ -4736,7 +4737,7 @@ def test_categorical(self): result = store.select('df3', where=['s in ["f"]']) tm.assert_frame_equal(result, expected) - # appending with same categories is ok + # Appending with same categories is ok store.append('df3', df) df = concat([df, df]) @@ -4744,20 +4745,21 @@ def test_categorical(self): result = store.select('df3', where=['s in ["b","c"]']) tm.assert_frame_equal(result, expected) - # appending must have the same categories + # Appending must have the same categories df3 = df.copy() df3['s'].cat.remove_unused_categories(inplace=True) - self.assertRaises(ValueError, lambda: store.append('df3', df3)) + with pytest.raises(ValueError): + store.append('df3', df3) - # remove - # make sure meta data is removed (its a recursive removal so should - # be) + # Remove, and make sure meta data is removed (its a recursive + # removal so should be). 
result = store.select('df3/meta/s/meta') - self.assertIsNotNone(result) + assert result is not None store.remove('df3') - self.assertRaises( - KeyError, lambda: store.select('df3/meta/s/meta')) + + with pytest.raises(KeyError): + store.select('df3/meta/s/meta') def test_categorical_conversion(self): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index b3692c5a8d2d2..547770ebcf6e5 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -296,12 +296,14 @@ def test_irregular_datetime64_repr_bug(self): fig = plt.gcf() plt.clf() + ax = fig.add_subplot(211) + ret = ser.plot() - self.assertIsNotNone(ret) + assert ret is not None for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index): - self.assertEqual(rs, xp) + assert rs == xp def test_business_freq(self): import matplotlib.pyplot as plt # noqa diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 6c1d77acd70d5..38251ab0b228b 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -1873,14 +1873,14 @@ def test_align_nocopy(self): rb[:2] = 5 self.assertTrue((b[:2] == 5).all()) - def test_align_sameindex(self): + def test_align_same_index(self): a, b = self.ts.align(self.ts, copy=False) - self.assertIs(a.index, self.ts.index) - self.assertIs(b.index, self.ts.index) + assert a.index is self.ts.index + assert b.index is self.ts.index - # a, b = self.ts.align(self.ts, copy=True) - # self.assertIsNot(a.index, self.ts.index) - # self.assertIsNot(b.index, self.ts.index) + a, b = self.ts.align(self.ts, copy=True) + assert a.index is not self.ts.index + assert b.index is not self.ts.index def test_align_multiindex(self): # GH 10665 diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 431e26ae4fdf9..0f960a890e72b 100644 --- a/pandas/tests/series/test_timeseries.py +++ 
b/pandas/tests/series/test_timeseries.py @@ -411,12 +411,12 @@ def test_contiguous_boolean_preserve_freq(self): masked = rng[mask] expected = rng[10:20] - self.assertIsNotNone(expected.freq) + assert expected.freq is not None assert_range_equal(masked, expected) mask[22] = True masked = rng[mask] - self.assertIsNone(masked.freq) + assert masked.freq is None def test_to_datetime_unit(self): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 91c06a2c30e50..148f2ae425629 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -304,26 +304,28 @@ def test_none_comparison(self): def test_ndarray_compat_properties(self): for o in self.objs: + # Check that we work. + for p in ['shape', 'dtype', 'flags', 'T', + 'strides', 'itemsize', 'nbytes']: + assert getattr(o, p, None) is not None - # check that we work - for p in ['shape', 'dtype', 'flags', 'T', 'strides', 'itemsize', - 'nbytes']: - self.assertIsNotNone(getattr(o, p, None)) - self.assertTrue(hasattr(o, 'base')) + assert hasattr(o, 'base') - # if we have a datetimelike dtype then needs a view to work + # If we have a datetime-like dtype then needs a view to work # but the user is responsible for that try: - self.assertIsNotNone(o.data) + assert o.data is not None except ValueError: pass - self.assertRaises(ValueError, o.item) # len > 1 - self.assertEqual(o.ndim, 1) - self.assertEqual(o.size, len(o)) + with pytest.raises(ValueError): + o.item() # len > 1 - self.assertEqual(Index([1]).item(), 1) - self.assertEqual(Series([1]).item(), 1) + assert o.ndim == 1 + assert o.size == len(o) + + assert Index([1]).item() == 1 + assert Series([1]).item() == 1 def test_ops(self): for op in ['max', 'min']: diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 184052741aa11..55e0e512169fb 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -597,17 +597,18 @@ def test_xs(self): with catch_warnings(record=True): itemA = self.panel.xs('ItemA', axis=0) 
expected = self.panel['ItemA'] - assert_frame_equal(itemA, expected) + tm.assert_frame_equal(itemA, expected) - # get a view by default + # Get a view by default. itemA_view = self.panel.xs('ItemA', axis=0) itemA_view.values[:] = np.nan - self.assertTrue(np.isnan(self.panel['ItemA'].values).all()) - # mixed-type yields a copy + assert np.isnan(self.panel['ItemA'].values).all() + + # Mixed-type yields a copy. self.panel['strings'] = 'foo' result = self.panel.xs('D', axis=2) - self.assertIsNotNone(result.is_copy) + assert result.is_copy is not None def test_getitem_fancy_labels(self): with catch_warnings(record=True): @@ -917,25 +918,25 @@ def test_constructor(self): with catch_warnings(record=True): # with BlockManager wp = Panel(self.panel._data) - self.assertIs(wp._data, self.panel._data) + assert wp._data is self.panel._data wp = Panel(self.panel._data, copy=True) - self.assertIsNot(wp._data, self.panel._data) - assert_panel_equal(wp, self.panel) + assert wp._data is not self.panel._data + tm.assert_panel_equal(wp, self.panel) # strings handled prop wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]]) - self.assertEqual(wp.values.dtype, np.object_) + assert wp.values.dtype == np.object_ vals = self.panel.values # no copy wp = Panel(vals) - self.assertIs(wp.values, vals) + assert wp.values is vals # copy wp = Panel(vals, copy=True) - self.assertIsNot(wp.values, vals) + assert wp.values is not vals # GH #8285, test when scalar data is used to construct a Panel # if dtype is not passed, it should be inferred @@ -946,7 +947,8 @@ def test_constructor(self): minor_axis=range(4)) vals = np.empty((2, 3, 4), dtype=dtype) vals.fill(val) - assert_panel_equal(wp, Panel(vals, dtype=dtype)) + + tm.assert_panel_equal(wp, Panel(vals, dtype=dtype)) # test the case when dtype is passed wp = Panel(1, items=range(2), major_axis=range(3), @@ -954,7 +956,8 @@ def test_constructor(self): dtype='float32') vals = np.empty((2, 3, 4), dtype='float32') vals.fill(1) - 
assert_panel_equal(wp, Panel(vals, dtype='float32')) + + tm.assert_panel_equal(wp, Panel(vals, dtype='float32')) def test_constructor_cast(self): with catch_warnings(record=True): diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index f704c94cff9f0..fa3bb2d66b573 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -510,18 +510,19 @@ def test_minor_xs_mixed(self): def test_xs(self): l1 = self.panel4d.xs('l1', axis=0) expected = self.panel4d['l1'] - assert_panel_equal(l1, expected) + tm.assert_panel_equal(l1, expected) - # view if possible + # View if possible l1_view = self.panel4d.xs('l1', axis=0) l1_view.values[:] = np.nan - self.assertTrue(np.isnan(self.panel4d['l1'].values).all()) + assert np.isnan(self.panel4d['l1'].values).all() - # mixed-type + # Mixed-type self.panel4d['strings'] = 'foo' with catch_warnings(record=True): result = self.panel4d.xs('D', axis=3) - self.assertIsNotNone(result.is_copy) + + assert result.is_copy is not None def test_getitem_fancy_labels(self): with catch_warnings(record=True): diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/tools/test_concat.py index bcfa3351ce181..e6514a1e2e81e 100644 --- a/pandas/tests/tools/test_concat.py +++ b/pandas/tests/tools/test_concat.py @@ -854,39 +854,37 @@ def test_append_missing_column_proper_upcast(self): class TestConcatenate(ConcatenateBase): def test_concat_copy(self): - df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1)) df3 = DataFrame({5: 'foo'}, index=range(4)) - # these are actual copies + # These are actual copies. result = concat([df, df2, df3], axis=1, copy=True) + for b in result._data.blocks: - self.assertIsNone(b.values.base) + assert b.values.base is None - # these are the same + # These are the same. 
result = concat([df, df2, df3], axis=1, copy=False) + for b in result._data.blocks: if b.is_float: - self.assertTrue( - b.values.base is df._data.blocks[0].values.base) + assert b.values.base is df._data.blocks[0].values.base elif b.is_integer: - self.assertTrue( - b.values.base is df2._data.blocks[0].values.base) + assert b.values.base is df2._data.blocks[0].values.base elif b.is_object: - self.assertIsNotNone(b.values.base) + assert b.values.base is not None - # float block was consolidated + # Float block was consolidated. df4 = DataFrame(np.random.randn(4, 1)) result = concat([df, df2, df3, df4], axis=1, copy=False) for b in result._data.blocks: if b.is_float: - self.assertIsNone(b.values.base) + assert b.values.base is None elif b.is_integer: - self.assertTrue( - b.values.base is df2._data.blocks[0].values.base) + assert b.values.base is df2._data.blocks[0].values.base elif b.is_object: - self.assertIsNotNone(b.values.base) + assert b.values.base is not None def test_concat_with_group_keys(self): df = DataFrame(np.random.randn(4, 3)) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 45c66627ad4d6..b5797674641c8 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1060,11 +1060,6 @@ def assertIsNone(expr, msg=''): return assertIs(expr, None, msg) -def assertIsNotNone(expr, msg=''): - """Checks that 'expr' is not None""" - assert expr is not None, msg - - def assertIsInstance(obj, cls, msg=''): """Test that obj is an instance of cls (which can be a class or a tuple of classes, From c6060a80933f9b2495f9f8c1e7a215bac7f85f19 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Mon, 17 Apr 2017 13:14:38 +0200 Subject: [PATCH 42/56] CLN: clean benchmarks to get them running (#16025) * fix lib and algos import * fix take_1d import * string uppercase -> ascii_uppercase (py3 compat) * sas test file path * fix datetools usage * fix hashing benchmarks * dict values py3 compat * avoid overflow by using higher freq * xrange -> range * 
fix xport path * revised hdfstore_bench to use new query syntax rename table variables * change default python version to 3.6 --- asv_bench/asv.conf.json | 2 +- asv_bench/benchmarks/algorithms.py | 13 +++++-- asv_bench/benchmarks/frame_ctor.py | 4 +- asv_bench/benchmarks/frame_methods.py | 4 +- asv_bench/benchmarks/gil.py | 18 ++++++--- asv_bench/benchmarks/groupby.py | 2 +- asv_bench/benchmarks/hdfstore_bench.py | 48 ++++++++++++------------ asv_bench/benchmarks/inference.py | 4 +- asv_bench/benchmarks/join_merge.py | 4 +- asv_bench/benchmarks/packers.py | 16 ++++---- asv_bench/benchmarks/pandas_vb_common.py | 4 +- asv_bench/benchmarks/panel_ctor.py | 5 ++- asv_bench/benchmarks/replace.py | 2 - asv_bench/benchmarks/timeseries.py | 3 +- 14 files changed, 68 insertions(+), 61 deletions(-) diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 4fc6f9f634426..62f1c090a7462 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -26,7 +26,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. // "pythons": ["2.7", "3.4"], - "pythons": ["2.7"], + "pythons": ["3.6"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. 
An empty diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index fe657936c403e..0e2182c58d44c 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -2,6 +2,11 @@ import pandas as pd from pandas.util import testing as tm +try: + from pandas.tools.hashing import hash_pandas_object +except ImportError: + pass + class Algorithms(object): goal_time = 0.2 @@ -103,13 +108,13 @@ def setup(self): self.df.iloc[10:20] = np.nan def time_frame(self): - self.df.hash() + hash_pandas_object(self.df) def time_series_int(self): - self.df.E.hash() + hash_pandas_object(self.df.E) def time_series_string(self): - self.df.B.hash() + hash_pandas_object(self.df.B) def time_series_categorical(self): - self.df.C.hash() + hash_pandas_object(self.df.C) diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index 05c1a27fdf8ca..dec4fcba0eb5e 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -20,12 +20,12 @@ def setup(self): self.data = self.frame.to_dict() except: self.data = self.frame.toDict() - self.some_dict = self.data.values()[0] + self.some_dict = list(self.data.values())[0] self.dict_list = [dict(zip(self.columns, row)) for row in self.frame.values] self.data2 = dict( ((i, dict(((j, float(j)) for j in range(100)))) for i in - xrange(2000))) + range(2000))) def time_frame_ctor_list_of_dict(self): DataFrame(self.dict_list) diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 9f491302a4d6f..af72ca1e9a6ab 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -56,7 +56,7 @@ def time_reindex_both_axes_ix(self): self.df.ix[(self.idx, self.idx)] def time_reindex_upcast(self): - self.df2.reindex(permutation(range(1200))) + self.df2.reindex(np.random.permutation(range(1200))) #---------------------------------------------------------------------- @@ -583,7 +583,7 @@ 
class frame_assign_timeseries_index(object): goal_time = 0.2 def setup(self): - self.idx = date_range('1/1/2000', periods=100000, freq='D') + self.idx = date_range('1/1/2000', periods=100000, freq='H') self.df = DataFrame(randn(100000, 1), columns=['A'], index=self.idx) def time_frame_assign_timeseries_index(self): diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 1c5e59672cb57..78a94976e732d 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,11 +1,17 @@ from .pandas_vb_common import * -from pandas.core import common as com + +from pandas.core.algorithms import take_1d try: from cStringIO import StringIO except ImportError: from io import StringIO +try: + from pandas._libs import algos +except ImportError: + from pandas import algos + try: from pandas.util.testing import test_parallel @@ -167,11 +173,11 @@ def time_nogil_take1d_float64(self): @test_parallel(num_threads=2) def take_1d_pg2_int64(self): - com.take_1d(self.df.int64.values, self.indexer) + take_1d(self.df.int64.values, self.indexer) @test_parallel(num_threads=2) def take_1d_pg2_float64(self): - com.take_1d(self.df.float64.values, self.indexer) + take_1d(self.df.float64.values, self.indexer) class nogil_take1d_int64(object): @@ -193,11 +199,11 @@ def time_nogil_take1d_int64(self): @test_parallel(num_threads=2) def take_1d_pg2_int64(self): - com.take_1d(self.df.int64.values, self.indexer) + take_1d(self.df.int64.values, self.indexer) @test_parallel(num_threads=2) def take_1d_pg2_float64(self): - com.take_1d(self.df.float64.values, self.indexer) + take_1d(self.df.float64.values, self.indexer) class nogil_kth_smallest(object): @@ -226,7 +232,7 @@ class nogil_datetime_fields(object): def setup(self): self.N = 100000000 - self.dti = pd.date_range('1900-01-01', periods=self.N, freq='D') + self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T') self.period = self.dti.to_period('D') if (not have_real_test_parallel): raise NotImplementedError diff 
--git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index b8d8e8b7912d7..c0c3a42cc4464 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -331,7 +331,7 @@ def setup(self): def get_test_data(self, ngroups=100, n=100000): self.unique_groups = range(self.ngroups) - self.arr = np.asarray(np.tile(self.unique_groups, (n / self.ngroups)), dtype=object) + self.arr = np.asarray(np.tile(self.unique_groups, int(n / self.ngroups)), dtype=object) if (len(self.arr) < n): self.arr = np.asarray((list(self.arr) + self.unique_groups[:(n - len(self.arr))]), dtype=object) random.shuffle(self.arr) diff --git a/asv_bench/benchmarks/hdfstore_bench.py b/asv_bench/benchmarks/hdfstore_bench.py index 78de5267a2969..dc72f3d548aaf 100644 --- a/asv_bench/benchmarks/hdfstore_bench.py +++ b/asv_bench/benchmarks/hdfstore_bench.py @@ -31,16 +31,12 @@ def setup(self): self.remove(self.f) self.store = HDFStore(self.f) - self.store.put('df1', self.df) - self.store.put('df_mixed', self.df_mixed) - - self.store.append('df5', self.df_mixed) - self.store.append('df7', self.df) - - self.store.append('df9', self.df_wide) - - self.store.append('df11', self.df_wide2) - self.store.append('df12', self.df2) + self.store.put('fixed', self.df) + self.store.put('fixed_mixed', self.df_mixed) + self.store.append('table', self.df2) + self.store.append('table_mixed', self.df_mixed) + self.store.append('table_wide', self.df_wide) + self.store.append('table_wide2', self.df_wide2) def teardown(self): self.store.close() @@ -52,45 +48,47 @@ def remove(self, f): pass def time_read_store(self): - self.store.get('df1') + self.store.get('fixed') def time_read_store_mixed(self): - self.store.get('df_mixed') + self.store.get('fixed_mixed') def time_write_store(self): - self.store.put('df2', self.df) + self.store.put('fixed_write', self.df) def time_write_store_mixed(self): - self.store.put('df_mixed2', self.df_mixed) + self.store.put('fixed_mixed_write', self.df_mixed) def 
time_read_store_table_mixed(self): - self.store.select('df5') + self.store.select('table_mixed') def time_write_store_table_mixed(self): - self.store.append('df6', self.df_mixed) + self.store.append('table_mixed_write', self.df_mixed) def time_read_store_table(self): - self.store.select('df7') + self.store.select('table') def time_write_store_table(self): - self.store.append('df8', self.df) + self.store.append('table_write', self.df) def time_read_store_table_wide(self): - self.store.select('df9') + self.store.select('table_wide') def time_write_store_table_wide(self): - self.store.append('df10', self.df_wide) + self.store.append('table_wide_write', self.df_wide) def time_write_store_table_dc(self): - self.store.append('df15', self.df, data_columns=True) + self.store.append('table_dc_write', self.df_dc, data_columns=True) def time_query_store_table_wide(self): - self.store.select('df11', [('index', '>', self.df_wide2.index[10000]), - ('index', '<', self.df_wide2.index[15000])]) + start = self.df_wide2.index[10000] + stop = self.df_wide2.index[15000] + self.store.select('table_wide', where="index > start and index < stop") def time_query_store_table(self): - self.store.select('df12', [('index', '>', self.df2.index[10000]), - ('index', '<', self.df2.index[15000])]) + start = self.df2.index[10000] + stop = self.df2.index[15000] + self.store.select('table', where="index > start and index < stop") class HDF5Panel(object): diff --git a/asv_bench/benchmarks/inference.py b/asv_bench/benchmarks/inference.py index 3635438a7f76b..dc1d6de73f8ae 100644 --- a/asv_bench/benchmarks/inference.py +++ b/asv_bench/benchmarks/inference.py @@ -113,5 +113,5 @@ def setup(self): self.na_values = set() def time_convert(self): - pd.lib.maybe_convert_numeric(self.data, self.na_values, - coerce_numeric=False) + lib.maybe_convert_numeric(self.data, self.na_values, + coerce_numeric=False) diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 
776316343e009..3b0e33b72ddc1 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -314,12 +314,12 @@ def setup(self): self.df1 = pd.DataFrame( {'time': np.random.randint(0, one_count / 20, one_count), - 'key': np.random.choice(list(string.uppercase), one_count), + 'key': np.random.choice(list(string.ascii_uppercase), one_count), 'key2': np.random.randint(0, 25, one_count), 'value1': np.random.randn(one_count)}) self.df2 = pd.DataFrame( {'time': np.random.randint(0, two_count / 20, two_count), - 'key': np.random.choice(list(string.uppercase), two_count), + 'key': np.random.choice(list(string.ascii_uppercase), two_count), 'key2': np.random.randint(0, 25, two_count), 'value2': np.random.randn(two_count)}) diff --git a/asv_bench/benchmarks/packers.py b/asv_bench/benchmarks/packers.py index cd43e305ead8f..24f80cc836dd4 100644 --- a/asv_bench/benchmarks/packers.py +++ b/asv_bench/benchmarks/packers.py @@ -153,18 +153,20 @@ def time_packers_read_stata_with_validation(self): class packers_read_sas(_Packers): def setup(self): - self.f = os.path.join(os.path.dirname(__file__), '..', '..', - 'pandas', 'io', 'tests', 'sas', 'data', - 'test1.sas7bdat') - self.f2 = os.path.join(os.path.dirname(__file__), '..', '..', - 'pandas', 'io', 'tests', 'sas', 'data', - 'paxraw_d_short.xpt') + + testdir = os.path.join(os.path.dirname(__file__), '..', '..', + 'pandas', 'tests', 'io', 'sas') + if not os.path.exists(testdir): + testdir = os.path.join(os.path.dirname(__file__), '..', '..', + 'pandas', 'io', 'tests', 'sas') + self.f = os.path.join(testdir, 'data', 'test1.sas7bdat') + self.f2 = os.path.join(testdir, 'data', 'paxraw_d_short.xpt') def time_read_sas7bdat(self): pd.read_sas(self.f, format='sas7bdat') def time_read_xport(self): - pd.read_sas(self.f, format='xport') + pd.read_sas(self.f2, format='xport') class CSV(_Packers): diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 
56ccc94c414fb..b1a58e49fe86c 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -1,9 +1,7 @@ from pandas import * import pandas as pd -from datetime import timedelta from numpy.random import randn from numpy.random import randint -from numpy.random import permutation import pandas.util.testing as tm import random import numpy as np @@ -18,7 +16,7 @@ np.random.seed(1234) # try em until it works! -for imp in ['pandas_tseries', 'pandas.lib', 'pandas._libs.lib']: +for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']: try: lib = import_module(imp) break diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py index faedce6c574ec..cc6071b054662 100644 --- a/asv_bench/benchmarks/panel_ctor.py +++ b/asv_bench/benchmarks/panel_ctor.py @@ -1,4 +1,5 @@ from .pandas_vb_common import * +from datetime import timedelta class Constructors1(object): @@ -24,7 +25,7 @@ class Constructors2(object): def setup(self): self.data_frames = {} for x in range(100): - self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1))) + self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D')) self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) self.data_frames[x] = self.df @@ -36,7 +37,7 @@ class Constructors3(object): goal_time = 0.2 def setup(self): - self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq=datetools.Day(1))) + self.dr = np.asarray(DatetimeIndex(start=datetime(1990, 1, 1), end=datetime(2012, 1, 1), freq='D')) self.data_frames = {} for x in range(100): self.df = DataFrame({'a': ([0] * len(self.dr)), 'b': ([1] * len(self.dr)), 'c': ([2] * len(self.dr)), }, index=self.dr) diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py index 66b8af53801ac..63562f90eab2b 100644 --- 
a/asv_bench/benchmarks/replace.py +++ b/asv_bench/benchmarks/replace.py @@ -1,6 +1,4 @@ from .pandas_vb_common import * -from pandas.compat import range -from datetime import timedelta class replace_fillna(object): diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index b63b3386a7563..f5ea4d7875931 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -4,7 +4,6 @@ from pandas.tseries.converter import DatetimeConverter from .pandas_vb_common import * import pandas as pd -from datetime import timedelta import datetime as dt try: import pandas.tseries.holiday @@ -57,7 +56,7 @@ def setup(self): self.a = self.rng7[:50000].append(self.rng7[50002:]) def time_add_timedelta(self): - (self.rng + timedelta(minutes=2)) + (self.rng + dt.timedelta(minutes=2)) def time_add_offset_delta(self): (self.rng + self.delta_offset) From 90dd3f94cf120f628c086fdd965fc82878951bd9 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 17 Apr 2017 11:20:53 +0000 Subject: [PATCH 43/56] TST: reduce amount of nesting in tests, specifically move core routines up higher (#16030) --- pandas/tests/{core => computation}/__init__.py | 0 pandas/tests/{core => }/computation/test_compat.py | 0 pandas/tests/{core => }/computation/test_eval.py | 0 pandas/tests/core/sparse/__init__.py | 0 pandas/tests/{core/computation => dtypes}/__init__.py | 0 pandas/tests/{core => }/dtypes/test_cast.py | 0 pandas/tests/{core => }/dtypes/test_common.py | 0 pandas/tests/{core => }/dtypes/test_concat.py | 0 pandas/tests/{core => }/dtypes/test_dtypes.py | 0 pandas/tests/{core => }/dtypes/test_generic.py | 0 pandas/tests/{core => }/dtypes/test_inference.py | 0 pandas/tests/{core => }/dtypes/test_io.py | 0 pandas/tests/{core => }/dtypes/test_missing.py | 0 pandas/tests/{core/dtypes => sparse}/__init__.py | 0 pandas/tests/{core => }/sparse/common.py | 0 pandas/tests/{core => }/sparse/test_arithmetics.py | 0 pandas/tests/{core => }/sparse/test_array.py | 0 
pandas/tests/{core => }/sparse/test_combine_concat.py | 0 pandas/tests/{core => }/sparse/test_format.py | 0 pandas/tests/{core => }/sparse/test_frame.py | 0 pandas/tests/{core => }/sparse/test_groupby.py | 0 pandas/tests/{core => }/sparse/test_indexing.py | 0 pandas/tests/{core => }/sparse/test_libsparse.py | 0 pandas/tests/{core => }/sparse/test_list.py | 0 pandas/tests/{core => }/sparse/test_pivot.py | 0 pandas/tests/{core => }/sparse/test_series.py | 0 setup.py | 6 +++--- 27 files changed, 3 insertions(+), 3 deletions(-) rename pandas/tests/{core => computation}/__init__.py (100%) rename pandas/tests/{core => }/computation/test_compat.py (100%) rename pandas/tests/{core => }/computation/test_eval.py (100%) delete mode 100644 pandas/tests/core/sparse/__init__.py rename pandas/tests/{core/computation => dtypes}/__init__.py (100%) rename pandas/tests/{core => }/dtypes/test_cast.py (100%) rename pandas/tests/{core => }/dtypes/test_common.py (100%) rename pandas/tests/{core => }/dtypes/test_concat.py (100%) rename pandas/tests/{core => }/dtypes/test_dtypes.py (100%) rename pandas/tests/{core => }/dtypes/test_generic.py (100%) rename pandas/tests/{core => }/dtypes/test_inference.py (100%) rename pandas/tests/{core => }/dtypes/test_io.py (100%) rename pandas/tests/{core => }/dtypes/test_missing.py (100%) rename pandas/tests/{core/dtypes => sparse}/__init__.py (100%) rename pandas/tests/{core => }/sparse/common.py (100%) rename pandas/tests/{core => }/sparse/test_arithmetics.py (100%) rename pandas/tests/{core => }/sparse/test_array.py (100%) rename pandas/tests/{core => }/sparse/test_combine_concat.py (100%) rename pandas/tests/{core => }/sparse/test_format.py (100%) rename pandas/tests/{core => }/sparse/test_frame.py (100%) rename pandas/tests/{core => }/sparse/test_groupby.py (100%) rename pandas/tests/{core => }/sparse/test_indexing.py (100%) rename pandas/tests/{core => }/sparse/test_libsparse.py (100%) rename pandas/tests/{core => }/sparse/test_list.py (100%) 
rename pandas/tests/{core => }/sparse/test_pivot.py (100%) rename pandas/tests/{core => }/sparse/test_series.py (100%) diff --git a/pandas/tests/core/__init__.py b/pandas/tests/computation/__init__.py similarity index 100% rename from pandas/tests/core/__init__.py rename to pandas/tests/computation/__init__.py diff --git a/pandas/tests/core/computation/test_compat.py b/pandas/tests/computation/test_compat.py similarity index 100% rename from pandas/tests/core/computation/test_compat.py rename to pandas/tests/computation/test_compat.py diff --git a/pandas/tests/core/computation/test_eval.py b/pandas/tests/computation/test_eval.py similarity index 100% rename from pandas/tests/core/computation/test_eval.py rename to pandas/tests/computation/test_eval.py diff --git a/pandas/tests/core/sparse/__init__.py b/pandas/tests/core/sparse/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/core/computation/__init__.py b/pandas/tests/dtypes/__init__.py similarity index 100% rename from pandas/tests/core/computation/__init__.py rename to pandas/tests/dtypes/__init__.py diff --git a/pandas/tests/core/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py similarity index 100% rename from pandas/tests/core/dtypes/test_cast.py rename to pandas/tests/dtypes/test_cast.py diff --git a/pandas/tests/core/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py similarity index 100% rename from pandas/tests/core/dtypes/test_common.py rename to pandas/tests/dtypes/test_common.py diff --git a/pandas/tests/core/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py similarity index 100% rename from pandas/tests/core/dtypes/test_concat.py rename to pandas/tests/dtypes/test_concat.py diff --git a/pandas/tests/core/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py similarity index 100% rename from pandas/tests/core/dtypes/test_dtypes.py rename to pandas/tests/dtypes/test_dtypes.py diff --git a/pandas/tests/core/dtypes/test_generic.py 
b/pandas/tests/dtypes/test_generic.py similarity index 100% rename from pandas/tests/core/dtypes/test_generic.py rename to pandas/tests/dtypes/test_generic.py diff --git a/pandas/tests/core/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py similarity index 100% rename from pandas/tests/core/dtypes/test_inference.py rename to pandas/tests/dtypes/test_inference.py diff --git a/pandas/tests/core/dtypes/test_io.py b/pandas/tests/dtypes/test_io.py similarity index 100% rename from pandas/tests/core/dtypes/test_io.py rename to pandas/tests/dtypes/test_io.py diff --git a/pandas/tests/core/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py similarity index 100% rename from pandas/tests/core/dtypes/test_missing.py rename to pandas/tests/dtypes/test_missing.py diff --git a/pandas/tests/core/dtypes/__init__.py b/pandas/tests/sparse/__init__.py similarity index 100% rename from pandas/tests/core/dtypes/__init__.py rename to pandas/tests/sparse/__init__.py diff --git a/pandas/tests/core/sparse/common.py b/pandas/tests/sparse/common.py similarity index 100% rename from pandas/tests/core/sparse/common.py rename to pandas/tests/sparse/common.py diff --git a/pandas/tests/core/sparse/test_arithmetics.py b/pandas/tests/sparse/test_arithmetics.py similarity index 100% rename from pandas/tests/core/sparse/test_arithmetics.py rename to pandas/tests/sparse/test_arithmetics.py diff --git a/pandas/tests/core/sparse/test_array.py b/pandas/tests/sparse/test_array.py similarity index 100% rename from pandas/tests/core/sparse/test_array.py rename to pandas/tests/sparse/test_array.py diff --git a/pandas/tests/core/sparse/test_combine_concat.py b/pandas/tests/sparse/test_combine_concat.py similarity index 100% rename from pandas/tests/core/sparse/test_combine_concat.py rename to pandas/tests/sparse/test_combine_concat.py diff --git a/pandas/tests/core/sparse/test_format.py b/pandas/tests/sparse/test_format.py similarity index 100% rename from 
pandas/tests/core/sparse/test_format.py rename to pandas/tests/sparse/test_format.py diff --git a/pandas/tests/core/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py similarity index 100% rename from pandas/tests/core/sparse/test_frame.py rename to pandas/tests/sparse/test_frame.py diff --git a/pandas/tests/core/sparse/test_groupby.py b/pandas/tests/sparse/test_groupby.py similarity index 100% rename from pandas/tests/core/sparse/test_groupby.py rename to pandas/tests/sparse/test_groupby.py diff --git a/pandas/tests/core/sparse/test_indexing.py b/pandas/tests/sparse/test_indexing.py similarity index 100% rename from pandas/tests/core/sparse/test_indexing.py rename to pandas/tests/sparse/test_indexing.py diff --git a/pandas/tests/core/sparse/test_libsparse.py b/pandas/tests/sparse/test_libsparse.py similarity index 100% rename from pandas/tests/core/sparse/test_libsparse.py rename to pandas/tests/sparse/test_libsparse.py diff --git a/pandas/tests/core/sparse/test_list.py b/pandas/tests/sparse/test_list.py similarity index 100% rename from pandas/tests/core/sparse/test_list.py rename to pandas/tests/sparse/test_list.py diff --git a/pandas/tests/core/sparse/test_pivot.py b/pandas/tests/sparse/test_pivot.py similarity index 100% rename from pandas/tests/core/sparse/test_pivot.py rename to pandas/tests/sparse/test_pivot.py diff --git a/pandas/tests/core/sparse/test_series.py b/pandas/tests/sparse/test_series.py similarity index 100% rename from pandas/tests/core/sparse/test_series.py rename to pandas/tests/sparse/test_series.py diff --git a/setup.py b/setup.py index 5e474153d0ee1..a1ec567a20ee2 100755 --- a/setup.py +++ b/setup.py @@ -655,9 +655,9 @@ def pxd(name): 'pandas.util', 'pandas.tests', 'pandas.tests.api', - 'pandas.tests.core.dtypes', - 'pandas.tests.core.computation', - 'pandas.tests.core.sparse', + 'pandas.tests.dtypes', + 'pandas.tests.computation', + 'pandas.tests.sparse', 'pandas.tests.frame', 'pandas.tests.indexes', 
'pandas.tests.indexes.datetimes', From 89bd26871727615aeef63be6243ced4ed5501f26 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 17 Apr 2017 08:37:23 -0400 Subject: [PATCH 44/56] DOC: fix Styler import in api docs --- doc/source/api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/api.rst b/doc/source/api.rst index d0f548cc3d0b1..868f0d7f9c962 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1821,7 +1821,7 @@ Computations / Descriptive Stats Style ----- -.. currentmodule:: pandas.formats.style +.. currentmodule:: pandas.io.formats.style ``Styler`` objects are returned by :attr:`pandas.DataFrame.style`. From 1a117fc52bd07bba3e42121cf900d16c7184e622 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 17 Apr 2017 14:10:27 +0000 Subject: [PATCH 45/56] CLN: move pandas.indexes -> pandas.core.indexes (#16031) --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/compat/pickle_compat.py | 16 +++++++++++++--- pandas/core/api.py | 2 +- pandas/core/categorical.py | 2 +- pandas/core/index.py | 4 ++-- pandas/{ => core}/indexes/__init__.py | 0 pandas/{ => core}/indexes/api.py | 12 ++++++------ pandas/{ => core}/indexes/base.py | 4 ++-- pandas/{ => core}/indexes/category.py | 4 ++-- pandas/{ => core}/indexes/frozen.py | 0 pandas/{ => core}/indexes/interval.py | 14 ++++++++------ pandas/{ => core}/indexes/multi.py | 12 +++++++----- pandas/{ => core}/indexes/numeric.py | 5 +++-- pandas/{ => core}/indexes/range.py | 6 +++--- pandas/core/sparse/array.py | 2 +- pandas/tests/api/test_api.py | 3 +-- pandas/tests/indexes/common.py | 2 +- pandas/tests/indexes/test_base.py | 2 +- pandas/tests/indexes/test_category.py | 2 +- pandas/tests/indexes/test_frozen.py | 2 +- pandas/tests/indexes/test_multi.py | 2 +- pandas/tseries/base.py | 4 ++-- pandas/tseries/index.py | 2 +- pandas/tseries/period.py | 4 ++-- pandas/tseries/tdi.py | 2 +- setup.py | 2 +- 26 files changed, 63 insertions(+), 48 deletions(-) rename pandas/{ => core}/indexes/__init__.py 
(100%) rename pandas/{ => core}/indexes/api.py (90%) rename pandas/{ => core}/indexes/base.py (99%) rename pandas/{ => core}/indexes/category.py (99%) rename pandas/{ => core}/indexes/frozen.py (100%) rename pandas/{ => core}/indexes/interval.py (99%) rename pandas/{ => core}/indexes/multi.py (99%) rename pandas/{ => core}/indexes/numeric.py (99%) rename pandas/{ => core}/indexes/range.py (99%) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index de33b7d4e3371..9df82b8ac7338 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1344,6 +1344,7 @@ If indicated, a deprecation warning will be issued if you reference theses modul "pandas.index", "pandas._libs.index", "" "pandas.algos", "pandas._libs.algos", "" "pandas.hashtable", "pandas._libs.hashtable", "" + "pandas.indexes", "pandas.core.indexes", "" "pandas.json", "pandas.io.json.libjson", "X" "pandas.parser", "pandas.io.libparsers", "X" "pandas.formats", "pandas.io.formats", "" diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index e977fdc3a267d..f7d451ce7c92f 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -59,9 +59,9 @@ def load_reduce(self): # 15477 ('pandas.core.base', 'FrozenNDArray'): - ('pandas.indexes.frozen', 'FrozenNDArray'), + ('pandas.core.indexes.frozen', 'FrozenNDArray'), ('pandas.core.base', 'FrozenList'): - ('pandas.indexes.frozen', 'FrozenList'), + ('pandas.core.indexes.frozen', 'FrozenList'), # 10890 ('pandas.core.series', 'TimeSeries'): @@ -84,7 +84,17 @@ def load_reduce(self): ('pandas.sparse.series', 'SparseSeries'): ('pandas.core.sparse.series', 'SparseSeries'), ('pandas.sparse.frame', 'SparseDataFrame'): - ('pandas.core.sparse.frame', 'SparseDataFrame') + ('pandas.core.sparse.frame', 'SparseDataFrame'), + ('pandas.indexes.base', '_new_Index'): + ('pandas.core.indexes.base', '_new_Index'), + ('pandas.indexes.base', 'Index'): + ('pandas.core.indexes.base', 'Index'), + 
('pandas.indexes.numeric', 'Int64Index'): + ('pandas.core.indexes.numeric', 'Int64Index'), + ('pandas.indexes.range', 'RangeIndex'): + ('pandas.core.indexes.range', 'RangeIndex'), + ('pandas.indexes.multi', 'MultiIndex'): + ('pandas.core.indexes.multi', 'MultiIndex') } diff --git a/pandas/core/api.py b/pandas/core/api.py index 3c739d85d0074..865fe367873d8 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -12,7 +12,7 @@ from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, MultiIndex, IntervalIndex) -from pandas.indexes.interval import Interval, interval_range +from pandas.core.indexes.interval import Interval, interval_range from pandas.core.series import Series from pandas.core.frame import DataFrame diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 50181486d8cf7..a12cec33fb350 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2128,7 +2128,7 @@ def _factorize_from_iterable(values): If `values` has a categorical dtype, then `categories` is a CategoricalIndex keeping the categories and order of `values`. 
""" - from pandas.indexes.category import CategoricalIndex + from pandas.core.indexes.category import CategoricalIndex if not is_list_like(values): raise TypeError("Input must be list-like") diff --git a/pandas/core/index.py b/pandas/core/index.py index 05f98d59a1f56..2d1c22f5623a1 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,3 +1,3 @@ # flake8: noqa -from pandas.indexes.api import * -from pandas.indexes.multi import _sparsify +from pandas.core.indexes.api import * +from pandas.core.indexes.multi import _sparsify diff --git a/pandas/indexes/__init__.py b/pandas/core/indexes/__init__.py similarity index 100% rename from pandas/indexes/__init__.py rename to pandas/core/indexes/__init__.py diff --git a/pandas/indexes/api.py b/pandas/core/indexes/api.py similarity index 90% rename from pandas/indexes/api.py rename to pandas/core/indexes/api.py index db076b60ab34e..d40f6da4c4ee5 100644 --- a/pandas/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -1,12 +1,12 @@ -from pandas.indexes.base import (Index, _new_Index, # noqa +from pandas.core.indexes.base import (Index, _new_Index, # noqa _ensure_index, _get_na_value, InvalidIndexError) -from pandas.indexes.category import CategoricalIndex # noqa -from pandas.indexes.multi import MultiIndex # noqa -from pandas.indexes.interval import IntervalIndex # noqa -from pandas.indexes.numeric import (NumericIndex, Float64Index, # noqa +from pandas.core.indexes.category import CategoricalIndex # noqa +from pandas.core.indexes.multi import MultiIndex # noqa +from pandas.core.indexes.interval import IntervalIndex # noqa +from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa Int64Index, UInt64Index) -from pandas.indexes.range import RangeIndex # noqa +from pandas.core.indexes.range import RangeIndex # noqa import pandas.core.common as com import pandas._libs.lib as lib diff --git a/pandas/indexes/base.py b/pandas/core/indexes/base.py similarity index 99% rename from pandas/indexes/base.py 
rename to pandas/core/indexes/base.py index d88e54dcc9521..5149d45514e2e 100644 --- a/pandas/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -43,7 +43,7 @@ import pandas.core.base as base from pandas.util.decorators import (Appender, Substitution, cache_readonly, deprecate, deprecate_kwarg) -from pandas.indexes.frozen import FrozenList +from pandas.core.indexes.frozen import FrozenList import pandas.core.common as com import pandas.core.dtypes.concat as _concat import pandas.core.missing as missing @@ -1675,7 +1675,7 @@ def append(self, other): if self.is_categorical(): # if calling index is category, don't check dtype of others - from pandas.indexes.category import CategoricalIndex + from pandas.core.indexes.category import CategoricalIndex return CategoricalIndex._append_same_dtype(self, to_concat, name) typs = _concat.get_dtype_kinds(to_concat) diff --git a/pandas/indexes/category.py b/pandas/core/indexes/category.py similarity index 99% rename from pandas/indexes/category.py rename to pandas/core/indexes/category.py index 5f9d106189767..257ca86947f2b 100644 --- a/pandas/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -16,10 +16,10 @@ from pandas.util.decorators import Appender, cache_readonly from pandas.core.config import get_option -from pandas.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.base import Index, _index_shared_docs import pandas.core.base as base import pandas.core.missing as missing -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update(dict(target_klass='CategoricalIndex')) diff --git a/pandas/indexes/frozen.py b/pandas/core/indexes/frozen.py similarity index 100% rename from pandas/indexes/frozen.py rename to pandas/core/indexes/frozen.py diff --git a/pandas/indexes/interval.py b/pandas/core/indexes/interval.py similarity index 99% rename from pandas/indexes/interval.py rename to 
pandas/core/indexes/interval.py index 6f68e67d702fe..f14e7bf6bd183 100644 --- a/pandas/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -16,20 +16,22 @@ is_interval_dtype, is_scalar, is_integer) -from pandas.indexes.base import (Index, _ensure_index, - default_pprint, _index_shared_docs) +from pandas.core.indexes.base import ( + Index, _ensure_index, + default_pprint, _index_shared_docs) from pandas._libs import Timestamp, Timedelta -from pandas._libs.interval import (Interval, IntervalMixin, IntervalTree, - intervals_to_interval_bounds) +from pandas._libs.interval import ( + Interval, IntervalMixin, IntervalTree, + intervals_to_interval_bounds) -from pandas.indexes.multi import MultiIndex +from pandas.core.indexes.multi import MultiIndex from pandas.compat.numpy import function as nv from pandas.core import common as com from pandas.util.decorators import cache_readonly, Appender from pandas.core.config import get_option -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='IntervalIndex', diff --git a/pandas/indexes/multi.py b/pandas/core/indexes/multi.py similarity index 99% rename from pandas/indexes/multi.py rename to pandas/core/indexes/multi.py index b341bfe7b5215..40e7118ca0f6a 100644 --- a/pandas/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -35,11 +35,13 @@ from pandas.core.config import get_option -from pandas.indexes.base import (Index, _ensure_index, - _get_na_value, InvalidIndexError, - _index_shared_docs) -from pandas.indexes.frozen import FrozenNDArray, FrozenList, _ensure_frozen -import pandas.indexes.base as ibase +from pandas.core.indexes.base import ( + Index, _ensure_index, + _get_na_value, InvalidIndexError, + _index_shared_docs) +from pandas.core.indexes.frozen import ( + FrozenNDArray, FrozenList, _ensure_frozen) +import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) 
_index_doc_kwargs.update( dict(klass='MultiIndex', diff --git a/pandas/indexes/numeric.py b/pandas/core/indexes/numeric.py similarity index 99% rename from pandas/indexes/numeric.py rename to pandas/core/indexes/numeric.py index 6b9999239cd88..21ba2a386d96a 100644 --- a/pandas/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -9,9 +9,10 @@ from pandas import compat from pandas.core import algorithms -from pandas.indexes.base import Index, InvalidIndexError, _index_shared_docs +from pandas.core.indexes.base import ( + Index, InvalidIndexError, _index_shared_docs) from pandas.util.decorators import Appender, cache_readonly -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase _num_index_shared_docs = dict() diff --git a/pandas/indexes/range.py b/pandas/core/indexes/range.py similarity index 99% rename from pandas/indexes/range.py rename to pandas/core/indexes/range.py index 1eedfcc619aec..acd040693af2e 100644 --- a/pandas/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -12,11 +12,11 @@ from pandas import compat from pandas.compat import lrange, range from pandas.compat.numpy import function as nv -from pandas.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util.decorators import Appender, cache_readonly -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase -from pandas.indexes.numeric import Int64Index +from pandas.core.indexes.numeric import Int64Index class RangeIndex(Int64Index): diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index d3fdfe5533a03..ef3600266c037 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -36,7 +36,7 @@ import pandas.core.ops as ops import pandas.io.formats.printing as printing from pandas.util.decorators import Appender -from pandas.indexes.base import _index_shared_docs +from pandas.core.indexes.base import _index_shared_docs _sparray_doc_kwargs = 
dict(klass='SparseArray') diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index ec9e6039c6ee4..13e6d065382a6 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -30,8 +30,7 @@ class TestPDApi(Base, tm.TestCase): ignored = ['tests', 'locale', 'conftest'] # top-level sub-packages - lib = ['api', 'compat', 'core', - 'indexes', 'errors', 'pandas', + lib = ['api', 'compat', 'core', 'errors', 'pandas', 'plotting', 'test', 'testing', 'tools', 'tseries', 'util', 'options', 'io'] diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index bec55083829b6..9003a3707e417 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -419,7 +419,7 @@ def test_numpy_argsort(self): # pandas compatibility input validation - the # rest already perform separate (or no) such # validation via their 'values' attribute as - # defined in pandas.indexes/base.py - they + # defined in pandas.core.indexes/base.py - they # cannot be changed at the moment due to # backwards compatibility concerns if isinstance(type(ind), (CategoricalIndex, RangeIndex)): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 165ad91086d0a..de15abe89712a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -3,7 +3,7 @@ from datetime import datetime, timedelta import pandas.util.testing as tm -from pandas.indexes.api import Index, MultiIndex +from pandas.core.indexes.api import Index, MultiIndex from pandas.tests.indexes.common import Base from pandas.compat import (range, lrange, lzip, u, diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index e714bbd4f9d44..6e869890bfcd6 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import pandas.util.testing as tm -from pandas.indexes.api import Index, CategoricalIndex +from 
pandas.core.indexes.api import Index, CategoricalIndex from .common import Base from pandas.compat import range, PY3 diff --git a/pandas/tests/indexes/test_frozen.py b/pandas/tests/indexes/test_frozen.py index cb90beb6a5bfb..ed2e3d94aa4a4 100644 --- a/pandas/tests/indexes/test_frozen.py +++ b/pandas/tests/indexes/test_frozen.py @@ -1,7 +1,7 @@ import numpy as np from pandas.util import testing as tm from pandas.tests.test_base import CheckImmutable, CheckStringMixin -from pandas.indexes.frozen import FrozenList, FrozenNDArray +from pandas.core.indexes.frozen import FrozenList, FrozenNDArray from pandas.compat import u diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index f907741950b01..d45182d8d82c5 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -16,7 +16,7 @@ compat, date_range, period_range) from pandas.compat import PY3, long, lrange, lzip, range, u from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.indexes.base import InvalidIndexError +from pandas.core.indexes.base import InvalidIndexError from pandas._libs import lib from pandas._libs.lib import Timestamp diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py index b419aae709683..3daa88fe396f6 100644 --- a/pandas/tseries/base.py +++ b/pandas/tseries/base.py @@ -28,12 +28,12 @@ from pandas._libs.period import Period from pandas.core.index import Index -from pandas.indexes.base import _index_shared_docs +from pandas.core.indexes.base import _index_shared_docs from pandas.util.decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat import pandas.tseries.frequencies as frequencies -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index a964b6d9e09d3..d9aa72fe065ab 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py 
@@ -30,7 +30,7 @@ from pandas.core.common import _values_from_object, _maybe_box from pandas.core.index import Index, Int64Index, Float64Index -from pandas.indexes.base import _index_shared_docs +from pandas.core.indexes.base import _index_shared_docs import pandas.compat as compat from pandas.tseries.frequencies import ( to_offset, get_period_alias, diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 66275925ff355..b19e086b818f0 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -37,14 +37,14 @@ _quarter_to_myear) from pandas.core.base import _shared_docs -from pandas.indexes.base import _index_shared_docs, _ensure_index +from pandas.core.indexes.base import _index_shared_docs, _ensure_index from pandas import compat from pandas.util.decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) from pandas.compat import zip, u -import pandas.indexes.base as ibase +import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(target_klass='PeriodIndex or list of Periods')) diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py index 020b7328238b7..7768b4a340775 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/tseries/tdi.py @@ -23,7 +23,7 @@ from pandas.tseries.frequencies import to_offset from pandas.core.algorithms import checked_add_with_arr from pandas.core.base import _shared_docs -from pandas.indexes.base import _index_shared_docs +from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com import pandas.core.dtypes.concat as _concat from pandas.util.decorators import Appender, Substitution, deprecate_kwarg diff --git a/setup.py b/setup.py index a1ec567a20ee2..6fc66e2355c0f 100755 --- a/setup.py +++ b/setup.py @@ -640,9 +640,9 @@ def pxd(name): 'pandas.compat.numpy', 'pandas.core', 'pandas.core.dtypes', + 'pandas.core.indexes', 'pandas.core.computation', 'pandas.core.sparse', - 'pandas.indexes', 'pandas.errors', 
'pandas.io', 'pandas.io.json', From 1a094376680368e9d4e5dfa6c900e99f6291ca41 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Mon, 17 Apr 2017 22:27:06 +0200 Subject: [PATCH 46/56] CLN: remove old vb_suite files (now asv_bench) (#16034) --- test_perf.sh | 5 - vb_suite/.gitignore | 4 - vb_suite/attrs_caching.py | 20 - vb_suite/binary_ops.py | 199 ------ vb_suite/categoricals.py | 16 - vb_suite/ctors.py | 39 -- vb_suite/eval.py | 150 ----- vb_suite/frame_ctor.py | 123 ---- vb_suite/frame_methods.py | 525 --------------- vb_suite/generate_rst_files.py | 2 - vb_suite/gil.py | 110 ---- vb_suite/groupby.py | 620 ------------------ vb_suite/hdfstore_bench.py | 278 -------- vb_suite/index_object.py | 173 ----- vb_suite/indexing.py | 292 --------- vb_suite/inference.py | 36 - vb_suite/io_bench.py | 150 ----- vb_suite/io_sql.py | 126 ---- vb_suite/join_merge.py | 270 -------- vb_suite/make.py | 167 ----- vb_suite/measure_memory_consumption.py | 55 -- vb_suite/miscellaneous.py | 32 - vb_suite/packers.py | 252 ------- vb_suite/pandas_vb_common.py | 30 - vb_suite/panel_ctor.py | 76 --- vb_suite/panel_methods.py | 28 - vb_suite/parser_vb.py | 112 ---- vb_suite/perf_HEAD.py | 243 ------- vb_suite/plotting.py | 25 - vb_suite/reindex.py | 225 ------- vb_suite/replace.py | 36 - vb_suite/reshape.py | 65 -- vb_suite/run_suite.py | 15 - vb_suite/series_methods.py | 39 -- vb_suite/source/_static/stub | 0 vb_suite/source/conf.py | 225 ------- vb_suite/source/themes/agogo/layout.html | 95 --- .../source/themes/agogo/static/agogo.css_t | 476 -------------- .../source/themes/agogo/static/bgfooter.png | Bin 434 -> 0 bytes vb_suite/source/themes/agogo/static/bgtop.png | Bin 430 -> 0 bytes vb_suite/source/themes/agogo/theme.conf | 19 - vb_suite/sparse.py | 65 -- vb_suite/stat_ops.py | 126 ---- vb_suite/strings.py | 59 -- vb_suite/suite.py | 164 ----- vb_suite/test.py | 67 -- vb_suite/test_perf.py | 616 ----------------- vb_suite/timedelta.py | 32 - vb_suite/timeseries.py | 445 
------------- 49 files changed, 6927 deletions(-) delete mode 100755 test_perf.sh delete mode 100644 vb_suite/.gitignore delete mode 100644 vb_suite/attrs_caching.py delete mode 100644 vb_suite/binary_ops.py delete mode 100644 vb_suite/categoricals.py delete mode 100644 vb_suite/ctors.py delete mode 100644 vb_suite/eval.py delete mode 100644 vb_suite/frame_ctor.py delete mode 100644 vb_suite/frame_methods.py delete mode 100644 vb_suite/generate_rst_files.py delete mode 100644 vb_suite/gil.py delete mode 100644 vb_suite/groupby.py delete mode 100644 vb_suite/hdfstore_bench.py delete mode 100644 vb_suite/index_object.py delete mode 100644 vb_suite/indexing.py delete mode 100644 vb_suite/inference.py delete mode 100644 vb_suite/io_bench.py delete mode 100644 vb_suite/io_sql.py delete mode 100644 vb_suite/join_merge.py delete mode 100755 vb_suite/make.py delete mode 100755 vb_suite/measure_memory_consumption.py delete mode 100644 vb_suite/miscellaneous.py delete mode 100644 vb_suite/packers.py delete mode 100644 vb_suite/pandas_vb_common.py delete mode 100644 vb_suite/panel_ctor.py delete mode 100644 vb_suite/panel_methods.py delete mode 100644 vb_suite/parser_vb.py delete mode 100755 vb_suite/perf_HEAD.py delete mode 100644 vb_suite/plotting.py delete mode 100644 vb_suite/reindex.py delete mode 100644 vb_suite/replace.py delete mode 100644 vb_suite/reshape.py delete mode 100755 vb_suite/run_suite.py delete mode 100644 vb_suite/series_methods.py delete mode 100644 vb_suite/source/_static/stub delete mode 100644 vb_suite/source/conf.py delete mode 100644 vb_suite/source/themes/agogo/layout.html delete mode 100644 vb_suite/source/themes/agogo/static/agogo.css_t delete mode 100644 vb_suite/source/themes/agogo/static/bgfooter.png delete mode 100644 vb_suite/source/themes/agogo/static/bgtop.png delete mode 100644 vb_suite/source/themes/agogo/theme.conf delete mode 100644 vb_suite/sparse.py delete mode 100644 vb_suite/stat_ops.py delete mode 100644 vb_suite/strings.py delete 
mode 100644 vb_suite/suite.py delete mode 100644 vb_suite/test.py delete mode 100755 vb_suite/test_perf.py delete mode 100644 vb_suite/timedelta.py delete mode 100644 vb_suite/timeseries.py diff --git a/test_perf.sh b/test_perf.sh deleted file mode 100755 index 022de25bca8fc..0000000000000 --- a/test_perf.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -CURDIR=$(pwd) -BASEDIR=$(cd "$(dirname "$0")"; pwd) -python "$BASEDIR"/vb_suite/test_perf.py $@ diff --git a/vb_suite/.gitignore b/vb_suite/.gitignore deleted file mode 100644 index cc110f04e1225..0000000000000 --- a/vb_suite/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -benchmarks.db -build/* -source/vbench/* -source/*.rst \ No newline at end of file diff --git a/vb_suite/attrs_caching.py b/vb_suite/attrs_caching.py deleted file mode 100644 index a7e3ed7094ed6..0000000000000 --- a/vb_suite/attrs_caching.py +++ /dev/null @@ -1,20 +0,0 @@ -from vbench.benchmark import Benchmark - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# DataFrame.index / columns property lookup time - -setup = common_setup + """ -df = DataFrame(np.random.randn(10, 6)) -cur_index = df.index -""" -stmt = "foo = df.index" - -getattr_dataframe_index = Benchmark(stmt, setup, - name="getattr_dataframe_index") - -stmt = "df.index = cur_index" -setattr_dataframe_index = Benchmark(stmt, setup, - name="setattr_dataframe_index") diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py deleted file mode 100644 index edc29bf3eec37..0000000000000 --- a/vb_suite/binary_ops.py +++ /dev/null @@ -1,199 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -SECTION = 'Binary ops' - -#---------------------------------------------------------------------- -# binary ops - -#---------------------------------------------------------------------- -# add - -setup = common_setup + """ -df = 
DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -""" -frame_add = \ - Benchmark("df + df2", setup, name='frame_add', - start_date=datetime(2012, 1, 1)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_numexpr_threads(1) -""" - -frame_add_st = \ - Benchmark("df + df2", setup, name='frame_add_st',cleanup="expr.set_numexpr_threads()", - start_date=datetime(2013, 2, 26)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_use_numexpr(False) -""" -frame_add_no_ne = \ - Benchmark("df + df2", setup, name='frame_add_no_ne',cleanup="expr.set_use_numexpr(True)", - start_date=datetime(2013, 2, 26)) - -#---------------------------------------------------------------------- -# mult - -setup = common_setup + """ -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -""" -frame_mult = \ - Benchmark("df * df2", setup, name='frame_mult', - start_date=datetime(2012, 1, 1)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_numexpr_threads(1) -""" -frame_mult_st = \ - Benchmark("df * df2", setup, name='frame_mult_st',cleanup="expr.set_numexpr_threads()", - start_date=datetime(2013, 2, 26)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_use_numexpr(False) -""" -frame_mult_no_ne = \ - Benchmark("df * df2", setup, name='frame_mult_no_ne',cleanup="expr.set_use_numexpr(True)", - start_date=datetime(2013, 2, 26)) - -#---------------------------------------------------------------------- -# 
division - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000, 1000)) -""" -frame_float_div_by_zero = \ - Benchmark("df / 0", setup, name='frame_float_div_by_zero') - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000, 1000)) -""" -frame_float_floor_by_zero = \ - Benchmark("df // 0", setup, name='frame_float_floor_by_zero') - -setup = common_setup + """ -df = DataFrame(np.random.random_integers(np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(1000, 1000))) -""" -frame_int_div_by_zero = \ - Benchmark("df / 0", setup, name='frame_int_div_by_zero') - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000, 1000)) -df2 = DataFrame(np.random.randn(1000, 1000)) -""" -frame_float_div = \ - Benchmark("df // df2", setup, name='frame_float_div') - -#---------------------------------------------------------------------- -# modulo - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000, 1000)) -df2 = DataFrame(np.random.randn(1000, 1000)) -""" -frame_float_mod = \ - Benchmark("df / df2", setup, name='frame_float_mod') - -setup = common_setup + """ -df = DataFrame(np.random.random_integers(np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(1000, 1000))) -df2 = DataFrame(np.random.random_integers(np.iinfo(np.int16).min, np.iinfo(np.int16).max, size=(1000, 1000))) -""" -frame_int_mod = \ - Benchmark("df / df2", setup, name='frame_int_mod') - -#---------------------------------------------------------------------- -# multi and - -setup = common_setup + """ -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -""" -frame_multi_and = \ - Benchmark("df[(df>0) & (df2>0)]", setup, name='frame_multi_and', - start_date=datetime(2012, 1, 1)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_numexpr_threads(1) -""" -frame_multi_and_st = \ - Benchmark("df[(df>0) & 
(df2>0)]", setup, name='frame_multi_and_st',cleanup="expr.set_numexpr_threads()", - start_date=datetime(2013, 2, 26)) - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -expr.set_use_numexpr(False) -""" -frame_multi_and_no_ne = \ - Benchmark("df[(df>0) & (df2>0)]", setup, name='frame_multi_and_no_ne',cleanup="expr.set_use_numexpr(True)", - start_date=datetime(2013, 2, 26)) - -#---------------------------------------------------------------------- -# timeseries - -setup = common_setup + """ -N = 1000000 -halfway = N // 2 - 1 -s = Series(date_range('20010101', periods=N, freq='T')) -ts = s[halfway] -""" - -timestamp_series_compare = Benchmark("ts >= s", setup, - start_date=datetime(2013, 9, 27)) -series_timestamp_compare = Benchmark("s <= ts", setup, - start_date=datetime(2012, 2, 21)) - -setup = common_setup + """ -N = 1000000 -s = Series(date_range('20010101', periods=N, freq='s')) -""" - -timestamp_ops_diff1 = Benchmark("s.diff()", setup, - start_date=datetime(2013, 1, 1)) -timestamp_ops_diff2 = Benchmark("s-s.shift()", setup, - start_date=datetime(2013, 1, 1)) - -#---------------------------------------------------------------------- -# timeseries with tz - -setup = common_setup + """ -N = 10000 -halfway = N // 2 - 1 -s = Series(date_range('20010101', periods=N, freq='T', tz='US/Eastern')) -ts = s[halfway] -""" - -timestamp_tz_series_compare = Benchmark("ts >= s", setup, - start_date=datetime(2013, 9, 27)) -series_timestamp_tz_compare = Benchmark("s <= ts", setup, - start_date=datetime(2012, 2, 21)) - -setup = common_setup + """ -N = 10000 -s = Series(date_range('20010101', periods=N, freq='s', tz='US/Eastern')) -""" - -timestamp_tz_ops_diff1 = Benchmark("s.diff()", setup, - start_date=datetime(2013, 1, 1)) -timestamp_tz_ops_diff2 = Benchmark("s-s.shift()", setup, - start_date=datetime(2013, 1, 1)) diff --git a/vb_suite/categoricals.py 
b/vb_suite/categoricals.py deleted file mode 100644 index a08d479df20cb..0000000000000 --- a/vb_suite/categoricals.py +++ /dev/null @@ -1,16 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# Series constructors - -setup = common_setup + """ -s = pd.Series(list('aabbcd') * 1000000).astype('category') -""" - -concat_categorical = \ - Benchmark("concat([s, s])", setup=setup, name='concat_categorical', - start_date=datetime(year=2015, month=7, day=15)) diff --git a/vb_suite/ctors.py b/vb_suite/ctors.py deleted file mode 100644 index 8123322383f0a..0000000000000 --- a/vb_suite/ctors.py +++ /dev/null @@ -1,39 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# Series constructors - -setup = common_setup + """ -data = np.random.randn(100) -index = Index(np.arange(100)) -""" - -ctor_series_ndarray = \ - Benchmark("Series(data, index=index)", setup=setup, - name='series_constructor_ndarray') - -setup = common_setup + """ -arr = np.random.randn(100, 100) -""" - -ctor_frame_ndarray = \ - Benchmark("DataFrame(arr)", setup=setup, - name='frame_constructor_ndarray') - -setup = common_setup + """ -data = np.array(['foo', 'bar', 'baz'], dtype=object) -""" - -ctor_index_array_string = Benchmark('Index(data)', setup=setup) - -# index constructors -setup = common_setup + """ -s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')]*1000) -""" -index_from_series_ctor = Benchmark('Index(s)', setup=setup) - -dtindex_from_series_ctor = Benchmark('DatetimeIndex(s)', setup=setup) diff --git a/vb_suite/eval.py b/vb_suite/eval.py deleted file mode 100644 index 011669256a9bc..0000000000000 --- a/vb_suite/eval.py +++ /dev/null @@ -1,150 +0,0 @@ -from 
vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -import pandas as pd -df = DataFrame(np.random.randn(20000, 100)) -df2 = DataFrame(np.random.randn(20000, 100)) -df3 = DataFrame(np.random.randn(20000, 100)) -df4 = DataFrame(np.random.randn(20000, 100)) -""" - -setup = common_setup + """ -import pandas.core.computation.expressions as expr -expr.set_numexpr_threads(1) -""" - -SECTION = 'Eval' - -#---------------------------------------------------------------------- -# binary ops - -#---------------------------------------------------------------------- -# add -eval_frame_add_all_threads = \ - Benchmark("pd.eval('df + df2 + df3 + df4')", common_setup, - name='eval_frame_add_all_threads', - start_date=datetime(2013, 7, 21)) - - - -eval_frame_add_one_thread = \ - Benchmark("pd.eval('df + df2 + df3 + df4')", setup, - name='eval_frame_add_one_thread', - start_date=datetime(2013, 7, 26)) - -eval_frame_add_python = \ - Benchmark("pd.eval('df + df2 + df3 + df4', engine='python')", common_setup, - name='eval_frame_add_python', start_date=datetime(2013, 7, 21)) - -eval_frame_add_python_one_thread = \ - Benchmark("pd.eval('df + df2 + df3 + df4', engine='python')", setup, - name='eval_frame_add_python_one_thread', - start_date=datetime(2013, 7, 26)) -#---------------------------------------------------------------------- -# mult - -eval_frame_mult_all_threads = \ - Benchmark("pd.eval('df * df2 * df3 * df4')", common_setup, - name='eval_frame_mult_all_threads', - start_date=datetime(2013, 7, 21)) - -eval_frame_mult_one_thread = \ - Benchmark("pd.eval('df * df2 * df3 * df4')", setup, - name='eval_frame_mult_one_thread', - start_date=datetime(2013, 7, 26)) - -eval_frame_mult_python = \ - Benchmark("pd.eval('df * df2 * df3 * df4', engine='python')", - common_setup, - name='eval_frame_mult_python', start_date=datetime(2013, 7, 21)) - -eval_frame_mult_python_one_thread = \ - Benchmark("pd.eval('df * df2 * df3 * 
df4', engine='python')", setup, - name='eval_frame_mult_python_one_thread', - start_date=datetime(2013, 7, 26)) - -#---------------------------------------------------------------------- -# multi and - -eval_frame_and_all_threads = \ - Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)')", - common_setup, - name='eval_frame_and_all_threads', - start_date=datetime(2013, 7, 21)) - -eval_frame_and_one_thread = \ - Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)')", setup, - name='eval_frame_and_one_thread', - start_date=datetime(2013, 7, 26)) - -eval_frame_and_python = \ - Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine='python')", - common_setup, name='eval_frame_and_python', - start_date=datetime(2013, 7, 21)) - -eval_frame_and_one_thread = \ - Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine='python')", - setup, - name='eval_frame_and_python_one_thread', - start_date=datetime(2013, 7, 26)) - -#-------------------------------------------------------------------- -# chained comp -eval_frame_chained_cmp_all_threads = \ - Benchmark("pd.eval('df < df2 < df3 < df4')", common_setup, - name='eval_frame_chained_cmp_all_threads', - start_date=datetime(2013, 7, 21)) - -eval_frame_chained_cmp_one_thread = \ - Benchmark("pd.eval('df < df2 < df3 < df4')", setup, - name='eval_frame_chained_cmp_one_thread', - start_date=datetime(2013, 7, 26)) - -eval_frame_chained_cmp_python = \ - Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", - common_setup, name='eval_frame_chained_cmp_python', - start_date=datetime(2013, 7, 26)) - -eval_frame_chained_cmp_one_thread = \ - Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", setup, - name='eval_frame_chained_cmp_python_one_thread', - start_date=datetime(2013, 7, 26)) - - -common_setup = """from .pandas_vb_common import * -""" - -setup = common_setup + """ -N = 1000000 -halfway = N // 2 - 1 -index = date_range('20010101', periods=N, freq='T') 
-s = Series(index) -ts = s.iloc[halfway] -""" - -series_setup = setup + """ -df = DataFrame({'dates': s.values}) -""" - -query_datetime_series = Benchmark("df.query('dates < @ts')", - series_setup, - start_date=datetime(2013, 9, 27)) - -index_setup = setup + """ -df = DataFrame({'a': np.random.randn(N)}, index=index) -""" - -query_datetime_index = Benchmark("df.query('index < @ts')", - index_setup, start_date=datetime(2013, 9, 27)) - -setup = setup + """ -N = 1000000 -df = DataFrame({'a': np.random.randn(N)}) -min_val = df['a'].min() -max_val = df['a'].max() -""" - -query_with_boolean_selection = Benchmark("df.query('(a >= @min_val) & (a <= @max_val)')", - setup, start_date=datetime(2013, 9, 27)) - diff --git a/vb_suite/frame_ctor.py b/vb_suite/frame_ctor.py deleted file mode 100644 index 0d57da7b88d3b..0000000000000 --- a/vb_suite/frame_ctor.py +++ /dev/null @@ -1,123 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime -try: - import pandas.tseries.offsets as offsets -except: - import pandas.core.datetools as offsets - -common_setup = """from .pandas_vb_common import * -try: - from pandas.tseries.offsets import * -except: - from pandas.core.datetools import * -""" - -#---------------------------------------------------------------------- -# Creation from nested dict - -setup = common_setup + """ -N, K = 5000, 50 -index = tm.makeStringIndex(N) -columns = tm.makeStringIndex(K) -frame = DataFrame(np.random.randn(N, K), index=index, columns=columns) - -try: - data = frame.to_dict() -except: - data = frame.toDict() - -some_dict = data.values()[0] -dict_list = [dict(zip(columns, row)) for row in frame.values] -""" - -frame_ctor_nested_dict = Benchmark("DataFrame(data)", setup) - -# From JSON-like stuff -frame_ctor_list_of_dict = Benchmark("DataFrame(dict_list)", setup, - start_date=datetime(2011, 12, 20)) - -series_ctor_from_dict = Benchmark("Series(some_dict)", setup) - -# nested dict, integer indexes, regression described in #621 -setup = 
common_setup + """ -data = dict((i,dict((j,float(j)) for j in range(100))) for i in xrange(2000)) -""" -frame_ctor_nested_dict_int64 = Benchmark("DataFrame(data)", setup) - -# dynamically generate benchmarks for every offset -# -# get_period_count & get_index_for_offset are there because blindly taking each -# offset times 1000 can easily go out of Timestamp bounds and raise errors. -dynamic_benchmarks = {} -n_steps = [1, 2] -offset_kwargs = {'WeekOfMonth': {'weekday': 1, 'week': 1}, - 'LastWeekOfMonth': {'weekday': 1, 'week': 1}, - 'FY5253': {'startingMonth': 1, 'weekday': 1}, - 'FY5253Quarter': {'qtr_with_extra_week': 1, 'startingMonth': 1, 'weekday': 1}} - -offset_extra_cases = {'FY5253': {'variation': ['nearest', 'last']}, - 'FY5253Quarter': {'variation': ['nearest', 'last']}} - -for offset in offsets.__all__: - for n in n_steps: - kwargs = {} - if offset in offset_kwargs: - kwargs = offset_kwargs[offset] - - if offset in offset_extra_cases: - extras = offset_extra_cases[offset] - else: - extras = {'': ['']} - - for extra_arg in extras: - for extra in extras[extra_arg]: - if extra: - kwargs[extra_arg] = extra - setup = common_setup + """ - -def get_period_count(start_date, off): - ten_offsets_in_days = ((start_date + off * 10) - start_date).days - if ten_offsets_in_days == 0: - return 1000 - else: - return min(9 * ((Timestamp.max - start_date).days // - ten_offsets_in_days), - 1000) - -def get_index_for_offset(off): - start_date = Timestamp('1/1/1900') - return date_range(start_date, - periods=min(1000, get_period_count(start_date, off)), - freq=off) - -idx = get_index_for_offset({}({}, **{})) -df = DataFrame(np.random.randn(len(idx),10), index=idx) -d = dict([ (col,df[col]) for col in df.columns ]) -""".format(offset, n, kwargs) - key = 'frame_ctor_dtindex_{}x{}'.format(offset, n) - if extra: - key += '__{}_{}'.format(extra_arg, extra) - dynamic_benchmarks[key] = Benchmark("DataFrame(d)", setup, name=key) - -# Have to stuff them in globals() so vbench detects 
them -globals().update(dynamic_benchmarks) - -# from a mi-series -setup = common_setup + """ -mi = MultiIndex.from_tuples([(x,y) for x in range(100) for y in range(100)]) -s = Series(randn(10000), index=mi) -""" -frame_from_series = Benchmark("DataFrame(s)", setup) - -#---------------------------------------------------------------------- -# get_numeric_data - -setup = common_setup + """ -df = DataFrame(randn(10000, 25)) -df['foo'] = 'bar' -df['bar'] = 'baz' -df = df.consolidate() -""" - -frame_get_numeric_data = Benchmark('df._get_numeric_data()', setup, - start_date=datetime(2011, 11, 1)) diff --git a/vb_suite/frame_methods.py b/vb_suite/frame_methods.py deleted file mode 100644 index 46343e9c607fd..0000000000000 --- a/vb_suite/frame_methods.py +++ /dev/null @@ -1,525 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# lookup - -setup = common_setup + """ -df = DataFrame(np.random.randn(10000, 8), columns=list('abcdefgh')) -df['foo'] = 'bar' - -row_labels = list(df.index[::10])[:900] -col_labels = list(df.columns) * 100 -row_labels_all = np.array(list(df.index) * len(df.columns), dtype='object') -col_labels_all = np.array(list(df.columns) * len(df.index), dtype='object') -""" - -frame_fancy_lookup = Benchmark('df.lookup(row_labels, col_labels)', setup, - start_date=datetime(2012, 1, 12)) - -frame_fancy_lookup_all = Benchmark('df.lookup(row_labels_all, col_labels_all)', - setup, - start_date=datetime(2012, 1, 12)) - -#---------------------------------------------------------------------- -# fillna in place - -setup = common_setup + """ -df = DataFrame(randn(10000, 100)) -df.values[::2] = np.nan -""" - -frame_fillna_inplace = Benchmark('df.fillna(0, inplace=True)', setup, - start_date=datetime(2012, 4, 4)) - - -#---------------------------------------------------------------------- -# reindex both axes - -setup 
= common_setup + """ -df = DataFrame(randn(10000, 10000)) -idx = np.arange(4000, 7000) -""" - -frame_reindex_axis0 = Benchmark('df.reindex(idx)', setup) - -frame_reindex_axis1 = Benchmark('df.reindex(columns=idx)', setup) - -frame_reindex_both_axes = Benchmark('df.reindex(index=idx, columns=idx)', - setup, start_date=datetime(2011, 1, 1)) - -frame_reindex_both_axes_ix = Benchmark('df.ix[idx, idx]', setup, - start_date=datetime(2011, 1, 1)) - -#---------------------------------------------------------------------- -# reindex with upcasts -setup = common_setup + """ -df=DataFrame(dict([(c, { - 0: randint(0, 2, 1000).astype(np.bool_), - 1: randint(0, 1000, 1000).astype(np.int16), - 2: randint(0, 1000, 1000).astype(np.int32), - 3: randint(0, 1000, 1000).astype(np.int64) - }[randint(0, 4)]) for c in range(1000)])) -""" - -frame_reindex_upcast = Benchmark('df.reindex(permutation(range(1200)))', setup) - -#---------------------------------------------------------------------- -# boolean indexing - -setup = common_setup + """ -df = DataFrame(randn(10000, 100)) -bool_arr = np.zeros(10000, dtype=bool) -bool_arr[:1000] = True -""" - -frame_boolean_row_select = Benchmark('df[bool_arr]', setup, - start_date=datetime(2011, 1, 1)) - -#---------------------------------------------------------------------- -# iteritems (monitor no-copying behaviour) - -setup = common_setup + """ -df = DataFrame(randn(10000, 1000)) -df2 = DataFrame(randn(3000,1),columns=['A']) -df3 = DataFrame(randn(3000,1)) - -def f(): - if hasattr(df, '_item_cache'): - df._item_cache.clear() - for name, col in df.iteritems(): - pass - -def g(): - for name, col in df.iteritems(): - pass - -def h(): - for i in range(10000): - df2['A'] - -def j(): - for i in range(10000): - df3[0] - -""" - -# as far back as the earliest test currently in the suite -frame_iteritems = Benchmark('f()', setup, - start_date=datetime(2010, 6, 1)) - -frame_iteritems_cached = Benchmark('g()', setup, - start_date=datetime(2010, 6, 1)) - 
-frame_getitem_single_column = Benchmark('h()', setup, - start_date=datetime(2010, 6, 1)) - -frame_getitem_single_column2 = Benchmark('j()', setup, - start_date=datetime(2010, 6, 1)) - -#---------------------------------------------------------------------- -# assignment - -setup = common_setup + """ -idx = date_range('1/1/2000', periods=100000, freq='D') -df = DataFrame(randn(100000, 1),columns=['A'],index=idx) -def f(df): - x = df.copy() - x['date'] = x.index -""" - -frame_assign_timeseries_index = Benchmark('f(df)', setup, - start_date=datetime(2013, 10, 1)) - - -#---------------------------------------------------------------------- -# to_string - -setup = common_setup + """ -df = DataFrame(randn(100, 10)) -""" - -frame_to_string_floats = Benchmark('df.to_string()', setup, - start_date=datetime(2010, 6, 1)) - -#---------------------------------------------------------------------- -# to_html - -setup = common_setup + """ -nrows=500 -df = DataFrame(randn(nrows, 10)) -df[0]=period_range("2000","2010",nrows) -df[1]=range(nrows) - -""" - -frame_to_html_mixed = Benchmark('df.to_html()', setup, - start_date=datetime(2011, 11, 18)) - - -# truncated repr_html, single index - -setup = common_setup + """ -nrows=10000 -data=randn(nrows,10) -idx=MultiIndex.from_arrays(np.tile(randn(3,nrows/100),100)) -df=DataFrame(data,index=idx) - -""" - -frame_html_repr_trunc_mi = Benchmark('df._repr_html_()', setup, - start_date=datetime(2013, 11, 25)) - -# truncated repr_html, MultiIndex - -setup = common_setup + """ -nrows=10000 -data=randn(nrows,10) -idx=randn(nrows) -df=DataFrame(data,index=idx) - -""" - -frame_html_repr_trunc_si = Benchmark('df._repr_html_()', setup, - start_date=datetime(2013, 11, 25)) - - -# insert many columns - -setup = common_setup + """ -N = 1000 - -def f(K=500): - df = DataFrame(index=range(N)) - new_col = np.random.randn(N) - for i in range(K): - df[i] = new_col -""" - -frame_insert_500_columns_end = Benchmark('f()', setup, start_date=datetime(2011, 1, 1)) 
- -setup = common_setup + """ -N = 1000 - -def f(K=100): - df = DataFrame(index=range(N)) - new_col = np.random.randn(N) - for i in range(K): - df.insert(0,i,new_col) -""" - -frame_insert_100_columns_begin = Benchmark('f()', setup, start_date=datetime(2011, 1, 1)) - -#---------------------------------------------------------------------- -# strings methods, #2602 - -setup = common_setup + """ -s = Series(['abcdefg', np.nan]*500000) -""" - -series_string_vector_slice = Benchmark('s.str[:5]', setup, - start_date=datetime(2012, 8, 1)) - -#---------------------------------------------------------------------- -# df.info() and get_dtype_counts() # 2807 - -setup = common_setup + """ -df = pandas.DataFrame(np.random.randn(10,10000)) -""" - -frame_get_dtype_counts = Benchmark('df.get_dtype_counts()', setup, - start_date=datetime(2012, 8, 1)) - -## -setup = common_setup + """ -df = pandas.DataFrame(np.random.randn(10,10000)) -""" - -frame_repr_wide = Benchmark('repr(df)', setup, - start_date=datetime(2012, 8, 1)) - -## -setup = common_setup + """ -df = pandas.DataFrame(np.random.randn(10000, 10)) -""" - -frame_repr_tall = Benchmark('repr(df)', setup, - start_date=datetime(2012, 8, 1)) - -## -setup = common_setup + """ -df = DataFrame(randn(100000, 1)) -""" - -frame_xs_row = Benchmark('df.xs(50000)', setup) - -## -setup = common_setup + """ -df = DataFrame(randn(1,100000)) -""" - -frame_xs_col = Benchmark('df.xs(50000,axis = 1)', setup) - -#---------------------------------------------------------------------- -# nulls/masking - -## masking -setup = common_setup + """ -data = np.random.randn(1000, 500) -df = DataFrame(data) -df = df.where(df > 0) # create nans -bools = df > 0 -mask = isnull(df) -""" - -frame_mask_bools = Benchmark('bools.mask(mask)', setup, - start_date=datetime(2013,1,1)) - -frame_mask_floats = Benchmark('bools.astype(float).mask(mask)', setup, - start_date=datetime(2013,1,1)) - -## isnull -setup = common_setup + """ -data = np.random.randn(1000, 1000) -df 
= DataFrame(data) -""" -frame_isnull = Benchmark('isnull(df)', setup, - start_date=datetime(2012,1,1)) - -## dropna -dropna_setup = common_setup + """ -data = np.random.randn(10000, 1000) -df = DataFrame(data) -df.ix[50:1000,20:50] = np.nan -df.ix[2000:3000] = np.nan -df.ix[:,60:70] = np.nan -""" -frame_dropna_axis0_any = Benchmark('df.dropna(how="any",axis=0)', dropna_setup, - start_date=datetime(2012,1,1)) -frame_dropna_axis0_all = Benchmark('df.dropna(how="all",axis=0)', dropna_setup, - start_date=datetime(2012,1,1)) - -frame_dropna_axis1_any = Benchmark('df.dropna(how="any",axis=1)', dropna_setup, - start_date=datetime(2012,1,1)) - -frame_dropna_axis1_all = Benchmark('df.dropna(how="all",axis=1)', dropna_setup, - start_date=datetime(2012,1,1)) - -# dropna on mixed dtypes -dropna_mixed_setup = common_setup + """ -data = np.random.randn(10000, 1000) -df = DataFrame(data) -df.ix[50:1000,20:50] = np.nan -df.ix[2000:3000] = np.nan -df.ix[:,60:70] = np.nan -df['foo'] = 'bar' -""" -frame_dropna_axis0_any_mixed_dtypes = Benchmark('df.dropna(how="any",axis=0)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) -frame_dropna_axis0_all_mixed_dtypes = Benchmark('df.dropna(how="all",axis=0)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) - -frame_dropna_axis1_any_mixed_dtypes = Benchmark('df.dropna(how="any",axis=1)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) - -frame_dropna_axis1_all_mixed_dtypes = Benchmark('df.dropna(how="all",axis=1)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) - -## dropna multi -dropna_setup = common_setup + """ -data = np.random.randn(10000, 1000) -df = DataFrame(data) -df.ix[50:1000,20:50] = np.nan -df.ix[2000:3000] = np.nan -df.ix[:,60:70] = np.nan -df.index = MultiIndex.from_tuples(df.index.map(lambda x: (x, x))) -df.columns = MultiIndex.from_tuples(df.columns.map(lambda x: (x, x))) -""" -frame_count_level_axis0_multi = Benchmark('df.count(axis=0, level=1)', dropna_setup, - start_date=datetime(2012,1,1)) - 
-frame_count_level_axis1_multi = Benchmark('df.count(axis=1, level=1)', dropna_setup, - start_date=datetime(2012,1,1)) - -# dropna on mixed dtypes -dropna_mixed_setup = common_setup + """ -data = np.random.randn(10000, 1000) -df = DataFrame(data) -df.ix[50:1000,20:50] = np.nan -df.ix[2000:3000] = np.nan -df.ix[:,60:70] = np.nan -df['foo'] = 'bar' -df.index = MultiIndex.from_tuples(df.index.map(lambda x: (x, x))) -df.columns = MultiIndex.from_tuples(df.columns.map(lambda x: (x, x))) -""" -frame_count_level_axis0_mixed_dtypes_multi = Benchmark('df.count(axis=0, level=1)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) - -frame_count_level_axis1_mixed_dtypes_multi = Benchmark('df.count(axis=1, level=1)', dropna_mixed_setup, - start_date=datetime(2012,1,1)) - -#---------------------------------------------------------------------- -# apply - -setup = common_setup + """ -s = Series(np.arange(1028.)) -df = DataFrame({ i:s for i in range(1028) }) -""" -frame_apply_user_func = Benchmark('df.apply(lambda x: np.corrcoef(x,s)[0,1])', setup, - name = 'frame_apply_user_func', - start_date=datetime(2012,1,1)) - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000,100)) -""" -frame_apply_lambda_mean = Benchmark('df.apply(lambda x: x.sum())', setup, - name = 'frame_apply_lambda_mean', - start_date=datetime(2012,1,1)) -setup = common_setup + """ -df = DataFrame(np.random.randn(1000,100)) -""" -frame_apply_np_mean = Benchmark('df.apply(np.mean)', setup, - name = 'frame_apply_np_mean', - start_date=datetime(2012,1,1)) - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000,100)) -""" -frame_apply_pass_thru = Benchmark('df.apply(lambda x: x)', setup, - name = 'frame_apply_pass_thru', - start_date=datetime(2012,1,1)) - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000,100)) -""" -frame_apply_axis_1 = Benchmark('df.apply(lambda x: x+1,axis=1)', setup, - name = 'frame_apply_axis_1', - start_date=datetime(2012,1,1)) - -setup = common_setup + """ 
-df = DataFrame(np.random.randn(1000,3),columns=list('ABC')) -""" -frame_apply_ref_by_name = Benchmark('df.apply(lambda x: x["A"] + x["B"],axis=1)', setup, - name = 'frame_apply_ref_by_name', - start_date=datetime(2012,1,1)) - -#---------------------------------------------------------------------- -# dtypes - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000,1000)) -""" -frame_dtypes = Benchmark('df.dtypes', setup, - start_date=datetime(2012,1,1)) - -#---------------------------------------------------------------------- -# equals -setup = common_setup + """ -def make_pair(frame): - df = frame - df2 = df.copy() - df2.ix[-1,-1] = np.nan - return df, df2 - -def test_equal(name): - df, df2 = pairs[name] - return df.equals(df) - -def test_unequal(name): - df, df2 = pairs[name] - return df.equals(df2) - -float_df = DataFrame(np.random.randn(1000, 1000)) -object_df = DataFrame([['foo']*1000]*1000) -nonunique_cols = object_df.copy() -nonunique_cols.columns = ['A']*len(nonunique_cols.columns) - -pairs = dict([(name, make_pair(frame)) - for name, frame in (('float_df', float_df), ('object_df', object_df), ('nonunique_cols', nonunique_cols))]) -""" -frame_float_equal = Benchmark('test_equal("float_df")', setup) -frame_object_equal = Benchmark('test_equal("object_df")', setup) -frame_nonunique_equal = Benchmark('test_equal("nonunique_cols")', setup) - -frame_float_unequal = Benchmark('test_unequal("float_df")', setup) -frame_object_unequal = Benchmark('test_unequal("object_df")', setup) -frame_nonunique_unequal = Benchmark('test_unequal("nonunique_cols")', setup) - -#----------------------------------------------------------------------------- -# interpolate -# this is the worst case, where every column has NaNs. 
-setup = common_setup + """ -df = DataFrame(randn(10000, 100)) -df.values[::2] = np.nan -""" - -frame_interpolate = Benchmark('df.interpolate()', setup, - start_date=datetime(2014, 2, 7)) - -setup = common_setup + """ -df = DataFrame({'A': np.arange(0, 10000), - 'B': np.random.randint(0, 100, 10000), - 'C': randn(10000), - 'D': randn(10000)}) -df.loc[1::5, 'A'] = np.nan -df.loc[1::5, 'C'] = np.nan -""" - -frame_interpolate_some_good = Benchmark('df.interpolate()', setup, - start_date=datetime(2014, 2, 7)) -frame_interpolate_some_good_infer = Benchmark('df.interpolate(downcast="infer")', - setup, - start_date=datetime(2014, 2, 7)) - - -#------------------------------------------------------------------------- -# frame shift speedup issue-5609 - -setup = common_setup + """ -df = DataFrame(np.random.rand(10000,500)) -# note: df._data.blocks are f_contigous -""" -frame_shift_axis0 = Benchmark('df.shift(1,axis=0)', setup, - start_date=datetime(2014,1,1)) -frame_shift_axis1 = Benchmark('df.shift(1,axis=1)', setup, - name = 'frame_shift_axis_1', - start_date=datetime(2014,1,1)) - - -#----------------------------------------------------------------------------- -# from_records issue-6700 - -setup = common_setup + """ -def get_data(n=100000): - return ((x, x*20, x*100) for x in range(n)) -""" - -frame_from_records_generator = Benchmark('df = DataFrame.from_records(get_data())', - setup, - name='frame_from_records_generator', - start_date=datetime(2013,10,4)) # issue-4911 - -frame_from_records_generator_nrows = Benchmark('df = DataFrame.from_records(get_data(), nrows=1000)', - setup, - name='frame_from_records_generator_nrows', - start_date=datetime(2013,10,04)) # issue-4911 - -#----------------------------------------------------------------------------- -# duplicated - -setup = common_setup + ''' -n = 1 << 20 - -t = date_range('2015-01-01', freq='S', periods=n // 64) -xs = np.random.randn(n // 64).round(2) - -df = DataFrame({'a':np.random.randint(- 1 << 8, 1 << 8, n), - 
'b':np.random.choice(t, n), - 'c':np.random.choice(xs, n)}) -''' - -frame_duplicated = Benchmark('df.duplicated()', setup, - name='frame_duplicated') diff --git a/vb_suite/generate_rst_files.py b/vb_suite/generate_rst_files.py deleted file mode 100644 index 92e7cd4d59b71..0000000000000 --- a/vb_suite/generate_rst_files.py +++ /dev/null @@ -1,2 +0,0 @@ -from suite import benchmarks, generate_rst_files -generate_rst_files(benchmarks) diff --git a/vb_suite/gil.py b/vb_suite/gil.py deleted file mode 100644 index df2bd2dcd8db4..0000000000000 --- a/vb_suite/gil.py +++ /dev/null @@ -1,110 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -basic = common_setup + """ -try: - from pandas.util.testing import test_parallel - have_real_test_parallel = True -except ImportError: - have_real_test_parallel = False - def test_parallel(num_threads=1): - def wrapper(fname): - return fname - - return wrapper - -N = 1000000 -ngroups = 1000 -np.random.seed(1234) - -df = DataFrame({'key' : np.random.randint(0,ngroups,size=N), - 'data' : np.random.randn(N) }) - -if not have_real_test_parallel: - raise NotImplementedError -""" - -setup = basic + """ - -def f(): - df.groupby('key')['data'].sum() - -# run consecutivily -def g2(): - for i in range(2): - f() -def g4(): - for i in range(4): - f() -def g8(): - for i in range(8): - f() - -# run in parallel -@test_parallel(num_threads=2) -def pg2(): - f() - -@test_parallel(num_threads=4) -def pg4(): - f() - -@test_parallel(num_threads=8) -def pg8(): - f() - -""" - -nogil_groupby_sum_4 = Benchmark( - 'pg4()', setup, - start_date=datetime(2015, 1, 1)) - -nogil_groupby_sum_8 = Benchmark( - 'pg8()', setup, - start_date=datetime(2015, 1, 1)) - - -#### test all groupby funcs #### - -setup = basic + """ - -@test_parallel(num_threads=2) -def pg2(): - df.groupby('key')['data'].func() - -""" - -for f in ['sum','prod','var','count','min','max','mean','last']: - - name = 
"nogil_groupby_{f}_2".format(f=f) - bmark = Benchmark('pg2()', setup.replace('func',f), start_date=datetime(2015, 1, 1)) - bmark.name = name - globals()[name] = bmark - -del bmark - - -#### test take_1d #### -setup = basic + """ -from pandas.core import common as com - -N = 1e7 -df = DataFrame({'int64' : np.arange(N,dtype='int64'), - 'float64' : np.arange(N,dtype='float64')}) -indexer = np.arange(100,len(df)-100) - -@test_parallel(num_threads=2) -def take_1d_pg2_int64(): - com.take_1d(df.int64.values,indexer) - -@test_parallel(num_threads=2) -def take_1d_pg2_float64(): - com.take_1d(df.float64.values,indexer) - -""" - -nogil_take1d_float64 = Benchmark('take_1d_pg2_int64()', setup, start_date=datetime(2015, 1, 1)) -nogil_take1d_int64 = Benchmark('take_1d_pg2_float64()', setup, start_date=datetime(2015, 1, 1)) diff --git a/vb_suite/groupby.py b/vb_suite/groupby.py deleted file mode 100644 index 268d71f864823..0000000000000 --- a/vb_suite/groupby.py +++ /dev/null @@ -1,620 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -setup = common_setup + """ -N = 100000 -ngroups = 100 - -def get_test_data(ngroups=100, n=100000): - unique_groups = range(ngroups) - arr = np.asarray(np.tile(unique_groups, n / ngroups), dtype=object) - - if len(arr) < n: - arr = np.asarray(list(arr) + unique_groups[:n - len(arr)], - dtype=object) - - random.shuffle(arr) - return arr - -# aggregate multiple columns -df = DataFrame({'key1' : get_test_data(ngroups=ngroups), - 'key2' : get_test_data(ngroups=ngroups), - 'data1' : np.random.randn(N), - 'data2' : np.random.randn(N)}) -def f(): - df.groupby(['key1', 'key2']).agg(lambda x: x.values.sum()) - -simple_series = Series(np.random.randn(N)) -key1 = df['key1'] -""" - -stmt1 = "df.groupby(['key1', 'key2'])['data1'].agg(lambda x: x.values.sum())" -groupby_multi_python = Benchmark(stmt1, setup, - start_date=datetime(2011, 7, 1)) - -stmt3 = "df.groupby(['key1', 
'key2']).sum()" -groupby_multi_cython = Benchmark(stmt3, setup, - start_date=datetime(2011, 7, 1)) - -stmt = "df.groupby(['key1', 'key2'])['data1'].agg(np.std)" -groupby_multi_series_op = Benchmark(stmt, setup, - start_date=datetime(2011, 8, 1)) - -groupby_series_simple_cython = \ - Benchmark('simple_series.groupby(key1).sum()', setup, - start_date=datetime(2011, 3, 1)) - - -stmt4 = "df.groupby('key1').rank(pct=True)" -groupby_series_simple_cython = Benchmark(stmt4, setup, - start_date=datetime(2014, 1, 16)) - -#---------------------------------------------------------------------- -# 2d grouping, aggregate many columns - -setup = common_setup + """ -labels = np.random.randint(0, 100, size=1000) -df = DataFrame(randn(1000, 1000)) -""" - -groupby_frame_cython_many_columns = Benchmark( - 'df.groupby(labels).sum()', setup, - start_date=datetime(2011, 8, 1), - logy=True) - -#---------------------------------------------------------------------- -# single key, long, integer key - -setup = common_setup + """ -data = np.random.randn(100000, 1) -labels = np.random.randint(0, 1000, size=100000) -df = DataFrame(data) -""" - -groupby_frame_singlekey_integer = \ - Benchmark('df.groupby(labels).sum()', setup, - start_date=datetime(2011, 8, 1), logy=True) - -#---------------------------------------------------------------------- -# group with different functions per column - -setup = common_setup + """ -fac1 = np.array(['A', 'B', 'C'], dtype='O') -fac2 = np.array(['one', 'two'], dtype='O') - -df = DataFrame({'key1': fac1.take(np.random.randint(0, 3, size=100000)), - 'key2': fac2.take(np.random.randint(0, 2, size=100000)), - 'value1' : np.random.randn(100000), - 'value2' : np.random.randn(100000), - 'value3' : np.random.randn(100000)}) -""" - -groupby_multi_different_functions = \ - Benchmark("""df.groupby(['key1', 'key2']).agg({'value1' : 'mean', - 'value2' : 'var', - 'value3' : 'sum'})""", - setup, start_date=datetime(2011, 9, 1)) - -groupby_multi_different_numpy_functions = \ 
- Benchmark("""df.groupby(['key1', 'key2']).agg({'value1' : np.mean, - 'value2' : np.var, - 'value3' : np.sum})""", - setup, start_date=datetime(2011, 9, 1)) - -#---------------------------------------------------------------------- -# size() speed - -setup = common_setup + """ -n = 100000 -offsets = np.random.randint(n, size=n).astype('timedelta64[ns]') -dates = np.datetime64('now') + offsets -df = DataFrame({'key1': np.random.randint(0, 500, size=n), - 'key2': np.random.randint(0, 100, size=n), - 'value1' : np.random.randn(n), - 'value2' : np.random.randn(n), - 'value3' : np.random.randn(n), - 'dates' : dates}) -""" - -groupby_multi_size = Benchmark("df.groupby(['key1', 'key2']).size()", - setup, start_date=datetime(2011, 10, 1)) - -groupby_dt_size = Benchmark("df.groupby(['dates']).size()", - setup, start_date=datetime(2011, 10, 1)) - -groupby_dt_timegrouper_size = Benchmark("df.groupby(TimeGrouper(key='dates', freq='M')).size()", - setup, start_date=datetime(2011, 10, 1)) - -#---------------------------------------------------------------------- -# count() speed - -setup = common_setup + """ -n = 10000 -offsets = np.random.randint(n, size=n).astype('timedelta64[ns]') - -dates = np.datetime64('now') + offsets -dates[np.random.rand(n) > 0.5] = np.datetime64('nat') - -offsets[np.random.rand(n) > 0.5] = np.timedelta64('nat') - -value2 = np.random.randn(n) -value2[np.random.rand(n) > 0.5] = np.nan - -obj = np.random.choice(list('ab'), size=n).astype(object) -obj[np.random.randn(n) > 0.5] = np.nan - -df = DataFrame({'key1': np.random.randint(0, 500, size=n), - 'key2': np.random.randint(0, 100, size=n), - 'dates': dates, - 'value2' : value2, - 'value3' : np.random.randn(n), - 'ints': np.random.randint(0, 1000, size=n), - 'obj': obj, - 'offsets': offsets}) -""" - -groupby_multi_count = Benchmark("df.groupby(['key1', 'key2']).count()", - setup, name='groupby_multi_count', - start_date=datetime(2014, 5, 5)) - -setup = common_setup + """ -n = 10000 - -df = 
DataFrame({'key1': randint(0, 500, size=n), - 'key2': randint(0, 100, size=n), - 'ints': randint(0, 1000, size=n), - 'ints2': randint(0, 1000, size=n)}) -""" - -groupby_int_count = Benchmark("df.groupby(['key1', 'key2']).count()", - setup, name='groupby_int_count', - start_date=datetime(2014, 5, 6)) -#---------------------------------------------------------------------- -# Series.value_counts - -setup = common_setup + """ -s = Series(np.random.randint(0, 1000, size=100000)) -""" - -series_value_counts_int64 = Benchmark('s.value_counts()', setup, - start_date=datetime(2011, 10, 21)) - -# value_counts on lots of strings - -setup = common_setup + """ -K = 1000 -N = 100000 -uniques = tm.makeStringIndex(K).values -s = Series(np.tile(uniques, N // K)) -""" - -series_value_counts_strings = Benchmark('s.value_counts()', setup, - start_date=datetime(2011, 10, 21)) - -#value_counts on float dtype - -setup = common_setup + """ -s = Series(np.random.randint(0, 1000, size=100000)).astype(float) -""" - -series_value_counts_float64 = Benchmark('s.value_counts()', setup, - start_date=datetime(2015, 8, 17)) - -#---------------------------------------------------------------------- -# pivot_table - -setup = common_setup + """ -fac1 = np.array(['A', 'B', 'C'], dtype='O') -fac2 = np.array(['one', 'two'], dtype='O') - -ind1 = np.random.randint(0, 3, size=100000) -ind2 = np.random.randint(0, 2, size=100000) - -df = DataFrame({'key1': fac1.take(ind1), -'key2': fac2.take(ind2), -'key3': fac2.take(ind2), -'value1' : np.random.randn(100000), -'value2' : np.random.randn(100000), -'value3' : np.random.randn(100000)}) -""" - -stmt = "df.pivot_table(index='key1', columns=['key2', 'key3'])" -groupby_pivot_table = Benchmark(stmt, setup, start_date=datetime(2011, 12, 15)) - - -#---------------------------------------------------------------------- -# dict return values - -setup = common_setup + """ -labels = np.arange(1000).repeat(10) -data = Series(randn(len(labels))) -f = lambda x: {'first': 
x.values[0], 'last': x.values[-1]} -""" - -groupby_apply_dict_return = Benchmark('data.groupby(labels).apply(f)', - setup, start_date=datetime(2011, 12, 15)) - -#---------------------------------------------------------------------- -# First / last functions - -setup = common_setup + """ -labels = np.arange(10000).repeat(10) -data = Series(randn(len(labels))) -data[::3] = np.nan -data[1::3] = np.nan -data2 = Series(randn(len(labels)),dtype='float32') -data2[::3] = np.nan -data2[1::3] = np.nan -labels = labels.take(np.random.permutation(len(labels))) -""" - -groupby_first_float64 = Benchmark('data.groupby(labels).first()', setup, - start_date=datetime(2012, 5, 1)) - -groupby_first_float32 = Benchmark('data2.groupby(labels).first()', setup, - start_date=datetime(2013, 1, 1)) - -groupby_last_float64 = Benchmark('data.groupby(labels).last()', setup, - start_date=datetime(2012, 5, 1)) - -groupby_last_float32 = Benchmark('data2.groupby(labels).last()', setup, - start_date=datetime(2013, 1, 1)) - -groupby_nth_float64_none = Benchmark('data.groupby(labels).nth(0)', setup, - start_date=datetime(2012, 5, 1)) -groupby_nth_float32_none = Benchmark('data2.groupby(labels).nth(0)', setup, - start_date=datetime(2013, 1, 1)) -groupby_nth_float64_any = Benchmark('data.groupby(labels).nth(0,dropna="all")', setup, - start_date=datetime(2012, 5, 1)) -groupby_nth_float32_any = Benchmark('data2.groupby(labels).nth(0,dropna="all")', setup, - start_date=datetime(2013, 1, 1)) - -# with datetimes (GH7555) -setup = common_setup + """ -df = DataFrame({'a' : date_range('1/1/2011',periods=100000,freq='s'),'b' : range(100000)}) -""" - -groupby_first_datetimes = Benchmark('df.groupby("b").first()', setup, - start_date=datetime(2013, 5, 1)) -groupby_last_datetimes = Benchmark('df.groupby("b").last()', setup, - start_date=datetime(2013, 5, 1)) -groupby_nth_datetimes_none = Benchmark('df.groupby("b").nth(0)', setup, - start_date=datetime(2013, 5, 1)) -groupby_nth_datetimes_any = 
Benchmark('df.groupby("b").nth(0,dropna="all")', setup, - start_date=datetime(2013, 5, 1)) - -# with object -setup = common_setup + """ -df = DataFrame({'a' : ['foo']*100000,'b' : range(100000)}) -""" - -groupby_first_object = Benchmark('df.groupby("b").first()', setup, - start_date=datetime(2013, 5, 1)) -groupby_last_object = Benchmark('df.groupby("b").last()', setup, - start_date=datetime(2013, 5, 1)) -groupby_nth_object_none = Benchmark('df.groupby("b").nth(0)', setup, - start_date=datetime(2013, 5, 1)) -groupby_nth_object_any = Benchmark('df.groupby("b").nth(0,dropna="any")', setup, - start_date=datetime(2013, 5, 1)) - -#---------------------------------------------------------------------- -# groupby_indices replacement, chop up Series - -setup = common_setup + """ -try: - rng = date_range('1/1/2000', '12/31/2005', freq='H') - year, month, day = rng.year, rng.month, rng.day -except: - rng = date_range('1/1/2000', '12/31/2000', offset=datetools.Hour()) - year = rng.map(lambda x: x.year) - month = rng.map(lambda x: x.month) - day = rng.map(lambda x: x.day) - -ts = Series(np.random.randn(len(rng)), index=rng) -""" - -groupby_indices = Benchmark('len(ts.groupby([year, month, day]))', - setup, start_date=datetime(2012, 1, 1)) - -#---------------------------------------------------------------------- -# median - -#---------------------------------------------------------------------- -# single key, long, integer key - -setup = common_setup + """ -data = np.random.randn(100000, 2) -labels = np.random.randint(0, 1000, size=100000) -df = DataFrame(data) -""" - -groupby_frame_median = \ - Benchmark('df.groupby(labels).median()', setup, - start_date=datetime(2011, 8, 1), logy=True) - - -setup = common_setup + """ -data = np.random.randn(1000000, 2) -labels = np.random.randint(0, 1000, size=1000000) -df = DataFrame(data) -""" - -groupby_simple_compress_timing = \ - Benchmark('df.groupby(labels).mean()', setup, - start_date=datetime(2011, 8, 1)) - - 
-#---------------------------------------------------------------------- -# DataFrame Apply overhead - -setup = common_setup + """ -N = 10000 -labels = np.random.randint(0, 2000, size=N) -labels2 = np.random.randint(0, 3, size=N) -df = DataFrame({'key': labels, -'key2': labels2, -'value1': randn(N), -'value2': ['foo', 'bar', 'baz', 'qux'] * (N / 4)}) -def f(g): - return 1 -""" - -groupby_frame_apply_overhead = Benchmark("df.groupby('key').apply(f)", setup, - start_date=datetime(2011, 10, 1)) - -groupby_frame_apply = Benchmark("df.groupby(['key', 'key2']).apply(f)", setup, - start_date=datetime(2011, 10, 1)) - - -#---------------------------------------------------------------------- -# DataFrame nth - -setup = common_setup + """ -df = DataFrame(np.random.randint(1, 100, (10000, 2))) -""" - -# Not really a fair test as behaviour has changed! -groupby_frame_nth_none = Benchmark("df.groupby(0).nth(0)", setup, - start_date=datetime(2014, 3, 1)) - -groupby_series_nth_none = Benchmark("df[1].groupby(df[0]).nth(0)", setup, - start_date=datetime(2014, 3, 1)) -groupby_frame_nth_any= Benchmark("df.groupby(0).nth(0,dropna='any')", setup, - start_date=datetime(2014, 3, 1)) - -groupby_series_nth_any = Benchmark("df[1].groupby(df[0]).nth(0,dropna='any')", setup, - start_date=datetime(2014, 3, 1)) - - -#---------------------------------------------------------------------- -# Sum booleans #2692 - -setup = common_setup + """ -N = 500 -df = DataFrame({'ii':range(N),'bb':[True for x in range(N)]}) -""" - -groupby_sum_booleans = Benchmark("df.groupby('ii').sum()", setup) - - -#---------------------------------------------------------------------- -# multi-indexed group sum #9049 - -setup = common_setup + """ -N = 50 -df = DataFrame({'A': range(N) * 2, 'B': range(N*2), 'C': 1}).set_index(["A", "B"]) -""" - -groupby_sum_multiindex = Benchmark("df.groupby(level=[0, 1]).sum()", setup) - - -#---------------------------------------------------------------------- -# Transform testing - 
-setup = common_setup + """ -n_dates = 400 -n_securities = 250 -n_columns = 3 -share_na = 0.1 - -dates = date_range('1997-12-31', periods=n_dates, freq='B') -dates = Index(map(lambda x: x.year * 10000 + x.month * 100 + x.day, dates)) - -secid_min = int('10000000', 16) -secid_max = int('F0000000', 16) -step = (secid_max - secid_min) // (n_securities - 1) -security_ids = map(lambda x: hex(x)[2:10].upper(), range(secid_min, secid_max + 1, step)) - -data_index = MultiIndex(levels=[dates.values, security_ids], - labels=[[i for i in range(n_dates) for _ in xrange(n_securities)], range(n_securities) * n_dates], - names=['date', 'security_id']) -n_data = len(data_index) - -columns = Index(['factor{}'.format(i) for i in range(1, n_columns + 1)]) - -data = DataFrame(np.random.randn(n_data, n_columns), index=data_index, columns=columns) - -step = int(n_data * share_na) -for column_index in range(n_columns): - index = column_index - while index < n_data: - data.set_value(data_index[index], columns[column_index], np.nan) - index += step - -f_fillna = lambda x: x.fillna(method='pad') -""" - -groupby_transform = Benchmark("data.groupby(level='security_id').transform(f_fillna)", setup) -groupby_transform_ufunc = Benchmark("data.groupby(level='date').transform(np.max)", setup) - -setup = common_setup + """ -np.random.seed(0) - -N = 120000 -N_TRANSITIONS = 1400 - -# generate groups -transition_points = np.random.permutation(np.arange(N))[:N_TRANSITIONS] -transition_points.sort() -transitions = np.zeros((N,), dtype=np.bool) -transitions[transition_points] = True -g = transitions.cumsum() - -df = DataFrame({ 'signal' : np.random.rand(N)}) -""" -groupby_transform_series = Benchmark("df['signal'].groupby(g).transform(np.mean)", setup) - -setup = common_setup + """ -np.random.seed(0) - -df=DataFrame( { 'id' : np.arange( 100000 ) / 3, - 'val': np.random.randn( 100000) } ) -""" - -groupby_transform_series2 = Benchmark("df.groupby('id')['val'].transform(np.mean)", setup) - -setup = 
common_setup + ''' -np.random.seed(2718281) -n = 20000 -df = DataFrame(np.random.randint(1, n, (n, 3)), - columns=['jim', 'joe', 'jolie']) -''' - -stmt = "df.groupby(['jim', 'joe'])['jolie'].transform('max')"; -groupby_transform_multi_key1 = Benchmark(stmt, setup) -groupby_transform_multi_key2 = Benchmark(stmt, setup + "df['jim'] = df['joe']") - -setup = common_setup + ''' -np.random.seed(2718281) -n = 200000 -df = DataFrame(np.random.randint(1, n / 10, (n, 3)), - columns=['jim', 'joe', 'jolie']) -''' -groupby_transform_multi_key3 = Benchmark(stmt, setup) -groupby_transform_multi_key4 = Benchmark(stmt, setup + "df['jim'] = df['joe']") - -setup = common_setup + ''' -np.random.seed(27182) -n = 100000 -df = DataFrame(np.random.randint(1, n / 100, (n, 3)), - columns=['jim', 'joe', 'jolie']) -''' - -groupby_agg_builtins1 = Benchmark("df.groupby('jim').agg([sum, min, max])", setup) -groupby_agg_builtins2 = Benchmark("df.groupby(['jim', 'joe']).agg([sum, min, max])", setup) - - -setup = common_setup + ''' -arr = np.random.randint(- 1 << 12, 1 << 12, (1 << 17, 5)) -i = np.random.choice(len(arr), len(arr) * 5) -arr = np.vstack((arr, arr[i])) # add sume duplicate rows - -i = np.random.permutation(len(arr)) -arr = arr[i] # shuffle rows - -df = DataFrame(arr, columns=list('abcde')) -df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10 -''' - -groupby_int64_overflow = Benchmark("df.groupby(list('abcde')).max()", setup, - name='groupby_int64_overflow') - - -setup = common_setup + ''' -from itertools import product -from string import ascii_letters, digits - -n = 5 * 7 * 11 * (1 << 9) -alpha = list(map(''.join, product(ascii_letters + digits, repeat=4))) -f = lambda k: np.repeat(np.random.choice(alpha, n // k), k) - -df = DataFrame({'a': f(11), 'b': f(7), 'c': f(5), 'd': f(1)}) -df['joe'] = (np.random.randn(len(df)) * 10).round(3) - -i = np.random.permutation(len(df)) -df = df.iloc[i].reset_index(drop=True).copy() -''' - -groupby_multi_index = 
Benchmark("df.groupby(list('abcd')).max()", setup, - name='groupby_multi_index') - -#---------------------------------------------------------------------- -# groupby with a variable value for ngroups - - -ngroups_list = [100, 10000] -no_arg_func_list = [ - 'all', - 'any', - 'count', - 'cumcount', - 'cummax', - 'cummin', - 'cumprod', - 'cumsum', - 'describe', - 'diff', - 'first', - 'head', - 'last', - 'mad', - 'max', - 'mean', - 'median', - 'min', - 'nunique', - 'pct_change', - 'prod', - 'rank', - 'sem', - 'size', - 'skew', - 'std', - 'sum', - 'tail', - 'unique', - 'var', - 'value_counts', -] - - -_stmt_template = "df.groupby('value')['timestamp'].%s" -_setup_template = common_setup + """ -np.random.seed(1234) -ngroups = %s -size = ngroups * 2 -rng = np.arange(ngroups) -df = DataFrame(dict( - timestamp=rng.take(np.random.randint(0, ngroups, size=size)), - value=np.random.randint(0, size, size=size) -)) -""" -START_DATE = datetime(2011, 7, 1) - - -def make_large_ngroups_bmark(ngroups, func_name, func_args=''): - bmark_name = 'groupby_ngroups_%s_%s' % (ngroups, func_name) - stmt = _stmt_template % ('%s(%s)' % (func_name, func_args)) - setup = _setup_template % ngroups - bmark = Benchmark(stmt, setup, start_date=START_DATE) - # MUST set name - bmark.name = bmark_name - return bmark - - -def inject_bmark_into_globals(bmark): - if not bmark.name: - raise AssertionError('benchmark must have a name') - globals()[bmark.name] = bmark - - -for ngroups in ngroups_list: - for func_name in no_arg_func_list: - bmark = make_large_ngroups_bmark(ngroups, func_name) - inject_bmark_into_globals(bmark) - -# avoid bmark to be collected as Benchmark object -del bmark diff --git a/vb_suite/hdfstore_bench.py b/vb_suite/hdfstore_bench.py deleted file mode 100644 index 393fd4cc77e66..0000000000000 --- a/vb_suite/hdfstore_bench.py +++ /dev/null @@ -1,278 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -start_date = datetime(2012, 7, 1) - -common_setup = """from 
.pandas_vb_common import * -import os - -f = '__test__.h5' -def remove(f): - try: - os.remove(f) - except: - pass - -""" - -#---------------------------------------------------------------------- -# get from a store - -setup1 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000)}, - index=index) -remove(f) -store = HDFStore(f) -store.put('df1',df) -""" - -read_store = Benchmark("store.get('df1')", setup1, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a store - -setup2 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000)}, - index=index) -remove(f) -store = HDFStore(f) -""" - -write_store = Benchmark( - "store.put('df2',df)", setup2, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# get from a store (mixed) - -setup3 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000), - 'string1' : ['foo'] * 25000, - 'bool1' : [True] * 25000, - 'int1' : np.random.randint(0, 250000, size=25000)}, - index=index) -remove(f) -store = HDFStore(f) -store.put('df3',df) -""" - -read_store_mixed = Benchmark( - "store.get('df3')", setup3, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a store (mixed) - -setup4 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000), - 'string1' : ['foo'] * 25000, - 'bool1' : [True] * 25000, - 'int1' : np.random.randint(0, 250000, size=25000)}, - index=index) -remove(f) -store = HDFStore(f) -""" - -write_store_mixed = Benchmark( - "store.put('df4',df)", setup4, cleanup="store.close()", - start_date=start_date) - 
-#---------------------------------------------------------------------- -# get from a table (mixed) - -setup5 = common_setup + """ -N=10000 -index = tm.makeStringIndex(N) -df = DataFrame({'float1' : randn(N), - 'float2' : randn(N), - 'string1' : ['foo'] * N, - 'bool1' : [True] * N, - 'int1' : np.random.randint(0, N, size=N)}, - index=index) - -remove(f) -store = HDFStore(f) -store.append('df5',df) -""" - -read_store_table_mixed = Benchmark( - "store.select('df5')", setup5, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a table (mixed) - -setup6 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000), - 'string1' : ['foo'] * 25000, - 'bool1' : [True] * 25000, - 'int1' : np.random.randint(0, 25000, size=25000)}, - index=index) -remove(f) -store = HDFStore(f) -""" - -write_store_table_mixed = Benchmark( - "store.append('df6',df)", setup6, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# select from a table - -setup7 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000) }, - index=index) - -remove(f) -store = HDFStore(f) -store.append('df7',df) -""" - -read_store_table = Benchmark( - "store.select('df7')", setup7, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a table - -setup8 = common_setup + """ -index = tm.makeStringIndex(25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000) }, - index=index) -remove(f) -store = HDFStore(f) -""" - -write_store_table = Benchmark( - "store.append('df8',df)", setup8, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# get from a table (wide) - 
-setup9 = common_setup + """ -df = DataFrame(np.random.randn(25000,100)) - -remove(f) -store = HDFStore(f) -store.append('df9',df) -""" - -read_store_table_wide = Benchmark( - "store.select('df9')", setup9, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a table (wide) - -setup10 = common_setup + """ -df = DataFrame(np.random.randn(25000,100)) - -remove(f) -store = HDFStore(f) -""" - -write_store_table_wide = Benchmark( - "store.append('df10',df)", setup10, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# get from a table (wide) - -setup11 = common_setup + """ -index = date_range('1/1/2000', periods = 25000) -df = DataFrame(np.random.randn(25000,100), index = index) - -remove(f) -store = HDFStore(f) -store.append('df11',df) -""" - -query_store_table_wide = Benchmark( - "store.select('df11', [ ('index', '>', df.index[10000]), ('index', '<', df.index[15000]) ])", setup11, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# query from a table - -setup12 = common_setup + """ -index = date_range('1/1/2000', periods = 25000) -df = DataFrame({'float1' : randn(25000), - 'float2' : randn(25000) }, - index=index) - -remove(f) -store = HDFStore(f) -store.append('df12',df) -""" - -query_store_table = Benchmark( - "store.select('df12', [ ('index', '>', df.index[10000]), ('index', '<', df.index[15000]) ])", setup12, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# select from a panel table - -setup13 = common_setup + """ -p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ], - major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ]) - -remove(f) -store = HDFStore(f) -store.append('p1',p) -""" - 
-read_store_table_panel = Benchmark( - "store.select('p1')", setup13, cleanup="store.close()", - start_date=start_date) - - -#---------------------------------------------------------------------- -# write to a panel table - -setup14 = common_setup + """ -p = Panel(randn(20, 1000, 25), items= [ 'Item%03d' % i for i in range(20) ], - major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%03d' % i for i in range(25) ]) - -remove(f) -store = HDFStore(f) -""" - -write_store_table_panel = Benchmark( - "store.append('p2',p)", setup14, cleanup="store.close()", - start_date=start_date) - -#---------------------------------------------------------------------- -# write to a table (data_columns) - -setup15 = common_setup + """ -df = DataFrame(np.random.randn(10000,10),columns = [ 'C%03d' % i for i in range(10) ]) - -remove(f) -store = HDFStore(f) -""" - -write_store_table_dc = Benchmark( - "store.append('df15',df,data_columns=True)", setup15, cleanup="store.close()", - start_date=start_date) - diff --git a/vb_suite/index_object.py b/vb_suite/index_object.py deleted file mode 100644 index 2ab2bc15f3853..0000000000000 --- a/vb_suite/index_object.py +++ /dev/null @@ -1,173 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -SECTION = "Index / MultiIndex objects" - - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# intersection, union - -setup = common_setup + """ -rng = DatetimeIndex(start='1/1/2000', periods=10000, freq=datetools.Minute()) -if rng.dtype == object: - rng = rng.view(Index) -else: - rng = rng.asobject -rng2 = rng[:-1] -""" - -index_datetime_intersection = Benchmark("rng.intersection(rng2)", setup) -index_datetime_union = Benchmark("rng.union(rng2)", setup) - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=10000, freq='T') -rng2 = rng[:-1] -""" - -datetime_index_intersection = Benchmark("rng.intersection(rng2)", setup, - 
start_date=datetime(2013, 9, 27)) -datetime_index_union = Benchmark("rng.union(rng2)", setup, - start_date=datetime(2013, 9, 27)) - -# integers -setup = common_setup + """ -N = 1000000 -options = np.arange(N) - -left = Index(options.take(np.random.permutation(N)[:N // 2])) -right = Index(options.take(np.random.permutation(N)[:N // 2])) -""" - -index_int64_union = Benchmark('left.union(right)', setup, - start_date=datetime(2011, 1, 1)) - -index_int64_intersection = Benchmark('left.intersection(right)', setup, - start_date=datetime(2011, 1, 1)) - -#---------------------------------------------------------------------- -# string index slicing -setup = common_setup + """ -idx = tm.makeStringIndex(1000000) - -mask = np.arange(1000000) % 3 == 0 -series_mask = Series(mask) -""" -index_str_slice_indexer_basic = Benchmark('idx[:-1]', setup) -index_str_slice_indexer_even = Benchmark('idx[::2]', setup) -index_str_boolean_indexer = Benchmark('idx[mask]', setup) -index_str_boolean_series_indexer = Benchmark('idx[series_mask]', setup) - -#---------------------------------------------------------------------- -# float64 index -#---------------------------------------------------------------------- -# construction -setup = common_setup + """ -baseidx = np.arange(1e6) -""" - -index_float64_construct = Benchmark('Index(baseidx)', setup, - name='index_float64_construct', - start_date=datetime(2014, 4, 13)) - -setup = common_setup + """ -idx = tm.makeFloatIndex(1000000) - -mask = np.arange(idx.size) % 3 == 0 -series_mask = Series(mask) -""" -#---------------------------------------------------------------------- -# getting -index_float64_get = Benchmark('idx[1]', setup, name='index_float64_get', - start_date=datetime(2014, 4, 13)) - - -#---------------------------------------------------------------------- -# slicing -index_float64_slice_indexer_basic = Benchmark('idx[:-1]', setup, - name='index_float64_slice_indexer_basic', - start_date=datetime(2014, 4, 13)) 
-index_float64_slice_indexer_even = Benchmark('idx[::2]', setup, - name='index_float64_slice_indexer_even', - start_date=datetime(2014, 4, 13)) -index_float64_boolean_indexer = Benchmark('idx[mask]', setup, - name='index_float64_boolean_indexer', - start_date=datetime(2014, 4, 13)) -index_float64_boolean_series_indexer = Benchmark('idx[series_mask]', setup, - name='index_float64_boolean_series_indexer', - start_date=datetime(2014, 4, 13)) - -#---------------------------------------------------------------------- -# arith ops -index_float64_mul = Benchmark('idx * 2', setup, name='index_float64_mul', - start_date=datetime(2014, 4, 13)) -index_float64_div = Benchmark('idx / 2', setup, name='index_float64_div', - start_date=datetime(2014, 4, 13)) - - -# Constructing MultiIndex from cartesian product of iterables -# - -setup = common_setup + """ -iterables = [tm.makeStringIndex(10000), range(20)] -""" - -multiindex_from_product = Benchmark('MultiIndex.from_product(iterables)', - setup, name='multiindex_from_product', - start_date=datetime(2014, 6, 30)) - -#---------------------------------------------------------------------- -# MultiIndex with DatetimeIndex level - -setup = common_setup + """ -level1 = range(1000) -level2 = date_range(start='1/1/2012', periods=100) -mi = MultiIndex.from_product([level1, level2]) -""" - -multiindex_with_datetime_level_full = \ - Benchmark("mi.copy().values", setup, - name='multiindex_with_datetime_level_full', - start_date=datetime(2014, 10, 11)) - - -multiindex_with_datetime_level_sliced = \ - Benchmark("mi[:10].values", setup, - name='multiindex_with_datetime_level_sliced', - start_date=datetime(2014, 10, 11)) - -# multi-index duplicated -setup = common_setup + """ -n, k = 200, 5000 -levels = [np.arange(n), tm.makeStringIndex(n).values, 1000 + np.arange(n)] -labels = [np.random.choice(n, k * n) for lev in levels] -mi = MultiIndex(levels=levels, labels=labels) -""" - -multiindex_duplicated = Benchmark('mi.duplicated()', setup, - 
name='multiindex_duplicated') - -#---------------------------------------------------------------------- -# repr - -setup = common_setup + """ -dr = pd.date_range('20000101', freq='D', periods=100000) -""" - -datetime_index_repr = \ - Benchmark("dr._is_dates_only", setup, - start_date=datetime(2012, 1, 11)) - -setup = common_setup + """ -n = 3 * 5 * 7 * 11 * (1 << 10) -low, high = - 1 << 12, 1 << 12 -f = lambda k: np.repeat(np.random.randint(low, high, n // k), k) - -i = np.random.permutation(n) -mi = MultiIndex.from_arrays([f(11), f(7), f(5), f(3), f(1)])[i] -""" - -multiindex_sortlevel_int64 = Benchmark('mi.sortlevel()', setup, - name='multiindex_sortlevel_int64') diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py deleted file mode 100644 index ff634bf2a8fc7..0000000000000 --- a/vb_suite/indexing.py +++ /dev/null @@ -1,292 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -SECTION = 'Indexing and scalar value access' - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# Series.__getitem__, get_value, __getitem__(slice) - -setup = common_setup + """ -tm.N = 1000 -ts = tm.makeTimeSeries() -dt = ts.index[500] -""" -statement = "ts[dt]" -bm_getitem = Benchmark(statement, setup, ncalls=100000, - name='time_series_getitem_scalar') - -setup = common_setup + """ -index = tm.makeStringIndex(1000) -s = Series(np.random.rand(1000), index=index) -idx = index[100] -""" -statement = "s.get_value(idx)" -bm_get_value = Benchmark(statement, setup, - name='series_get_value', - start_date=datetime(2011, 11, 12)) - - -setup = common_setup + """ -index = tm.makeStringIndex(1000000) -s = Series(np.random.rand(1000000), index=index) -""" -series_getitem_pos_slice = Benchmark("s[:800000]", setup, - name="series_getitem_pos_slice") - - -setup = common_setup + """ -index = tm.makeStringIndex(1000000) -s = Series(np.random.rand(1000000), index=index) -lbl = s.index[800000] 
-""" -series_getitem_label_slice = Benchmark("s[:lbl]", setup, - name="series_getitem_label_slice") - - -#---------------------------------------------------------------------- -# DataFrame __getitem__ - -setup = common_setup + """ -index = tm.makeStringIndex(1000) -columns = tm.makeStringIndex(30) -df = DataFrame(np.random.rand(1000, 30), index=index, - columns=columns) -idx = index[100] -col = columns[10] -""" -statement = "df[col][idx]" -bm_df_getitem = Benchmark(statement, setup, - name='dataframe_getitem_scalar') - -setup = common_setup + """ -try: - klass = DataMatrix -except: - klass = DataFrame - -index = tm.makeStringIndex(1000) -columns = tm.makeStringIndex(30) -df = klass(np.random.rand(1000, 30), index=index, columns=columns) -idx = index[100] -col = columns[10] -""" -statement = "df[col][idx]" -bm_df_getitem2 = Benchmark(statement, setup, - name='datamatrix_getitem_scalar') - - -#---------------------------------------------------------------------- -# ix get scalar - -setup = common_setup + """ -index = tm.makeStringIndex(1000) -columns = tm.makeStringIndex(30) -df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns) -idx = index[100] -col = columns[10] -""" - -indexing_frame_get_value_ix = Benchmark("df.ix[idx,col]", setup, - name='indexing_frame_get_value_ix', - start_date=datetime(2011, 11, 12)) - -indexing_frame_get_value = Benchmark("df.get_value(idx,col)", setup, - name='indexing_frame_get_value', - start_date=datetime(2011, 11, 12)) - -setup = common_setup + """ -mi = MultiIndex.from_tuples([(x,y) for x in range(1000) for y in range(1000)]) -s = Series(np.random.randn(1000000), index=mi) -""" - -series_xs_mi_ix = Benchmark("s.ix[999]", setup, - name='series_xs_mi_ix', - start_date=datetime(2013, 1, 1)) - -setup = common_setup + """ -mi = MultiIndex.from_tuples([(x,y) for x in range(1000) for y in range(1000)]) -s = Series(np.random.randn(1000000), index=mi) -df = DataFrame(s) -""" - -frame_xs_mi_ix = Benchmark("df.ix[999]", 
setup, - name='frame_xs_mi_ix', - start_date=datetime(2013, 1, 1)) - -#---------------------------------------------------------------------- -# Boolean DataFrame row selection - -setup = common_setup + """ -df = DataFrame(np.random.randn(10000, 4), columns=['A', 'B', 'C', 'D']) -indexer = df['B'] > 0 -obj_indexer = indexer.astype('O') -""" -indexing_dataframe_boolean_rows = \ - Benchmark("df[indexer]", setup, name='indexing_dataframe_boolean_rows') - -indexing_dataframe_boolean_rows_object = \ - Benchmark("df[obj_indexer]", setup, - name='indexing_dataframe_boolean_rows_object') - -setup = common_setup + """ -df = DataFrame(np.random.randn(50000, 100)) -df2 = DataFrame(np.random.randn(50000, 100)) -""" -indexing_dataframe_boolean = \ - Benchmark("df > df2", setup, name='indexing_dataframe_boolean', - start_date=datetime(2012, 1, 1)) - -setup = common_setup + """ -try: - import pandas.core.computation.expressions as expr -except: - expr = None - -if expr is None: - raise NotImplementedError -df = DataFrame(np.random.randn(50000, 100)) -df2 = DataFrame(np.random.randn(50000, 100)) -expr.set_numexpr_threads(1) -""" - -indexing_dataframe_boolean_st = \ - Benchmark("df > df2", setup, name='indexing_dataframe_boolean_st',cleanup="expr.set_numexpr_threads()", - start_date=datetime(2013, 2, 26)) - - -setup = common_setup + """ -try: - import pandas.core.computation.expressions as expr -except: - expr = None - -if expr is None: - raise NotImplementedError -df = DataFrame(np.random.randn(50000, 100)) -df2 = DataFrame(np.random.randn(50000, 100)) -expr.set_use_numexpr(False) -""" - -indexing_dataframe_boolean_no_ne = \ - Benchmark("df > df2", setup, name='indexing_dataframe_boolean_no_ne',cleanup="expr.set_use_numexpr(True)", - start_date=datetime(2013, 2, 26)) -#---------------------------------------------------------------------- -# MultiIndex sortlevel - -setup = common_setup + """ -a = np.repeat(np.arange(100), 1000) -b = np.tile(np.arange(1000), 100) -midx = 
MultiIndex.from_arrays([a, b]) -midx = midx.take(np.random.permutation(np.arange(100000))) -""" -sort_level_zero = Benchmark("midx.sortlevel(0)", setup, - start_date=datetime(2012, 1, 1)) -sort_level_one = Benchmark("midx.sortlevel(1)", setup, - start_date=datetime(2012, 1, 1)) - -#---------------------------------------------------------------------- -# Panel subset selection - -setup = common_setup + """ -p = Panel(np.random.randn(100, 100, 100)) -inds = range(0, 100, 10) -""" - -indexing_panel_subset = Benchmark('p.ix[inds, inds, inds]', setup, - start_date=datetime(2012, 1, 1)) - -#---------------------------------------------------------------------- -# Iloc - -setup = common_setup + """ -df = DataFrame({'A' : [0.1] * 3000, 'B' : [1] * 3000}) -idx = np.array(range(30)) * 99 -df2 = DataFrame({'A' : [0.1] * 1000, 'B' : [1] * 1000}) -df2 = concat([df2, 2*df2, 3*df2]) -""" - -frame_iloc_dups = Benchmark('df2.iloc[idx]', setup, - start_date=datetime(2013, 1, 1)) - -frame_loc_dups = Benchmark('df2.loc[idx]', setup, - start_date=datetime(2013, 1, 1)) - -setup = common_setup + """ -df = DataFrame(dict( A = [ 'foo'] * 1000000)) -""" - -frame_iloc_big = Benchmark('df.iloc[:100,0]', setup, - start_date=datetime(2013, 1, 1)) - -#---------------------------------------------------------------------- -# basic tests for [], .loc[], .iloc[] and .ix[] - -setup = common_setup + """ -s = Series(np.random.rand(1000000)) -""" - -series_getitem_scalar = Benchmark("s[800000]", setup) -series_getitem_slice = Benchmark("s[:800000]", setup) -series_getitem_list_like = Benchmark("s[[800000]]", setup) -series_getitem_array = Benchmark("s[np.arange(10000)]", setup) - -series_loc_scalar = Benchmark("s.loc[800000]", setup) -series_loc_slice = Benchmark("s.loc[:800000]", setup) -series_loc_list_like = Benchmark("s.loc[[800000]]", setup) -series_loc_array = Benchmark("s.loc[np.arange(10000)]", setup) - -series_iloc_scalar = Benchmark("s.iloc[800000]", setup) -series_iloc_slice = 
Benchmark("s.iloc[:800000]", setup) -series_iloc_list_like = Benchmark("s.iloc[[800000]]", setup) -series_iloc_array = Benchmark("s.iloc[np.arange(10000)]", setup) - -series_ix_scalar = Benchmark("s.ix[800000]", setup) -series_ix_slice = Benchmark("s.ix[:800000]", setup) -series_ix_list_like = Benchmark("s.ix[[800000]]", setup) -series_ix_array = Benchmark("s.ix[np.arange(10000)]", setup) - - -# multi-index slicing -setup = common_setup + """ -np.random.seed(1234) -idx=pd.IndexSlice -n=100000 -mdt = pandas.DataFrame() -mdt['A'] = np.random.choice(range(10000,45000,1000), n) -mdt['B'] = np.random.choice(range(10,400), n) -mdt['C'] = np.random.choice(range(1,150), n) -mdt['D'] = np.random.choice(range(10000,45000), n) -mdt['x'] = np.random.choice(range(400), n) -mdt['y'] = np.random.choice(range(25), n) - - -test_A = 25000 -test_B = 25 -test_C = 40 -test_D = 35000 - -eps_A = 5000 -eps_B = 5 -eps_C = 5 -eps_D = 5000 -mdt2 = mdt.set_index(['A','B','C','D']).sortlevel() -""" - -multiindex_slicers = Benchmark('mdt2.loc[idx[test_A-eps_A:test_A+eps_A,test_B-eps_B:test_B+eps_B,test_C-eps_C:test_C+eps_C,test_D-eps_D:test_D+eps_D],:]', setup, - start_date=datetime(2015, 1, 1)) - -#---------------------------------------------------------------------- -# take - -setup = common_setup + """ -s = Series(np.random.rand(100000)) -ts = Series(np.random.rand(100000), - index=date_range('2011-01-01', freq='S', periods=100000)) -indexer = [True, False, True, True, False] * 20000 -""" - -series_take_intindex = Benchmark("s.take(indexer)", setup) -series_take_dtindex = Benchmark("ts.take(indexer)", setup) diff --git a/vb_suite/inference.py b/vb_suite/inference.py deleted file mode 100644 index aaa51aa5163ce..0000000000000 --- a/vb_suite/inference.py +++ /dev/null @@ -1,36 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime -import sys - -# from GH 7332 - -setup = """from .pandas_vb_common import * -import pandas as pd -N = 500000 -df_int64 = DataFrame(dict(A = 
np.arange(N,dtype='int64'), B = np.arange(N,dtype='int64'))) -df_int32 = DataFrame(dict(A = np.arange(N,dtype='int32'), B = np.arange(N,dtype='int32'))) -df_uint32 = DataFrame(dict(A = np.arange(N,dtype='uint32'), B = np.arange(N,dtype='uint32'))) -df_float64 = DataFrame(dict(A = np.arange(N,dtype='float64'), B = np.arange(N,dtype='float64'))) -df_float32 = DataFrame(dict(A = np.arange(N,dtype='float32'), B = np.arange(N,dtype='float32'))) -df_datetime64 = DataFrame(dict(A = pd.to_datetime(np.arange(N,dtype='int64'),unit='ms'), - B = pd.to_datetime(np.arange(N,dtype='int64'),unit='ms'))) -df_timedelta64 = DataFrame(dict(A = df_datetime64['A']-df_datetime64['B'], - B = df_datetime64['B'])) -""" - -dtype_infer_int64 = Benchmark('df_int64["A"] + df_int64["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_int32 = Benchmark('df_int32["A"] + df_int32["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_uint32 = Benchmark('df_uint32["A"] + df_uint32["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_float64 = Benchmark('df_float64["A"] + df_float64["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_float32 = Benchmark('df_float32["A"] + df_float32["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_datetime64 = Benchmark('df_datetime64["A"] - df_datetime64["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_timedelta64_1 = Benchmark('df_timedelta64["A"] + df_timedelta64["B"]', setup, - start_date=datetime(2014, 1, 1)) -dtype_infer_timedelta64_2 = Benchmark('df_timedelta64["A"] + df_timedelta64["A"]', setup, - start_date=datetime(2014, 1, 1)) diff --git a/vb_suite/io_bench.py b/vb_suite/io_bench.py deleted file mode 100644 index af5f6076515cc..0000000000000 --- a/vb_suite/io_bench.py +++ /dev/null @@ -1,150 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -from io import StringIO -""" - 
-#---------------------------------------------------------------------- -# read_csv - -setup1 = common_setup + """ -index = tm.makeStringIndex(10000) -df = DataFrame({'float1' : randn(10000), - 'float2' : randn(10000), - 'string1' : ['foo'] * 10000, - 'bool1' : [True] * 10000, - 'int1' : np.random.randint(0, 100000, size=10000)}, - index=index) -df.to_csv('__test__.csv') -""" - -read_csv_standard = Benchmark("read_csv('__test__.csv')", setup1, - start_date=datetime(2011, 9, 15)) - -#---------------------------------- -# skiprows - -setup1 = common_setup + """ -index = tm.makeStringIndex(20000) -df = DataFrame({'float1' : randn(20000), - 'float2' : randn(20000), - 'string1' : ['foo'] * 20000, - 'bool1' : [True] * 20000, - 'int1' : np.random.randint(0, 200000, size=20000)}, - index=index) -df.to_csv('__test__.csv') -""" - -read_csv_skiprows = Benchmark("read_csv('__test__.csv', skiprows=10000)", setup1, - start_date=datetime(2011, 9, 15)) - -#---------------------------------------------------------------------- -# write_csv - -setup2 = common_setup + """ -index = tm.makeStringIndex(10000) -df = DataFrame({'float1' : randn(10000), - 'float2' : randn(10000), - 'string1' : ['foo'] * 10000, - 'bool1' : [True] * 10000, - 'int1' : np.random.randint(0, 100000, size=10000)}, - index=index) -""" - -write_csv_standard = Benchmark("df.to_csv('__test__.csv')", setup2, - start_date=datetime(2011, 9, 15)) - -#---------------------------------- -setup = common_setup + """ -df = DataFrame(np.random.randn(3000, 30)) -""" -frame_to_csv = Benchmark("df.to_csv('__test__.csv')", setup, - start_date=datetime(2011, 1, 1)) -#---------------------------------- - -setup = common_setup + """ -df=DataFrame({'A':range(50000)}) -df['B'] = df.A + 1.0 -df['C'] = df.A + 2.0 -df['D'] = df.A + 3.0 -""" -frame_to_csv2 = Benchmark("df.to_csv('__test__.csv')", setup, - start_date=datetime(2011, 1, 1)) - -#---------------------------------- -setup = common_setup + """ -from pandas import concat, 
Timestamp - -def create_cols(name): - return [ "%s%03d" % (name,i) for i in range(5) ] -df_float = DataFrame(np.random.randn(5000, 5),dtype='float64',columns=create_cols('float')) -df_int = DataFrame(np.random.randn(5000, 5),dtype='int64',columns=create_cols('int')) -df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool')) -df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object')) -df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date')) - -# add in some nans -df_float.ix[30:500,1:3] = np.nan - -df = concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1) - -""" -frame_to_csv_mixed = Benchmark("df.to_csv('__test__.csv')", setup, - start_date=datetime(2012, 6, 1)) - -#---------------------------------------------------------------------- -# parse dates, ISO8601 format - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=1000) -data = '\\n'.join(rng.map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))) -""" - -stmt = ("read_csv(StringIO(data), header=None, names=['foo'], " - " parse_dates=['foo'])") -read_parse_dates_iso8601 = Benchmark(stmt, setup, - start_date=datetime(2012, 3, 1)) - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=1000) -data = DataFrame(rng, index=rng) -""" - -stmt = ("data.to_csv('__test__.csv', date_format='%Y%m%d')") - -frame_to_csv_date_formatting = Benchmark(stmt, setup, - start_date=datetime(2013, 9, 1)) - -#---------------------------------------------------------------------- -# infer datetime format - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=1000) -data = '\\n'.join(rng.map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S"))) -""" - -stmt = ("read_csv(StringIO(data), header=None, names=['foo'], " - " parse_dates=['foo'], infer_datetime_format=True)") - -read_csv_infer_datetime_format_iso8601 = Benchmark(stmt, setup) - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=1000) -data = 
'\\n'.join(rng.map(lambda x: x.strftime("%Y%m%d"))) -""" - -stmt = ("read_csv(StringIO(data), header=None, names=['foo'], " - " parse_dates=['foo'], infer_datetime_format=True)") - -read_csv_infer_datetime_format_ymd = Benchmark(stmt, setup) - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=1000) -data = '\\n'.join(rng.map(lambda x: x.strftime("%m/%d/%Y %H:%M:%S.%f"))) -""" - -stmt = ("read_csv(StringIO(data), header=None, names=['foo'], " - " parse_dates=['foo'], infer_datetime_format=True)") - -read_csv_infer_datetime_format_custom = Benchmark(stmt, setup) diff --git a/vb_suite/io_sql.py b/vb_suite/io_sql.py deleted file mode 100644 index ba8367e7e356b..0000000000000 --- a/vb_suite/io_sql.py +++ /dev/null @@ -1,126 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -import sqlite3 -import sqlalchemy -from sqlalchemy import create_engine - -engine = create_engine('sqlite:///:memory:') -con = sqlite3.connect(':memory:') -""" - -sdate = datetime(2014, 6, 1) - - -#------------------------------------------------------------------------------- -# to_sql - -setup = common_setup + """ -index = tm.makeStringIndex(10000) -df = DataFrame({'float1' : randn(10000), - 'float2' : randn(10000), - 'string1' : ['foo'] * 10000, - 'bool1' : [True] * 10000, - 'int1' : np.random.randint(0, 100000, size=10000)}, - index=index) -""" - -sql_write_sqlalchemy = Benchmark("df.to_sql('test1', engine, if_exists='replace')", - setup, start_date=sdate) - -sql_write_fallback = Benchmark("df.to_sql('test1', con, if_exists='replace')", - setup, start_date=sdate) - - -#------------------------------------------------------------------------------- -# read_sql - -setup = common_setup + """ -index = tm.makeStringIndex(10000) -df = DataFrame({'float1' : randn(10000), - 'float2' : randn(10000), - 'string1' : ['foo'] * 10000, - 'bool1' : [True] * 10000, - 'int1' : np.random.randint(0, 100000, size=10000)}, - 
index=index) -df.to_sql('test2', engine, if_exists='replace') -df.to_sql('test2', con, if_exists='replace') -""" - -sql_read_query_sqlalchemy = Benchmark("read_sql_query('SELECT * FROM test2', engine)", - setup, start_date=sdate) - -sql_read_query_fallback = Benchmark("read_sql_query('SELECT * FROM test2', con)", - setup, start_date=sdate) - -sql_read_table_sqlalchemy = Benchmark("read_sql_table('test2', engine)", - setup, start_date=sdate) - - -#------------------------------------------------------------------------------- -# type specific write - -setup = common_setup + """ -df = DataFrame({'float' : randn(10000), - 'string' : ['foo'] * 10000, - 'bool' : [True] * 10000, - 'datetime' : date_range('2000-01-01', periods=10000, freq='s')}) -df.loc[1000:3000, 'float'] = np.nan -""" - -sql_float_write_sqlalchemy = \ - Benchmark("df[['float']].to_sql('test_float', engine, if_exists='replace')", - setup, start_date=sdate) - -sql_float_write_fallback = \ - Benchmark("df[['float']].to_sql('test_float', con, if_exists='replace')", - setup, start_date=sdate) - -sql_string_write_sqlalchemy = \ - Benchmark("df[['string']].to_sql('test_string', engine, if_exists='replace')", - setup, start_date=sdate) - -sql_string_write_fallback = \ - Benchmark("df[['string']].to_sql('test_string', con, if_exists='replace')", - setup, start_date=sdate) - -sql_datetime_write_sqlalchemy = \ - Benchmark("df[['datetime']].to_sql('test_datetime', engine, if_exists='replace')", - setup, start_date=sdate) - -#sql_datetime_write_fallback = \ -# Benchmark("df[['datetime']].to_sql('test_datetime', con, if_exists='replace')", -# setup3, start_date=sdate) - -#------------------------------------------------------------------------------- -# type specific read - -setup = common_setup + """ -df = DataFrame({'float' : randn(10000), - 'datetime' : date_range('2000-01-01', periods=10000, freq='s')}) -df['datetime_string'] = df['datetime'].map(str) - -df.to_sql('test_type', engine, if_exists='replace') 
-df[['float', 'datetime_string']].to_sql('test_type', con, if_exists='replace') -""" - -sql_float_read_query_sqlalchemy = \ - Benchmark("read_sql_query('SELECT float FROM test_type', engine)", - setup, start_date=sdate) - -sql_float_read_table_sqlalchemy = \ - Benchmark("read_sql_table('test_type', engine, columns=['float'])", - setup, start_date=sdate) - -sql_float_read_query_fallback = \ - Benchmark("read_sql_query('SELECT float FROM test_type', con)", - setup, start_date=sdate) - -sql_datetime_read_as_native_sqlalchemy = \ - Benchmark("read_sql_table('test_type', engine, columns=['datetime'])", - setup, start_date=sdate) - -sql_datetime_read_and_parse_sqlalchemy = \ - Benchmark("read_sql_table('test_type', engine, columns=['datetime_string'], parse_dates=['datetime_string'])", - setup, start_date=sdate) diff --git a/vb_suite/join_merge.py b/vb_suite/join_merge.py deleted file mode 100644 index 238a129552e90..0000000000000 --- a/vb_suite/join_merge.py +++ /dev/null @@ -1,270 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -setup = common_setup + """ -level1 = tm.makeStringIndex(10).values -level2 = tm.makeStringIndex(1000).values -label1 = np.arange(10).repeat(1000) -label2 = np.tile(np.arange(1000), 10) - -key1 = np.tile(level1.take(label1), 10) -key2 = np.tile(level2.take(label2), 10) - -shuf = np.arange(100000) -random.shuffle(shuf) -try: - index2 = MultiIndex(levels=[level1, level2], labels=[label1, label2]) - index3 = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], - labels=[np.arange(10).repeat(10000), - np.tile(np.arange(100).repeat(100), 10), - np.tile(np.tile(np.arange(100), 100), 10)]) - df_multi = DataFrame(np.random.randn(len(index2), 4), index=index2, - columns=['A', 'B', 'C', 'D']) -except: # pre-MultiIndex - pass - -try: - DataFrame = DataMatrix -except: - pass - -df = pd.DataFrame({'data1' : np.random.randn(100000), - 'data2' : 
np.random.randn(100000), - 'key1' : key1, - 'key2' : key2}) - - -df_key1 = pd.DataFrame(np.random.randn(len(level1), 4), index=level1, - columns=['A', 'B', 'C', 'D']) -df_key2 = pd.DataFrame(np.random.randn(len(level2), 4), index=level2, - columns=['A', 'B', 'C', 'D']) - -df_shuf = df.reindex(df.index[shuf]) -""" - -#---------------------------------------------------------------------- -# DataFrame joins on key - -join_dataframe_index_single_key_small = \ - Benchmark("df.join(df_key1, on='key1')", setup, - name='join_dataframe_index_single_key_small') - -join_dataframe_index_single_key_bigger = \ - Benchmark("df.join(df_key2, on='key2')", setup, - name='join_dataframe_index_single_key_bigger') - -join_dataframe_index_single_key_bigger_sort = \ - Benchmark("df_shuf.join(df_key2, on='key2', sort=True)", setup, - name='join_dataframe_index_single_key_bigger_sort', - start_date=datetime(2012, 2, 5)) - -join_dataframe_index_multi = \ - Benchmark("df.join(df_multi, on=['key1', 'key2'])", setup, - name='join_dataframe_index_multi', - start_date=datetime(2011, 10, 20)) - -#---------------------------------------------------------------------- -# Joins on integer keys -setup = common_setup + """ -df = pd.DataFrame({'key1': np.tile(np.arange(500).repeat(10), 2), - 'key2': np.tile(np.arange(250).repeat(10), 4), - 'value': np.random.randn(10000)}) -df2 = pd.DataFrame({'key1': np.arange(500), 'value2': randn(500)}) -df3 = df[:5000] -""" - - -join_dataframe_integer_key = Benchmark("merge(df, df2, on='key1')", setup, - start_date=datetime(2011, 10, 20)) -join_dataframe_integer_2key = Benchmark("merge(df, df3)", setup, - start_date=datetime(2011, 10, 20)) - -#---------------------------------------------------------------------- -# DataFrame joins on index - - -#---------------------------------------------------------------------- -# Merges -setup = common_setup + """ -N = 10000 - -indices = tm.makeStringIndex(N).values -indices2 = tm.makeStringIndex(N).values -key = 
np.tile(indices[:8000], 10) -key2 = np.tile(indices2[:8000], 10) - -left = pd.DataFrame({'key' : key, 'key2':key2, - 'value' : np.random.randn(80000)}) -right = pd.DataFrame({'key': indices[2000:], 'key2':indices2[2000:], - 'value2' : np.random.randn(8000)}) -""" - -merge_2intkey_nosort = Benchmark('merge(left, right, sort=False)', setup, - start_date=datetime(2011, 10, 20)) - -merge_2intkey_sort = Benchmark('merge(left, right, sort=True)', setup, - start_date=datetime(2011, 10, 20)) - -#---------------------------------------------------------------------- -# Appending DataFrames - -setup = common_setup + """ -df1 = pd.DataFrame(np.random.randn(10000, 4), columns=['A', 'B', 'C', 'D']) -df2 = df1.copy() -df2.index = np.arange(10000, 20000) -mdf1 = df1.copy() -mdf1['obj1'] = 'bar' -mdf1['obj2'] = 'bar' -mdf1['int1'] = 5 -try: - mdf1.consolidate(inplace=True) -except: - pass -mdf2 = mdf1.copy() -mdf2.index = df2.index -""" - -stmt = "df1.append(df2)" -append_frame_single_homogenous = \ - Benchmark(stmt, setup, name='append_frame_single_homogenous', - ncalls=500, repeat=1) - -stmt = "mdf1.append(mdf2)" -append_frame_single_mixed = Benchmark(stmt, setup, - name='append_frame_single_mixed', - ncalls=500, repeat=1) - -#---------------------------------------------------------------------- -# data alignment - -setup = common_setup + """n = 1000000 -# indices = tm.makeStringIndex(n) -def sample(values, k): - sampler = np.random.permutation(len(values)) - return values.take(sampler[:k]) -sz = 500000 -rng = np.arange(0, 10000000000000, 10000000) -stamps = np.datetime64(datetime.now()).view('i8') + rng -idx1 = np.sort(sample(stamps, sz)) -idx2 = np.sort(sample(stamps, sz)) -ts1 = Series(np.random.randn(sz), idx1) -ts2 = Series(np.random.randn(sz), idx2) -""" -stmt = "ts1 + ts2" -series_align_int64_index = \ - Benchmark(stmt, setup, - name="series_align_int64_index", - start_date=datetime(2010, 6, 1), logy=True) - -stmt = "ts1.align(ts2, join='left')" 
-series_align_left_monotonic = \ - Benchmark(stmt, setup, - name="series_align_left_monotonic", - start_date=datetime(2011, 12, 1), logy=True) - -#---------------------------------------------------------------------- -# Concat Series axis=1 - -setup = common_setup + """ -n = 1000 -indices = tm.makeStringIndex(1000) -s = Series(n, index=indices) -pieces = [s[i:-i] for i in range(1, 10)] -pieces = pieces * 50 -""" - -concat_series_axis1 = Benchmark('concat(pieces, axis=1)', setup, - start_date=datetime(2012, 2, 27)) - -setup = common_setup + """ -df = pd.DataFrame(randn(5, 4)) -""" - -concat_small_frames = Benchmark('concat([df] * 1000)', setup, - start_date=datetime(2012, 1, 1)) - - -#---------------------------------------------------------------------- -# Concat empty - -setup = common_setup + """ -df = pd.DataFrame(dict(A = range(10000)),index=date_range('20130101',periods=10000,freq='s')) -empty = pd.DataFrame() -""" - -concat_empty_frames1 = Benchmark('concat([df,empty])', setup, - start_date=datetime(2012, 1, 1)) -concat_empty_frames2 = Benchmark('concat([empty,df])', setup, - start_date=datetime(2012, 1, 1)) - - -#---------------------------------------------------------------------- -# Ordered merge - -setup = common_setup + """ -groups = tm.makeStringIndex(10).values - -left = pd.DataFrame({'group': groups.repeat(5000), - 'key' : np.tile(np.arange(0, 10000, 2), 10), - 'lvalue': np.random.randn(50000)}) - -right = pd.DataFrame({'key' : np.arange(10000), - 'rvalue' : np.random.randn(10000)}) - -""" - -stmt = "ordered_merge(left, right, on='key', left_by='group')" - -#---------------------------------------------------------------------- -# outer join of non-unique -# GH 6329 - -setup = common_setup + """ -date_index = date_range('01-Jan-2013', '23-Jan-2013', freq='T') -daily_dates = date_index.to_period('D').to_timestamp('S','S') -fracofday = date_index.view(np.ndarray) - daily_dates.view(np.ndarray) -fracofday = 
fracofday.astype('timedelta64[ns]').astype(np.float64)/864e11 -fracofday = TimeSeries(fracofday, daily_dates) -index = date_range(date_index.min().to_period('A').to_timestamp('D','S'), - date_index.max().to_period('A').to_timestamp('D','E'), - freq='D') -temp = TimeSeries(1.0, index) -""" - -join_non_unique_equal = Benchmark('fracofday * temp[fracofday.index]', setup, - start_date=datetime(2013, 1, 1)) - - -setup = common_setup + ''' -np.random.seed(2718281) -n = 50000 - -left = pd.DataFrame(np.random.randint(1, n/500, (n, 2)), - columns=['jim', 'joe']) - -right = pd.DataFrame(np.random.randint(1, n/500, (n, 2)), - columns=['jolie', 'jolia']).set_index('jolie') -''' - -left_outer_join_index = Benchmark("left.join(right, on='jim')", setup, - name='left_outer_join_index') - - -setup = common_setup + """ -low, high, n = -1 << 10, 1 << 10, 1 << 20 -left = pd.DataFrame(np.random.randint(low, high, (n, 7)), - columns=list('ABCDEFG')) -left['left'] = left.sum(axis=1) - -i = np.random.permutation(len(left)) -right = left.iloc[i].copy() -right.columns = right.columns[:-1].tolist() + ['right'] -right.index = np.arange(len(right)) -right['right'] *= -1 -""" - -i8merge = Benchmark("merge(left, right, how='outer')", setup, - name='i8merge') diff --git a/vb_suite/make.py b/vb_suite/make.py deleted file mode 100755 index 5a8a8215db9a4..0000000000000 --- a/vb_suite/make.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python - -""" -Python script for building documentation. - -To build the docs you must have all optional dependencies for statsmodels -installed. See the installation instructions for a list of these. - -Note: currently latex builds do not work because of table formats that are not -supported in the latex generation. - -Usage ------ -python make.py clean -python make.py html -""" - -import glob -import os -import shutil -import sys -import sphinx - -os.environ['PYTHONPATH'] = '..' 
- -SPHINX_BUILD = 'sphinxbuild' - - -def upload(): - 'push a copy to the site' - os.system('cd build/html; rsync -avz . pandas@pandas.pydata.org' - ':/usr/share/nginx/pandas/pandas-docs/vbench/ -essh') - - -def clean(): - if os.path.exists('build'): - shutil.rmtree('build') - - if os.path.exists('source/generated'): - shutil.rmtree('source/generated') - - -def html(): - check_build() - if os.system('sphinx-build -P -b html -d build/doctrees ' - 'source build/html'): - raise SystemExit("Building HTML failed.") - - -def check_build(): - build_dirs = [ - 'build', 'build/doctrees', 'build/html', - 'build/plots', 'build/_static', - 'build/_templates'] - for d in build_dirs: - try: - os.mkdir(d) - except OSError: - pass - - -def all(): - clean() - html() - - -def auto_update(): - msg = '' - try: - clean() - html() - upload() - sendmail() - except (Exception, SystemExit), inst: - msg += str(inst) + '\n' - sendmail(msg) - - -def sendmail(err_msg=None): - from_name, to_name = _get_config() - - if err_msg is None: - msgstr = 'Daily vbench uploaded successfully' - subject = "VB: daily update successful" - else: - msgstr = err_msg - subject = "VB: daily update failed" - - import smtplib - from email.MIMEText import MIMEText - msg = MIMEText(msgstr) - msg['Subject'] = subject - msg['From'] = from_name - msg['To'] = to_name - - server_str, port, login, pwd = _get_credentials() - server = smtplib.SMTP(server_str, port) - server.ehlo() - server.starttls() - server.ehlo() - - server.login(login, pwd) - try: - server.sendmail(from_name, to_name, msg.as_string()) - finally: - server.close() - - -def _get_dir(subdir=None): - import getpass - USERNAME = getpass.getuser() - if sys.platform == 'darwin': - HOME = '/Users/%s' % USERNAME - else: - HOME = '/home/%s' % USERNAME - - if subdir is None: - subdir = '/code/scripts' - conf_dir = '%s%s' % (HOME, subdir) - return conf_dir - - -def _get_credentials(): - tmp_dir = _get_dir() - cred = '%s/credentials' % tmp_dir - with open(cred, 'r') as 
fh: - server, port, un, domain = fh.read().split(',') - port = int(port) - login = un + '@' + domain + '.com' - - import base64 - with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh: - pwd = base64.b64decode(fh.read()) - - return server, port, login, pwd - - -def _get_config(): - tmp_dir = _get_dir() - with open('%s/addresses' % tmp_dir, 'r') as fh: - from_name, to_name = fh.read().split(',') - return from_name, to_name - -funcd = { - 'html': html, - 'clean': clean, - 'upload': upload, - 'auto_update': auto_update, - 'all': all, -} - -small_docs = False - -# current_dir = os.getcwd() -# os.chdir(os.path.dirname(os.path.join(current_dir, __file__))) - -if len(sys.argv) > 1: - for arg in sys.argv[1:]: - func = funcd.get(arg) - if func is None: - raise SystemExit('Do not know how to handle %s; valid args are %s' % ( - arg, funcd.keys())) - func() -else: - small_docs = False - all() -# os.chdir(current_dir) diff --git a/vb_suite/measure_memory_consumption.py b/vb_suite/measure_memory_consumption.py deleted file mode 100755 index bb73cf5da4302..0000000000000 --- a/vb_suite/measure_memory_consumption.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import print_function - -"""Short one-line summary - -long summary -""" - - -def main(): - import shutil - import tempfile - import warnings - - from pandas import Series - - from vbench.api import BenchmarkRunner - from suite import (REPO_PATH, BUILD, DB_PATH, PREPARE, - dependencies, benchmarks) - - from memory_profiler import memory_usage - - warnings.filterwarnings('ignore', category=FutureWarning) - - try: - TMP_DIR = tempfile.mkdtemp() - runner = BenchmarkRunner( - benchmarks, REPO_PATH, REPO_PATH, BUILD, DB_PATH, - TMP_DIR, PREPARE, always_clean=True, - # run_option='eod', start_date=START_DATE, - module_dependencies=dependencies) - results = {} - for b in runner.benchmarks: - k = b.name - try: - vs = memory_usage((b.run,)) - v = max(vs) - # print(k, v) - results[k] = 
v - except Exception as e: - print("Exception caught in %s\n" % k) - print(str(e)) - - s = Series(results) - s.sort() - print((s)) - - finally: - shutil.rmtree(TMP_DIR) - - -if __name__ == "__main__": - main() diff --git a/vb_suite/miscellaneous.py b/vb_suite/miscellaneous.py deleted file mode 100644 index da2c736e79ea7..0000000000000 --- a/vb_suite/miscellaneous.py +++ /dev/null @@ -1,32 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# cache_readonly - -setup = common_setup + """ -from pandas.util.decorators import cache_readonly - -class Foo: - - @cache_readonly - def prop(self): - return 5 -obj = Foo() -""" -misc_cache_readonly = Benchmark("obj.prop", setup, name="misc_cache_readonly", - ncalls=2000000) - -#---------------------------------------------------------------------- -# match - -setup = common_setup + """ -uniques = tm.makeStringIndex(1000).values -all = uniques.repeat(10) -""" - -match_strings = Benchmark("match(all, uniques)", setup, - start_date=datetime(2012, 5, 12)) diff --git a/vb_suite/packers.py b/vb_suite/packers.py deleted file mode 100644 index 69ec10822b392..0000000000000 --- a/vb_suite/packers.py +++ /dev/null @@ -1,252 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -start_date = datetime(2013, 5, 1) - -common_setup = """from .pandas_vb_common import * -import os -import pandas as pd -from pandas.core import common as com -from pandas.compat import BytesIO -from random import randrange - -f = '__test__.msg' -def remove(f): - try: - os.remove(f) - except: - pass - -N=100000 -C=5 -index = date_range('20000101',periods=N,freq='H') -df = DataFrame(dict([ ("float{0}".format(i),randn(N)) for i in range(C) ]), - index=index) - -N=100000 -C=5 -index = date_range('20000101',periods=N,freq='H') -df2 = DataFrame(dict([ ("float{0}".format(i),randn(N)) for i in 
range(C) ]), - index=index) -df2['object'] = ['%08x'%randrange(16**8) for _ in range(N)] -remove(f) -""" - -#---------------------------------------------------------------------- -# msgpack - -setup = common_setup + """ -df2.to_msgpack(f) -""" - -packers_read_pack = Benchmark("pd.read_msgpack(f)", setup, start_date=start_date) - -setup = common_setup + """ -""" - -packers_write_pack = Benchmark("df2.to_msgpack(f)", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# pickle - -setup = common_setup + """ -df2.to_pickle(f) -""" - -packers_read_pickle = Benchmark("pd.read_pickle(f)", setup, start_date=start_date) - -setup = common_setup + """ -""" - -packers_write_pickle = Benchmark("df2.to_pickle(f)", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# csv - -setup = common_setup + """ -df.to_csv(f) -""" - -packers_read_csv = Benchmark("pd.read_csv(f)", setup, start_date=start_date) - -setup = common_setup + """ -""" - -packers_write_csv = Benchmark("df.to_csv(f)", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# hdf store - -setup = common_setup + """ -df2.to_hdf(f,'df') -""" - -packers_read_hdf_store = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date) - -setup = common_setup + """ -""" - -packers_write_hdf_store = Benchmark("df2.to_hdf(f,'df')", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# hdf table - -setup = common_setup + """ -df2.to_hdf(f,'df',format='table') -""" - -packers_read_hdf_table = Benchmark("pd.read_hdf(f,'df')", setup, start_date=start_date) - -setup = common_setup + """ -""" - -packers_write_hdf_table = Benchmark("df2.to_hdf(f,'df',table=True)", setup, cleanup="remove(f)", start_date=start_date) - 
-#---------------------------------------------------------------------- -# sql - -setup = common_setup + """ -import sqlite3 -from sqlalchemy import create_engine -engine = create_engine('sqlite:///:memory:') - -df2.to_sql('table', engine, if_exists='replace') -""" - -packers_read_sql= Benchmark("pd.read_sql_table('table', engine)", setup, start_date=start_date) - -setup = common_setup + """ -import sqlite3 -from sqlalchemy import create_engine -engine = create_engine('sqlite:///:memory:') -""" - -packers_write_sql = Benchmark("df2.to_sql('table', engine, if_exists='replace')", setup, start_date=start_date) - -#---------------------------------------------------------------------- -# json - -setup_int_index = """ -import numpy as np -df.index = np.arange(N) -""" - -setup = common_setup + """ -df.to_json(f,orient='split') -""" -packers_read_json_date_index = Benchmark("pd.read_json(f, orient='split')", setup, start_date=start_date) -setup = setup + setup_int_index -packers_read_json = Benchmark("pd.read_json(f, orient='split')", setup, start_date=start_date) - -setup = common_setup + """ -""" -packers_write_json_date_index = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) - -setup = setup + setup_int_index -packers_write_json = Benchmark("df.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) -packers_write_json_T = Benchmark("df.to_json(f,orient='columns')", setup, cleanup="remove(f)", start_date=start_date) - -setup = common_setup + """ -from numpy.random import randint -from collections import OrderedDict - -cols = [ - lambda i: ("{0}_timedelta".format(i), [pd.Timedelta('%d seconds' % randrange(1e6)) for _ in range(N)]), - lambda i: ("{0}_int".format(i), randint(1e8, size=N)), - lambda i: ("{0}_timestamp".format(i), [pd.Timestamp( 1418842918083256000 + randrange(1e9, 1e18, 200)) for _ in range(N)]) - ] -df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), - 
index=index) -""" -packers_write_json_mixed_delta_int_tstamp = Benchmark("df_mixed.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) - -setup = common_setup + """ -from numpy.random import randint -from collections import OrderedDict -cols = [ - lambda i: ("{0}_float".format(i), randn(N)), - lambda i: ("{0}_int".format(i), randint(1e8, size=N)) - ] -df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), - index=index) -""" -packers_write_json_mixed_float_int = Benchmark("df_mixed.to_json(f,orient='index')", setup, cleanup="remove(f)", start_date=start_date) -packers_write_json_mixed_float_int_T = Benchmark("df_mixed.to_json(f,orient='columns')", setup, cleanup="remove(f)", start_date=start_date) - -setup = common_setup + """ -from numpy.random import randint -from collections import OrderedDict -cols = [ - lambda i: ("{0}_float".format(i), randn(N)), - lambda i: ("{0}_int".format(i), randint(1e8, size=N)), - lambda i: ("{0}_str".format(i), ['%08x'%randrange(16**8) for _ in range(N)]) - ] -df_mixed = DataFrame(OrderedDict([cols[i % len(cols)](i) for i in range(C)]), - index=index) -""" -packers_write_json_mixed_float_int_str = Benchmark("df_mixed.to_json(f,orient='split')", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# stata - -setup = common_setup + """ -df.to_stata(f, {'index': 'tc'}) -""" -packers_read_stata = Benchmark("pd.read_stata(f)", setup, start_date=start_date) - -packers_write_stata = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date) - -setup = common_setup + """ -df['int8_'] = [randint(np.iinfo(np.int8).min, np.iinfo(np.int8).max - 27) for _ in range(N)] -df['int16_'] = [randint(np.iinfo(np.int16).min, np.iinfo(np.int16).max - 27) for _ in range(N)] -df['int32_'] = [randint(np.iinfo(np.int32).min, np.iinfo(np.int32).max - 27) for _ in range(N)] -df['float32_'] = 
np.array(randn(N), dtype=np.float32) -df.to_stata(f, {'index': 'tc'}) -""" - -packers_read_stata_with_validation = Benchmark("pd.read_stata(f)", setup, start_date=start_date) - -packers_write_stata_with_validation = Benchmark("df.to_stata(f, {'index': 'tc'})", setup, cleanup="remove(f)", start_date=start_date) - -#---------------------------------------------------------------------- -# Excel - alternative writers -setup = common_setup + """ -bio = BytesIO() -""" - -excel_writer_bench = """ -bio.seek(0) -writer = pd.io.excel.ExcelWriter(bio, engine='{engine}') -df[:2000].to_excel(writer) -writer.save() -""" - -benchmark_xlsxwriter = excel_writer_bench.format(engine='xlsxwriter') - -packers_write_excel_xlsxwriter = Benchmark(benchmark_xlsxwriter, setup) - -benchmark_openpyxl = excel_writer_bench.format(engine='openpyxl') - -packers_write_excel_openpyxl = Benchmark(benchmark_openpyxl, setup) - -benchmark_xlwt = excel_writer_bench.format(engine='xlwt') - -packers_write_excel_xlwt = Benchmark(benchmark_xlwt, setup) - - -#---------------------------------------------------------------------- -# Excel - reader - -setup = common_setup + """ -bio = BytesIO() -writer = pd.io.excel.ExcelWriter(bio, engine='xlsxwriter') -df[:2000].to_excel(writer) -writer.save() -""" - -benchmark_read_excel=""" -bio.seek(0) -pd.read_excel(bio) -""" - -packers_read_excel = Benchmark(benchmark_read_excel, setup) diff --git a/vb_suite/pandas_vb_common.py b/vb_suite/pandas_vb_common.py deleted file mode 100644 index bd2e8a1c1d504..0000000000000 --- a/vb_suite/pandas_vb_common.py +++ /dev/null @@ -1,30 +0,0 @@ -from pandas import * -import pandas as pd -from datetime import timedelta -from numpy.random import randn -from numpy.random import randint -from numpy.random import permutation -import pandas.util.testing as tm -import random -import numpy as np -try: - from pandas.compat import range -except ImportError: - pass - -np.random.seed(1234) -try: - import pandas._tseries as lib -except: - 
import pandas._libs.lib as lib - -try: - Panel = WidePanel -except Exception: - pass - -# didn't add to namespace until later -try: - from pandas.core.index import MultiIndex -except ImportError: - pass diff --git a/vb_suite/panel_ctor.py b/vb_suite/panel_ctor.py deleted file mode 100644 index 9f497e7357a61..0000000000000 --- a/vb_suite/panel_ctor.py +++ /dev/null @@ -1,76 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# Panel.from_dict homogenization time - -START_DATE = datetime(2011, 6, 1) - -setup_same_index = common_setup + """ -# create 100 dataframes with the same index -dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), - freq=datetools.Day(1))) -data_frames = {} -for x in range(100): - df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), - "c": [2]*len(dr)}, index=dr) - data_frames[x] = df -""" - -panel_from_dict_same_index = \ - Benchmark("Panel.from_dict(data_frames)", - setup_same_index, name='panel_from_dict_same_index', - start_date=START_DATE, repeat=1, logy=True) - -setup_equiv_indexes = common_setup + """ -data_frames = {} -for x in range(100): - dr = np.asarray(DatetimeIndex(start=datetime(1990,1,1), end=datetime(2012,1,1), - freq=datetools.Day(1))) - df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), - "c": [2]*len(dr)}, index=dr) - data_frames[x] = df -""" - -panel_from_dict_equiv_indexes = \ - Benchmark("Panel.from_dict(data_frames)", - setup_equiv_indexes, name='panel_from_dict_equiv_indexes', - start_date=START_DATE, repeat=1, logy=True) - -setup_all_different_indexes = common_setup + """ -data_frames = {} -start = datetime(1990,1,1) -end = datetime(2012,1,1) -for x in range(100): - end += timedelta(days=1) - dr = np.asarray(date_range(start, end)) - df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), - "c": [2]*len(dr)}, index=dr) - data_frames[x] = 
df -""" -panel_from_dict_all_different_indexes = \ - Benchmark("Panel.from_dict(data_frames)", - setup_all_different_indexes, - name='panel_from_dict_all_different_indexes', - start_date=START_DATE, repeat=1, logy=True) - -setup_two_different_indexes = common_setup + """ -data_frames = {} -start = datetime(1990,1,1) -end = datetime(2012,1,1) -for x in range(100): - if x == 50: - end += timedelta(days=1) - dr = np.asarray(date_range(start, end)) - df = DataFrame({"a": [0]*len(dr), "b": [1]*len(dr), - "c": [2]*len(dr)}, index=dr) - data_frames[x] = df -""" -panel_from_dict_two_different_indexes = \ - Benchmark("Panel.from_dict(data_frames)", - setup_two_different_indexes, - name='panel_from_dict_two_different_indexes', - start_date=START_DATE, repeat=1, logy=True) diff --git a/vb_suite/panel_methods.py b/vb_suite/panel_methods.py deleted file mode 100644 index 28586422a66e3..0000000000000 --- a/vb_suite/panel_methods.py +++ /dev/null @@ -1,28 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# shift - -setup = common_setup + """ -index = date_range(start="2000", freq="D", periods=1000) -panel = Panel(np.random.randn(100, len(index), 1000)) -""" - -panel_shift = Benchmark('panel.shift(1)', setup, - start_date=datetime(2012, 1, 12)) - -panel_shift_minor = Benchmark('panel.shift(1, axis="minor")', setup, - start_date=datetime(2012, 1, 12)) - -panel_pct_change_major = Benchmark('panel.pct_change(1, axis="major")', setup, - start_date=datetime(2014, 4, 19)) - -panel_pct_change_minor = Benchmark('panel.pct_change(1, axis="minor")', setup, - start_date=datetime(2014, 4, 19)) - -panel_pct_change_items = Benchmark('panel.pct_change(1, axis="items")', setup, - start_date=datetime(2014, 4, 19)) diff --git a/vb_suite/parser_vb.py b/vb_suite/parser_vb.py deleted file mode 100644 index bb9ccbdb5e854..0000000000000 --- 
a/vb_suite/parser_vb.py +++ /dev/null @@ -1,112 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -from pandas import read_csv, read_table -""" - -setup = common_setup + """ -import os -N = 10000 -K = 8 -df = DataFrame(np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))) -df.to_csv('test.csv', sep='|') -""" - -read_csv_vb = Benchmark("read_csv('test.csv', sep='|')", setup, - cleanup="os.remove('test.csv')", - start_date=datetime(2012, 5, 7)) - - -setup = common_setup + """ -import os -N = 10000 -K = 8 -format = lambda x: '{:,}'.format(x) -df = DataFrame(np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))) -df = df.applymap(format) -df.to_csv('test.csv', sep='|') -""" - -read_csv_thou_vb = Benchmark("read_csv('test.csv', sep='|', thousands=',')", - setup, - cleanup="os.remove('test.csv')", - start_date=datetime(2012, 5, 7)) - -setup = common_setup + """ -data = ['A,B,C'] -data = data + ['1,2,3 # comment'] * 100000 -data = '\\n'.join(data) -""" - -stmt = "read_csv(StringIO(data), comment='#')" -read_csv_comment2 = Benchmark(stmt, setup, - start_date=datetime(2011, 11, 1)) - -setup = common_setup + """ -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO - -import os -N = 10000 -K = 8 -data = '''\ -KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 -KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 -KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 -KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 -KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 -''' -data = data * 200 -""" -cmd = ("read_table(StringIO(data), sep=',', header=None, " - "parse_dates=[[1,2], [1,3]])") -sdate = datetime(2012, 5, 7) -read_table_multiple_date = Benchmark(cmd, setup, start_date=sdate) - -setup = common_setup + """ -try: - from 
cStringIO import StringIO -except ImportError: - from io import StringIO - -import os -N = 10000 -K = 8 -data = '''\ -KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000 -KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000 -KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000 -KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000 -KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000 -''' -data = data * 200 -""" -cmd = "read_table(StringIO(data), sep=',', header=None, parse_dates=[1])" -sdate = datetime(2012, 5, 7) -read_table_multiple_date_baseline = Benchmark(cmd, setup, start_date=sdate) - -setup = common_setup + """ -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO - -data = '''\ -0.1213700904466425978256438611,0.0525708283766902484401839501,0.4174092731488769913994474336 -0.4096341697147408700274695547,0.1587830198973579909349496119,0.1292545832485494372576795285 -0.8323255650024565799327547210,0.9694902427379478160318626578,0.6295047811546814475747169126 -0.4679375305798131323697930383,0.2963942381834381301075609371,0.5268936082160610157032465394 -0.6685382761849776311890991564,0.6721207066140679753374342908,0.6519975277021627935170045020 -''' -data = data * 200 -""" -cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision=None)" -sdate = datetime(2014, 8, 20) -read_csv_default_converter = Benchmark(cmd, setup, start_date=sdate) -cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision='high')" -read_csv_precise_converter = Benchmark(cmd, setup, start_date=sdate) -cmd = "read_csv(StringIO(data), sep=',', header=None, float_precision='round_trip')" -read_csv_roundtrip_converter = Benchmark(cmd, setup, start_date=sdate) diff --git a/vb_suite/perf_HEAD.py b/vb_suite/perf_HEAD.py deleted file mode 100755 index 143d943b9eadf..0000000000000 --- a/vb_suite/perf_HEAD.py +++ /dev/null @@ -1,243 
+0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import print_function - -"""Run all the vbenches in `suite`, and post the results as a json blob to gist - -""" - -import urllib2 -from contextlib import closing -from urllib2 import urlopen -import json - -import pandas as pd - -WEB_TIMEOUT = 10 - - -def get_travis_data(): - """figure out what worker we're running on, and the number of jobs it's running - """ - import os - jobid = os.environ.get("TRAVIS_JOB_ID") - if not jobid: - return None, None - - with closing(urlopen("https://api.travis-ci.org/workers/")) as resp: - workers = json.loads(resp.read()) - - host = njobs = None - for item in workers: - host = item.get("host") - id = ((item.get("payload") or {}).get("job") or {}).get("id") - if id and str(id) == str(jobid): - break - if host: - njobs = len( - [x for x in workers if host in x['host'] and x['payload']]) - - return host, njobs - - -def get_utcdatetime(): - try: - from datetime import datetime - return datetime.utcnow().isoformat(" ") - except: - pass - - -def dump_as_gist(data, desc="The Commit", njobs=None): - host, njobs2 = get_travis_data()[:2] - - if njobs: # be slightly more reliable - njobs = max(njobs, njobs2) - - content = dict(version="0.1.1", - timings=data, - datetime=get_utcdatetime(), # added in 0.1.1 - hostname=host, # added in 0.1.1 - njobs=njobs # added in 0.1.1, a measure of load on the travis box - ) - - payload = dict(description=desc, - public=True, - files={'results.json': dict(content=json.dumps(content))}) - try: - with closing(urlopen("https://api.github.com/gists", - json.dumps(payload), timeout=WEB_TIMEOUT)) as r: - if 200 <= r.getcode() < 300: - print("\n\n" + "-" * 80) - - gist = json.loads(r.read()) - file_raw_url = gist['files'].items()[0][1]['raw_url'] - print("[vbench-gist-raw_url] %s" % file_raw_url) - print("[vbench-html-url] %s" % gist['html_url']) - print("[vbench-api-url] %s" % gist['url']) - - print("-" * 80 + "\n\n") - else: - 
print("api.github.com returned status %d" % r.getcode()) - except: - print("Error occured while dumping to gist") - - -def main(): - import warnings - from suite import benchmarks - - exit_code = 0 - warnings.filterwarnings('ignore', category=FutureWarning) - - host, njobs = get_travis_data()[:2] - results = [] - for b in benchmarks: - try: - d = b.run() - d.update(dict(name=b.name)) - results.append(d) - msg = "{name:<40}: {timing:> 10.4f} [ms]" - print(msg.format(name=results[-1]['name'], - timing=results[-1]['timing'])) - - except Exception as e: - exit_code = 1 - if (type(e) == KeyboardInterrupt or - 'KeyboardInterrupt' in str(d)): - raise KeyboardInterrupt() - - msg = "{name:<40}: ERROR:\n<-------" - print(msg.format(name=b.name)) - if isinstance(d, dict): - if d['succeeded']: - print("\nException:\n%s\n" % str(e)) - else: - for k, v in sorted(d.iteritems()): - print("{k}: {v}".format(k=k, v=v)) - - print("------->\n") - - dump_as_gist(results, "testing", njobs=njobs) - - return exit_code - - -if __name__ == "__main__": - import sys - sys.exit(main()) - -##################################################### -# functions for retrieving and processing the results - - -def get_vbench_log(build_url): - with closing(urllib2.urlopen(build_url)) as r: - if not (200 <= r.getcode() < 300): - return - - s = json.loads(r.read()) - s = [x for x in s['matrix'] if "VBENCH" in ((x.get('config', {}) - or {}).get('env', {}) or {})] - # s=[x for x in s['matrix']] - if not s: - return - id = s[0]['id'] # should be just one for now - with closing(urllib2.urlopen("https://api.travis-ci.org/jobs/%s" % id)) as r2: - if not 200 <= r.getcode() < 300: - return - s2 = json.loads(r2.read()) - return s2.get('log') - - -def get_results_raw_url(build): - "Taks a Travis a build number, retrieves the build log and extracts the gist url" - import re - log = get_vbench_log("https://api.travis-ci.org/builds/%s" % build) - if not log: - return - l = [x.strip( - ) for x in log.split("\n") if 
re.match(".vbench-gist-raw_url", x)] - if l: - s = l[0] - m = re.search("(https://[^\s]+)", s) - if m: - return m.group(0) - - -def convert_json_to_df(results_url): - """retrieve json results file from url and return df - - df contains timings for all successful vbenchmarks - """ - - with closing(urlopen(results_url)) as resp: - res = json.loads(resp.read()) - timings = res.get("timings") - if not timings: - return - res = [x for x in timings if x.get('succeeded')] - df = pd.DataFrame(res) - df = df.set_index("name") - return df - - -def get_build_results(build): - "Returns a df with the results of the VBENCH job associated with the travis build" - r_url = get_results_raw_url(build) - if not r_url: - return - - return convert_json_to_df(r_url) - - -def get_all_results(repo_id=53976): # travis pandas-dev/pandas id - """Fetches the VBENCH results for all travis builds, and returns a list of result df - - unsuccesful individual vbenches are dropped. - """ - from collections import OrderedDict - - def get_results_from_builds(builds): - dfs = OrderedDict() - for build in builds: - build_id = build['id'] - build_number = build['number'] - print(build_number) - res = get_build_results(build_id) - if res is not None: - dfs[build_number] = res - return dfs - - base_url = 'https://api.travis-ci.org/builds?url=%2Fbuilds&repository_id={repo_id}' - url = base_url.format(repo_id=repo_id) - url_after = url + '&after_number={after}' - dfs = OrderedDict() - - while True: - with closing(urlopen(url)) as r: - if not (200 <= r.getcode() < 300): - break - builds = json.loads(r.read()) - res = get_results_from_builds(builds) - if not res: - break - last_build_number = min(res.keys()) - dfs.update(res) - url = url_after.format(after=last_build_number) - - return dfs - - -def get_all_results_joined(repo_id=53976): - def mk_unique(df): - for dupe in df.index.get_duplicates(): - df = df.ix[df.index != dupe] - return df - dfs = get_all_results(repo_id) - for k in dfs: - dfs[k] = 
mk_unique(dfs[k]) - ss = [pd.Series(v.timing, name=k) for k, v in dfs.iteritems()] - results = pd.concat(reversed(ss), 1) - return results diff --git a/vb_suite/plotting.py b/vb_suite/plotting.py deleted file mode 100644 index 79e81e9eea8f4..0000000000000 --- a/vb_suite/plotting.py +++ /dev/null @@ -1,25 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * - -try: - from pandas import date_range -except ImportError: - def date_range(start=None, end=None, periods=None, freq=None): - return DatetimeIndex(start, end, periods=periods, offset=freq) - -""" - -#----------------------------------------------------------------------------- -# Timeseries plotting - -setup = common_setup + """ -N = 2000 -M = 5 -df = DataFrame(np.random.randn(N,M), index=date_range('1/1/1975', periods=N)) -""" - -plot_timeseries_period = Benchmark("df.plot()", setup=setup, - name='plot_timeseries_period') - diff --git a/vb_suite/reindex.py b/vb_suite/reindex.py deleted file mode 100644 index 443eb43835745..0000000000000 --- a/vb_suite/reindex.py +++ /dev/null @@ -1,225 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# DataFrame reindex columns - -setup = common_setup + """ -df = DataFrame(index=range(10000), data=np.random.rand(10000,30), - columns=range(30)) -""" -statement = "df.reindex(columns=df.columns[1:5])" - -frame_reindex_columns = Benchmark(statement, setup) - -#---------------------------------------------------------------------- - -setup = common_setup + """ -rng = DatetimeIndex(start='1/1/1970', periods=10000, freq=datetools.Minute()) -df = DataFrame(np.random.rand(10000, 10), index=rng, - columns=range(10)) -df['foo'] = 'bar' -rng2 = Index(rng[::2]) -""" -statement = "df.reindex(rng2)" -dataframe_reindex = Benchmark(statement, setup) - 
-#---------------------------------------------------------------------- -# multiindex reindexing - -setup = common_setup + """ -N = 1000 -K = 20 - -level1 = tm.makeStringIndex(N).values.repeat(K) -level2 = np.tile(tm.makeStringIndex(K).values, N) -index = MultiIndex.from_arrays([level1, level2]) - -s1 = Series(np.random.randn(N * K), index=index) -s2 = s1[::2] -""" -statement = "s1.reindex(s2.index)" -reindex_multi = Benchmark(statement, setup, - name='reindex_multiindex', - start_date=datetime(2011, 9, 1)) - -#---------------------------------------------------------------------- -# Pad / backfill - -def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') - -def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') - -setup = common_setup + """ -rng = date_range('1/1/2000', periods=100000, freq=datetools.Minute()) - -ts = Series(np.random.randn(len(rng)), index=rng) -ts2 = ts[::2] -ts3 = ts2.reindex(ts.index) -ts4 = ts3.astype('float32') - -def pad(source_series, target_index): - try: - source_series.reindex(target_index, method='pad') - except: - source_series.reindex(target_index, fillMethod='pad') -def backfill(source_series, target_index): - try: - source_series.reindex(target_index, method='backfill') - except: - source_series.reindex(target_index, fillMethod='backfill') -""" - -statement = "pad(ts2, ts.index)" -reindex_daterange_pad = Benchmark(statement, setup, - name="reindex_daterange_pad") - -statement = "backfill(ts2, ts.index)" -reindex_daterange_backfill = Benchmark(statement, setup, - name="reindex_daterange_backfill") - -reindex_fillna_pad = Benchmark("ts3.fillna(method='pad')", setup, - name="reindex_fillna_pad", - start_date=datetime(2011, 3, 1)) - -reindex_fillna_pad_float32 = Benchmark("ts4.fillna(method='pad')", setup, - 
name="reindex_fillna_pad_float32", - start_date=datetime(2013, 1, 1)) - -reindex_fillna_backfill = Benchmark("ts3.fillna(method='backfill')", setup, - name="reindex_fillna_backfill", - start_date=datetime(2011, 3, 1)) -reindex_fillna_backfill_float32 = Benchmark("ts4.fillna(method='backfill')", setup, - name="reindex_fillna_backfill_float32", - start_date=datetime(2013, 1, 1)) - -#---------------------------------------------------------------------- -# align on level - -setup = common_setup + """ -index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], - labels=[np.arange(10).repeat(10000), - np.tile(np.arange(100).repeat(100), 10), - np.tile(np.tile(np.arange(100), 100), 10)]) -random.shuffle(index.values) -df = DataFrame(np.random.randn(len(index), 4), index=index) -df_level = DataFrame(np.random.randn(100, 4), index=index.levels[1]) -""" - -reindex_frame_level_align = \ - Benchmark("df.align(df_level, level=1, copy=False)", setup, - name='reindex_frame_level_align', - start_date=datetime(2011, 12, 27)) - -reindex_frame_level_reindex = \ - Benchmark("df_level.reindex(df.index, level=1)", setup, - name='reindex_frame_level_reindex', - start_date=datetime(2011, 12, 27)) - - -#---------------------------------------------------------------------- -# sort_index, drop_duplicates - -# pathological, but realistic -setup = common_setup + """ -N = 10000 -K = 10 - -key1 = tm.makeStringIndex(N).values.repeat(K) -key2 = tm.makeStringIndex(N).values.repeat(K) - -df = DataFrame({'key1' : key1, 'key2' : key2, - 'value' : np.random.randn(N * K)}) -col_array_list = list(df.values.T) -""" -statement = "df.sort_index(by=['key1', 'key2'])" -frame_sort_index_by_columns = Benchmark(statement, setup, - start_date=datetime(2011, 11, 1)) - -# drop_duplicates - -statement = "df.drop_duplicates(['key1', 'key2'])" -frame_drop_duplicates = Benchmark(statement, setup, - start_date=datetime(2011, 11, 15)) - -statement = "df.drop_duplicates(['key1', 'key2'], inplace=True)" 
-frame_drop_dup_inplace = Benchmark(statement, setup, - start_date=datetime(2012, 5, 16)) - -lib_fast_zip = Benchmark('lib.fast_zip(col_array_list)', setup, - name='lib_fast_zip', - start_date=datetime(2012, 1, 1)) - -setup = setup + """ -df.ix[:10000, :] = np.nan -""" -statement2 = "df.drop_duplicates(['key1', 'key2'])" -frame_drop_duplicates_na = Benchmark(statement2, setup, - start_date=datetime(2012, 5, 15)) - -lib_fast_zip_fillna = Benchmark('lib.fast_zip_fillna(col_array_list)', setup, - start_date=datetime(2012, 5, 15)) - -statement2 = "df.drop_duplicates(['key1', 'key2'], inplace=True)" -frame_drop_dup_na_inplace = Benchmark(statement2, setup, - start_date=datetime(2012, 5, 16)) - -setup = common_setup + """ -s = Series(np.random.randint(0, 1000, size=10000)) -s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10)) -""" - -series_drop_duplicates_int = Benchmark('s.drop_duplicates()', setup, - start_date=datetime(2012, 11, 27)) - -series_drop_duplicates_string = \ - Benchmark('s2.drop_duplicates()', setup, - start_date=datetime(2012, 11, 27)) - -#---------------------------------------------------------------------- -# fillna, many columns - - -setup = common_setup + """ -values = np.random.randn(1000, 1000) -values[::2] = np.nan -df = DataFrame(values) -""" - -frame_fillna_many_columns_pad = Benchmark("df.fillna(method='pad')", - setup, - start_date=datetime(2011, 3, 1)) - -#---------------------------------------------------------------------- -# blog "pandas escaped the zoo" - -setup = common_setup + """ -n = 50000 -indices = tm.makeStringIndex(n) - -def sample(values, k): - from random import shuffle - sampler = np.arange(len(values)) - shuffle(sampler) - return values.take(sampler[:k]) - -subsample_size = 40000 - -x = Series(np.random.randn(50000), indices) -y = Series(np.random.randn(subsample_size), - index=sample(indices, subsample_size)) -""" - -series_align_irregular_string = Benchmark("x + y", setup, - start_date=datetime(2010, 6, 1)) diff --git 
a/vb_suite/replace.py b/vb_suite/replace.py deleted file mode 100644 index 9326aa5becca9..0000000000000 --- a/vb_suite/replace.py +++ /dev/null @@ -1,36 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -from datetime import timedelta - -N = 1000000 - -try: - rng = date_range('1/1/2000', periods=N, freq='min') -except NameError: - rng = DatetimeIndex('1/1/2000', periods=N, offset=datetools.Minute()) - date_range = DateRange - -ts = Series(np.random.randn(N), index=rng) -""" - -large_dict_setup = """from .pandas_vb_common import * -from pandas.compat import range -n = 10 ** 6 -start_value = 10 ** 5 -to_rep = dict((i, start_value + i) for i in range(n)) -s = Series(np.random.randint(n, size=10 ** 3)) -""" - -replace_fillna = Benchmark('ts.fillna(0., inplace=True)', common_setup, - name='replace_fillna', - start_date=datetime(2012, 4, 4)) -replace_replacena = Benchmark('ts.replace(np.nan, 0., inplace=True)', - common_setup, - name='replace_replacena', - start_date=datetime(2012, 5, 15)) -replace_large_dict = Benchmark('s.replace(to_rep, inplace=True)', - large_dict_setup, - name='replace_large_dict', - start_date=datetime(2014, 4, 6)) diff --git a/vb_suite/reshape.py b/vb_suite/reshape.py deleted file mode 100644 index daab96103f2c5..0000000000000 --- a/vb_suite/reshape.py +++ /dev/null @@ -1,65 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -index = MultiIndex.from_arrays([np.arange(100).repeat(100), - np.roll(np.tile(np.arange(100), 100), 25)]) -df = DataFrame(np.random.randn(10000, 4), index=index) -""" - -reshape_unstack_simple = Benchmark('df.unstack(1)', common_setup, - start_date=datetime(2011, 10, 1)) - -setup = common_setup + """ -udf = df.unstack(1) -""" - -reshape_stack_simple = Benchmark('udf.stack()', setup, - start_date=datetime(2011, 10, 1)) - -setup = common_setup + """ -def unpivot(frame): - N, K = 
frame.shape - data = {'value' : frame.values.ravel('F'), - 'variable' : np.asarray(frame.columns).repeat(N), - 'date' : np.tile(np.asarray(frame.index), K)} - return DataFrame(data, columns=['date', 'variable', 'value']) -index = date_range('1/1/2000', periods=10000, freq='h') -df = DataFrame(randn(10000, 50), index=index, columns=range(50)) -pdf = unpivot(df) -f = lambda: pdf.pivot('date', 'variable', 'value') -""" - -reshape_pivot_time_series = Benchmark('f()', setup, - start_date=datetime(2012, 5, 1)) - -# Sparse key space, re: #2278 - -setup = common_setup + """ -NUM_ROWS = 1000 -for iter in range(10): - df = DataFrame({'A' : np.random.randint(50, size=NUM_ROWS), - 'B' : np.random.randint(50, size=NUM_ROWS), - 'C' : np.random.randint(-10,10, size=NUM_ROWS), - 'D' : np.random.randint(-10,10, size=NUM_ROWS), - 'E' : np.random.randint(10, size=NUM_ROWS), - 'F' : np.random.randn(NUM_ROWS)}) - idf = df.set_index(['A', 'B', 'C', 'D', 'E']) - if len(idf.index.unique()) == NUM_ROWS: - break -""" - -unstack_sparse_keyspace = Benchmark('idf.unstack()', setup, - start_date=datetime(2011, 10, 1)) - -# Melt - -setup = common_setup + """ -from pandas.core.reshape import melt -df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C']) -df['id1'] = np.random.randint(0, 10, 10000) -df['id2'] = np.random.randint(100, 1000, 10000) -""" - -melt_dataframe = Benchmark("melt(df, id_vars=['id1', 'id2'])", setup, - start_date=datetime(2012, 8, 1)) diff --git a/vb_suite/run_suite.py b/vb_suite/run_suite.py deleted file mode 100755 index 43bf24faae43a..0000000000000 --- a/vb_suite/run_suite.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python -from vbench.api import BenchmarkRunner -from suite import * - - -def run_process(): - runner = BenchmarkRunner(benchmarks, REPO_PATH, REPO_URL, - BUILD, DB_PATH, TMP_DIR, PREPARE, - always_clean=True, - run_option='eod', start_date=START_DATE, - module_dependencies=dependencies) - runner.run() - -if __name__ == '__main__': - 
run_process() diff --git a/vb_suite/series_methods.py b/vb_suite/series_methods.py deleted file mode 100644 index c545f419c2dec..0000000000000 --- a/vb_suite/series_methods.py +++ /dev/null @@ -1,39 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -setup = common_setup + """ -s1 = Series(np.random.randn(10000)) -s2 = Series(np.random.randint(1, 10, 10000)) -s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') -values = [1,2] -s4 = s3.astype('object') -""" - -series_nlargest1 = Benchmark("s1.nlargest(3, keep='last');" - "s1.nlargest(3, keep='first')", - setup, - start_date=datetime(2014, 1, 25)) -series_nlargest2 = Benchmark("s2.nlargest(3, keep='last');" - "s2.nlargest(3, keep='first')", - setup, - start_date=datetime(2014, 1, 25)) - -series_nsmallest2 = Benchmark("s1.nsmallest(3, keep='last');" - "s1.nsmallest(3, keep='first')", - setup, - start_date=datetime(2014, 1, 25)) - -series_nsmallest2 = Benchmark("s2.nsmallest(3, keep='last');" - "s2.nsmallest(3, keep='first')", - setup, - start_date=datetime(2014, 1, 25)) - -series_isin_int64 = Benchmark('s3.isin(values)', - setup, - start_date=datetime(2014, 1, 25)) -series_isin_object = Benchmark('s4.isin(values)', - setup, - start_date=datetime(2014, 1, 25)) diff --git a/vb_suite/source/_static/stub b/vb_suite/source/_static/stub deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/vb_suite/source/conf.py b/vb_suite/source/conf.py deleted file mode 100644 index d83448fd97d09..0000000000000 --- a/vb_suite/source/conf.py +++ /dev/null @@ -1,225 +0,0 @@ -# -*- coding: utf-8 -*- -# -# pandas documentation build configuration file, created by -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. 
-# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.append(os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../sphinxext')) - -sys.path.extend([ - - # numpy standard doc extensions - os.path.join(os.path.dirname(__file__), - '..', '../..', - 'sphinxext') - -]) - -# -- General configuration ----------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext. - -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.doctest'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates', '_templates/autosummary'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'pandas' -copyright = u'2008-2011, the pandas development team' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -import pandas - -# version = '%s r%s' % (pandas.__version__, svn_version()) -version = '%s' % (pandas.__version__) - -# The full version, including alpha/beta/rc tags. -release = version - -# JP: added from sphinxdocs -autosummary_generate = True - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. 
-# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -# unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------- - -# The theme to use for HTML and HTML Help pages. Major themes that come with -# Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'agogo' - -# The style sheet to use for HTML and HTML Help pages. A file of that name -# must exist either in Sphinx' static/ path, or in one of the custom paths -# given in html_static_path. -# html_style = 'statsmodels.css' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = ['themes'] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". 
-html_title = 'Vbench performance benchmarks for pandas' - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'performance' - - -# -- Options for LaTeX output -------------------------------------------- - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'performance.tex', - u'pandas vbench Performance Benchmarks', - u'Wes McKinney', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True - - -# Example configuration for intersphinx: refer to the Python standard library. -# intersphinx_mapping = {'http://docs.scipy.org/': None} -import glob -autosummary_generate = glob.glob("*.rst") diff --git a/vb_suite/source/themes/agogo/layout.html b/vb_suite/source/themes/agogo/layout.html deleted file mode 100644 index cd0f3d7ffc9c7..0000000000000 --- a/vb_suite/source/themes/agogo/layout.html +++ /dev/null @@ -1,95 +0,0 @@ -{# - agogo/layout.html - ~~~~~~~~~~~~~~~~~ - - Sphinx layout template for the agogo theme, originally written - by Andi Albrecht. - - :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -#} -{% extends "basic/layout.html" %} - -{% block header %} -
-{% endblock %} - -{% block content %} -
-
- -
- {%- block document %} - {{ super() }} - {%- endblock %} -
-
-
-
-{% endblock %} - -{% block footer %} - -{% endblock %} - -{% block relbar1 %}{% endblock %} -{% block relbar2 %}{% endblock %} diff --git a/vb_suite/source/themes/agogo/static/agogo.css_t b/vb_suite/source/themes/agogo/static/agogo.css_t deleted file mode 100644 index ef909b72e20f6..0000000000000 --- a/vb_suite/source/themes/agogo/static/agogo.css_t +++ /dev/null @@ -1,476 +0,0 @@ -/* - * agogo.css_t - * ~~~~~~~~~~~ - * - * Sphinx stylesheet -- agogo theme. - * - * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -* { - margin: 0px; - padding: 0px; -} - -body { - font-family: {{ theme_bodyfont }}; - line-height: 1.4em; - color: black; - background-color: {{ theme_bgcolor }}; -} - - -/* Page layout */ - -div.header, div.content, div.footer { - max-width: {{ theme_pagewidth }}; - margin-left: auto; - margin-right: auto; -} - -div.header-wrapper { - background: {{ theme_headerbg }}; - padding: 1em 1em 0; - border-bottom: 3px solid #2e3436; - min-height: 0px; -} - - -/* Default body styles */ -a { - color: {{ theme_linkcolor }}; -} - -div.bodywrapper a, div.footer a { - text-decoration: underline; -} - -.clearer { - clear: both; -} - -.left { - float: left; -} - -.right { - float: right; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -h1, h2, h3, h4 { - font-family: {{ theme_headerfont }}; - font-weight: normal; - color: {{ theme_headercolor2 }}; - margin-bottom: .8em; -} - -h1 { - color: {{ theme_headercolor1 }}; -} - -h2 { - padding-bottom: .5em; - border-bottom: 1px solid {{ theme_headercolor2 }}; -} - -a.headerlink { - visibility: hidden; - color: #dddddd; - padding-left: .3em; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - 
visibility: visible; -} - -img { - border: 0; -} - -pre { - background-color: #EEE; - padding: 0.5em; -} - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 2px 7px 1px 7px; - border-left: 0.2em solid black; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -dt:target, .highlighted { - background-color: #fbe54e; -} - -/* Header */ - -/* -div.header { - padding-top: 10px; - padding-bottom: 10px; -} -*/ - -div.header {} - -div.header h1 { - font-family: {{ theme_headerfont }}; - font-weight: normal; - font-size: 180%; - letter-spacing: .08em; -} - -div.header h1 a { - color: white; -} - -div.header div.rel { - text-decoration: none; -} -/* margin-top: 1em; */ - -div.header div.rel a { - margin-top: 1em; - color: {{ theme_headerlinkcolor }}; - letter-spacing: .1em; - text-transform: uppercase; - padding: 3px 1em; -} - -p.logo { - float: right; -} - -img.logo { - border: 0; -} - - -/* Content */ -div.content-wrapper { - background-color: white; - padding: 1em; -} -/* - padding-top: 20px; - padding-bottom: 20px; -*/ - -/* float: left; */ - -div.document { - max-width: {{ theme_documentwidth }}; -} - -div.body { - padding-right: 2em; - text-align: {{ theme_textalign }}; -} - -div.document ul { - margin: 1.5em; - list-style-type: square; -} - -div.document dd { - margin-left: 1.2em; - margin-top: .4em; - margin-bottom: 1em; -} - -div.document .section { - margin-top: 1.7em; -} -div.document .section:first-child { - margin-top: 0px; -} - -div.document div.highlight { - padding: 3px; - background-color: #eeeeec; - border-top: 2px solid #dddddd; - border-bottom: 2px solid #dddddd; - margin-top: .8em; - margin-bottom: .8em; -} - -div.document h2 { - margin-top: .7em; -} - -div.document p { - margin-bottom: .5em; -} - -div.document li.toctree-l1 { - margin-bottom: 1em; -} - -div.document .descname { - font-weight: bold; -} - -div.document .docutils.literal { - background-color: #eeeeec; - padding: 1px; -} - -div.document 
.docutils.xref.literal { - background-color: transparent; - padding: 0px; -} - -div.document blockquote { - margin: 1em; -} - -div.document ol { - margin: 1.5em; -} - - -/* Sidebar */ - - -div.sidebar { - width: {{ theme_sidebarwidth }}; - padding: 0 1em; - float: right; - font-size: .93em; -} - -div.sidebar a, div.header a { - text-decoration: none; -} - -div.sidebar a:hover, div.header a:hover { - text-decoration: underline; -} - -div.sidebar h3 { - color: #2e3436; - text-transform: uppercase; - font-size: 130%; - letter-spacing: .1em; -} - -div.sidebar ul { - list-style-type: none; -} - -div.sidebar li.toctree-l1 a { - display: block; - padding: 1px; - border: 1px solid #dddddd; - background-color: #eeeeec; - margin-bottom: .4em; - padding-left: 3px; - color: #2e3436; -} - -div.sidebar li.toctree-l2 a { - background-color: transparent; - border: none; - margin-left: 1em; - border-bottom: 1px solid #dddddd; -} - -div.sidebar li.toctree-l3 a { - background-color: transparent; - border: none; - margin-left: 2em; - border-bottom: 1px solid #dddddd; -} - -div.sidebar li.toctree-l2:last-child a { - border-bottom: none; -} - -div.sidebar li.toctree-l1.current a { - border-right: 5px solid {{ theme_headerlinkcolor }}; -} - -div.sidebar li.toctree-l1.current li.toctree-l2 a { - border-right: none; -} - - -/* Footer */ - -div.footer-wrapper { - background: {{ theme_footerbg }}; - border-top: 4px solid #babdb6; - padding-top: 10px; - padding-bottom: 10px; - min-height: 80px; -} - -div.footer, div.footer a { - color: #888a85; -} - -div.footer .right { - text-align: right; -} - -div.footer .left { - text-transform: uppercase; -} - - -/* Styles copied from basic theme */ - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: 
block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - clear: both; - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable dl, table.indextable dd { - margin-top: 0; - margin-bottom: 0; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -/* -- viewcode extension ---------------------------------------------------- */ - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family:: {{ theme_bodyfont }}; -} - -div.viewcode-block:target { - margin: -1px -3px; - padding: 0 3px; - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} - -th.field-name { - white-space: nowrap; -} diff --git a/vb_suite/source/themes/agogo/static/bgfooter.png b/vb_suite/source/themes/agogo/static/bgfooter.png deleted file mode 100644 index 
9ce5bdd902943fdf8b0c0ca6a545297e1e2cc665..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 434 zcmV;j0ZsmiP)Px#24YJ`L;%wO*8tD73qoQ5000SaNLh0L01FcU01FcV0GgZ_00007bV*G`2iXD> z2Q(2CT#42I000?uMObu0Z*6U5Zgc=ca%Ew3Wn>_CX>@2HM@dakSAh-}0003ENklR?sq9~H`=l5UI-{JW_f9!)=Hwush3JC}Y z1gFM&r>$lJNPt^*1k!w;l|obx>lr$2IOaI$n=(gBBaj^I0=y%@K5N&GIU&-%OE_~V zX=m=_j7d`hvubQRuF+xT63vIfWnC3%kKN*T3l7ob3nEC2R->wU1Y)4)(7_t^thiqb zj$CO7xBn9gg`*!MY$}SI|_*)!a*&V0w7h>cUb&$Grh37iJ=C%Yn c>}w1E0Z4f>1OEiDlmGw#07*qoM6N<$g4BwtIsgCw diff --git a/vb_suite/source/themes/agogo/static/bgtop.png b/vb_suite/source/themes/agogo/static/bgtop.png deleted file mode 100644 index a0d4709bac8f79943a817195c086461c8c4d5419..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 430 zcmV;f0a5;mP)Px#24YJ`L;zI)R{&FzA;Z4_000SaNLh0L01FcU01FcV0GgZ_00007bV*G`2iXD> z2Q3AZhV-)l000?uMObu0Z*6U5Zgc=ca%Ew3Wn>_CX>@2HM@dakSAh-}0003ANklMo8vqN`cM=KwSQV|n zk}naE+VzlN;kK@Ej${PSkI$-R6-Yfp`zA;^O$`)7`gRi{-0i?owGIbX{p>Nc##93U z;sA|ayOYkG%F9M0iEMUM*s3NDYSS=KN2ht8Rv|7nv77i{NTO47R)}V_+2H~mL-nTR z_8j}*%6Qm8?#7NU2kM$#gcP&kO?iw|n}ynz+r-~FA9nKcZnfixWvZ&d28Cc_6&_Pe zMpbjI>9r+<=}NIDz4mCd3U++H?rrHcYxH&eeB|)>mnv*N#44ILM2zL6yU!VVWSrgp Y0Yu&#qm)=by8r+H07*qoM6N<$f@HC)j{pDw diff --git a/vb_suite/source/themes/agogo/theme.conf b/vb_suite/source/themes/agogo/theme.conf deleted file mode 100644 index 3fc88580f1ab4..0000000000000 --- a/vb_suite/source/themes/agogo/theme.conf +++ /dev/null @@ -1,19 +0,0 @@ -[theme] -inherit = basic -stylesheet = agogo.css -pygments_style = tango - -[options] -bodyfont = "Verdana", Arial, sans-serif -headerfont = "Georgia", "Times New Roman", serif -pagewidth = 70em -documentwidth = 50em -sidebarwidth = 20em -bgcolor = #eeeeec -headerbg = url(bgtop.png) top left repeat-x -footerbg = url(bgfooter.png) top left repeat-x -linkcolor = #ce5c00 -headercolor1 = #204a87 -headercolor2 = #3465a4 -headerlinkcolor = #fcaf3e -textalign = justify \ No newline at end of 
file diff --git a/vb_suite/sparse.py b/vb_suite/sparse.py deleted file mode 100644 index b1c1a2f24e41d..0000000000000 --- a/vb_suite/sparse.py +++ /dev/null @@ -1,65 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- - -setup = common_setup + """ -from pandas.core.sparse import SparseSeries, SparseDataFrame - -K = 50 -N = 50000 -rng = np.asarray(date_range('1/1/2000', periods=N, - freq='T')) - -# rng2 = np.asarray(rng).astype('M8[ns]').astype('i8') - -series = {} -for i in range(1, K + 1): - data = np.random.randn(N)[:-i] - this_rng = rng[:-i] - data[100:] = np.nan - series[i] = SparseSeries(data, index=this_rng) -""" -stmt = "SparseDataFrame(series)" - -bm_sparse1 = Benchmark(stmt, setup, name="sparse_series_to_frame", - start_date=datetime(2011, 6, 1)) - - -setup = common_setup + """ -from pandas.core.sparse import SparseDataFrame -""" - -stmt = "SparseDataFrame(columns=np.arange(100), index=np.arange(1000))" - -sparse_constructor = Benchmark(stmt, setup, name="sparse_frame_constructor", - start_date=datetime(2012, 6, 1)) - - -setup = common_setup + """ -s = pd.Series([np.nan] * 10000) -s[0] = 3.0 -s[100] = -1.0 -s[999] = 12.1 -s.index = pd.MultiIndex.from_product((range(10), range(10), range(10), range(10))) -ss = s.to_sparse() -""" - -stmt = "ss.to_coo(row_levels=[0, 1], column_levels=[2, 3], sort_labels=True)" - -sparse_series_to_coo = Benchmark(stmt, setup, name="sparse_series_to_coo", - start_date=datetime(2015, 1, 3)) - -setup = common_setup + """ -import scipy.sparse -import pandas.core.sparse.series -A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100)) -""" - -stmt = "ss = pandas.core.sparse.series.SparseSeries.from_coo(A)" - -sparse_series_from_coo = Benchmark(stmt, setup, name="sparse_series_from_coo", - start_date=datetime(2015, 1, 3)) diff --git 
a/vb_suite/stat_ops.py b/vb_suite/stat_ops.py deleted file mode 100644 index 8d7c30dc9fdcf..0000000000000 --- a/vb_suite/stat_ops.py +++ /dev/null @@ -1,126 +0,0 @@ -from vbench.benchmark import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -""" - -#---------------------------------------------------------------------- -# nanops - -setup = common_setup + """ -s = Series(np.random.randn(100000), index=np.arange(100000)) -s[::2] = np.nan -""" - -stat_ops_series_std = Benchmark("s.std()", setup) - -#---------------------------------------------------------------------- -# ops by level - -setup = common_setup + """ -index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], - labels=[np.arange(10).repeat(10000), - np.tile(np.arange(100).repeat(100), 10), - np.tile(np.tile(np.arange(100), 100), 10)]) -random.shuffle(index.values) -df = DataFrame(np.random.randn(len(index), 4), index=index) -df_level = DataFrame(np.random.randn(100, 4), index=index.levels[1]) -""" - -stat_ops_level_frame_sum = \ - Benchmark("df.sum(level=1)", setup, - start_date=datetime(2011, 11, 15)) - -stat_ops_level_frame_sum_multiple = \ - Benchmark("df.sum(level=[0, 1])", setup, repeat=1, - start_date=datetime(2011, 11, 15)) - -stat_ops_level_series_sum = \ - Benchmark("df[1].sum(level=1)", setup, - start_date=datetime(2011, 11, 15)) - -stat_ops_level_series_sum_multiple = \ - Benchmark("df[1].sum(level=[0, 1])", setup, repeat=1, - start_date=datetime(2011, 11, 15)) - -sum_setup = common_setup + """ -df = DataFrame(np.random.randn(100000, 4)) -dfi = DataFrame(np.random.randint(1000, size=df.shape)) -""" - -stat_ops_frame_sum_int_axis_0 = \ - Benchmark("dfi.sum()", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_sum_float_axis_0 = \ - Benchmark("df.sum()", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_mean_int_axis_0 = \ - Benchmark("dfi.mean()", sum_setup, start_date=datetime(2013, 7, 25)) - 
-stat_ops_frame_mean_float_axis_0 = \ - Benchmark("df.mean()", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_sum_int_axis_1 = \ - Benchmark("dfi.sum(1)", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_sum_float_axis_1 = \ - Benchmark("df.sum(1)", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_mean_int_axis_1 = \ - Benchmark("dfi.mean(1)", sum_setup, start_date=datetime(2013, 7, 25)) - -stat_ops_frame_mean_float_axis_1 = \ - Benchmark("df.mean(1)", sum_setup, start_date=datetime(2013, 7, 25)) - -#---------------------------------------------------------------------- -# rank - -setup = common_setup + """ -values = np.concatenate([np.arange(100000), - np.random.randn(100000), - np.arange(100000)]) -s = Series(values) -""" - -stats_rank_average = Benchmark('s.rank()', setup, - start_date=datetime(2011, 12, 12)) - -stats_rank_pct_average = Benchmark('s.rank(pct=True)', setup, - start_date=datetime(2014, 1, 16)) -stats_rank_pct_average_old = Benchmark('s.rank() / len(s)', setup, - start_date=datetime(2014, 1, 16)) -setup = common_setup + """ -values = np.random.randint(0, 100000, size=200000) -s = Series(values) -""" - -stats_rank_average_int = Benchmark('s.rank()', setup, - start_date=datetime(2011, 12, 12)) - -setup = common_setup + """ -df = DataFrame(np.random.randn(5000, 50)) -""" - -stats_rank2d_axis1_average = Benchmark('df.rank(1)', setup, - start_date=datetime(2011, 12, 12)) - -stats_rank2d_axis0_average = Benchmark('df.rank()', setup, - start_date=datetime(2011, 12, 12)) - -# rolling functions - -setup = common_setup + """ -arr = np.random.randn(100000) -""" - -stats_rolling_mean = Benchmark('rolling_mean(arr, 100)', setup, - start_date=datetime(2011, 6, 1)) - -# spearman correlation - -setup = common_setup + """ -df = DataFrame(np.random.randn(1000, 30)) -""" - -stats_corr_spearman = Benchmark("df.corr(method='spearman')", setup, - start_date=datetime(2011, 12, 4)) diff --git a/vb_suite/strings.py 
b/vb_suite/strings.py deleted file mode 100644 index 0948df5673a0d..0000000000000 --- a/vb_suite/strings.py +++ /dev/null @@ -1,59 +0,0 @@ -from vbench.api import Benchmark - -common_setup = """from .pandas_vb_common import * -""" - -setup = common_setup + """ -import string -import itertools as IT - -def make_series(letters, strlen, size): - return Series( - [str(x) for x in np.fromiter(IT.cycle(letters), count=size*strlen, dtype='|S1') - .view('|S{}'.format(strlen))]) - -many = make_series('matchthis'+string.ascii_uppercase, strlen=19, size=10000) # 31% matches -few = make_series('matchthis'+string.ascii_uppercase*42, strlen=19, size=10000) # 1% matches -""" - -strings_cat = Benchmark("many.str.cat(sep=',')", setup) -strings_title = Benchmark("many.str.title()", setup) -strings_count = Benchmark("many.str.count('matchthis')", setup) -strings_contains_many = Benchmark("many.str.contains('matchthis')", setup) -strings_contains_few = Benchmark("few.str.contains('matchthis')", setup) -strings_contains_many_noregex = Benchmark( - "many.str.contains('matchthis', regex=False)", setup) -strings_contains_few_noregex = Benchmark( - "few.str.contains('matchthis', regex=False)", setup) -strings_startswith = Benchmark("many.str.startswith('matchthis')", setup) -strings_endswith = Benchmark("many.str.endswith('matchthis')", setup) -strings_lower = Benchmark("many.str.lower()", setup) -strings_upper = Benchmark("many.str.upper()", setup) -strings_replace = Benchmark("many.str.replace(r'(matchthis)', r'\1\1')", setup) -strings_repeat = Benchmark( - "many.str.repeat(list(IT.islice(IT.cycle(range(1,4)),len(many))))", setup) -strings_match = Benchmark("many.str.match(r'mat..this')", setup) -strings_extract = Benchmark("many.str.extract(r'(\w*)matchthis(\w*)')", setup) -strings_join_split = Benchmark("many.str.join(r'--').str.split('--')", setup) -strings_join_split_expand = Benchmark("many.str.join(r'--').str.split('--',expand=True)", setup) -strings_len = 
Benchmark("many.str.len()", setup) -strings_findall = Benchmark("many.str.findall(r'[A-Z]+')", setup) -strings_pad = Benchmark("many.str.pad(100, side='both')", setup) -strings_center = Benchmark("many.str.center(100)", setup) -strings_slice = Benchmark("many.str.slice(5,15,2)", setup) -strings_strip = Benchmark("many.str.strip('matchthis')", setup) -strings_lstrip = Benchmark("many.str.lstrip('matchthis')", setup) -strings_rstrip = Benchmark("many.str.rstrip('matchthis')", setup) -strings_get = Benchmark("many.str.get(0)", setup) - -setup = setup + """ -s = make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|') -""" -strings_get_dummies = Benchmark("s.str.get_dummies('|')", setup) - -setup = common_setup + """ -import pandas.util.testing as testing -ser = Series(testing.makeUnicodeIndex()) -""" - -strings_encode_decode = Benchmark("ser.str.encode('utf-8').str.decode('utf-8')", setup) diff --git a/vb_suite/suite.py b/vb_suite/suite.py deleted file mode 100644 index 45053b6610896..0000000000000 --- a/vb_suite/suite.py +++ /dev/null @@ -1,164 +0,0 @@ -from vbench.api import Benchmark, GitRepo -from datetime import datetime - -import os - -modules = ['attrs_caching', - 'binary_ops', - 'ctors', - 'frame_ctor', - 'frame_methods', - 'groupby', - 'index_object', - 'indexing', - 'io_bench', - 'io_sql', - 'inference', - 'hdfstore_bench', - 'join_merge', - 'gil', - 'miscellaneous', - 'panel_ctor', - 'packers', - 'parser_vb', - 'panel_methods', - 'plotting', - 'reindex', - 'replace', - 'sparse', - 'strings', - 'reshape', - 'stat_ops', - 'timeseries', - 'timedelta', - 'eval'] - -by_module = {} -benchmarks = [] - -for modname in modules: - ref = __import__(modname) - by_module[modname] = [v for v in ref.__dict__.values() - if isinstance(v, Benchmark)] - benchmarks.extend(by_module[modname]) - -for bm in benchmarks: - assert(bm.name is not None) - -import getpass -import sys - -USERNAME = getpass.getuser() - -if sys.platform == 'darwin': - HOME = '/Users/%s' % 
USERNAME -else: - HOME = '/home/%s' % USERNAME - -try: - import ConfigParser - - config = ConfigParser.ConfigParser() - config.readfp(open(os.path.expanduser('~/.vbenchcfg'))) - - REPO_PATH = config.get('setup', 'repo_path') - REPO_URL = config.get('setup', 'repo_url') - DB_PATH = config.get('setup', 'db_path') - TMP_DIR = config.get('setup', 'tmp_dir') -except: - REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) - REPO_URL = 'git@github.com:pandas-dev/pandas.git' - DB_PATH = os.path.join(REPO_PATH, 'vb_suite/benchmarks.db') - TMP_DIR = os.path.join(HOME, 'tmp/vb_pandas') - -PREPARE = """ -python setup.py clean -""" -BUILD = """ -python setup.py build_ext --inplace -""" -dependencies = ['pandas_vb_common.py'] - -START_DATE = datetime(2010, 6, 1) - -# repo = GitRepo(REPO_PATH) - -RST_BASE = 'source' - -# HACK! - -# timespan = [datetime(2011, 1, 1), datetime(2012, 1, 1)] - - -def generate_rst_files(benchmarks): - import matplotlib as mpl - mpl.use('Agg') - import matplotlib.pyplot as plt - - vb_path = os.path.join(RST_BASE, 'vbench') - fig_base_path = os.path.join(vb_path, 'figures') - - if not os.path.exists(vb_path): - print('creating %s' % vb_path) - os.makedirs(vb_path) - - if not os.path.exists(fig_base_path): - print('creating %s' % fig_base_path) - os.makedirs(fig_base_path) - - for bmk in benchmarks: - print('Generating rst file for %s' % bmk.name) - rst_path = os.path.join(RST_BASE, 'vbench/%s.txt' % bmk.name) - - fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name) - - # make the figure - plt.figure(figsize=(10, 6)) - ax = plt.gca() - bmk.plot(DB_PATH, ax=ax) - - start, end = ax.get_xlim() - - plt.xlim([start - 30, end + 30]) - plt.savefig(fig_full_path, bbox_inches='tight') - plt.close('all') - - fig_rel_path = 'vbench/figures/%s.png' % bmk.name - rst_text = bmk.to_rst(image_path=fig_rel_path) - with open(rst_path, 'w') as f: - f.write(rst_text) - - with open(os.path.join(RST_BASE, 'index.rst'), 'w') as f: - print >> 
f, """ -Performance Benchmarks -====================== - -These historical benchmark graphs were produced with `vbench -`__. - -The ``.pandas_vb_common`` setup script can be found here_ - -.. _here: https://github.com/pandas-dev/pandas/tree/master/vb_suite - -Produced on a machine with - - - Intel Core i7 950 processor - - (K)ubuntu Linux 12.10 - - Python 2.7.2 64-bit (Enthought Python Distribution 7.1-2) - - NumPy 1.6.1 - -.. toctree:: - :hidden: - :maxdepth: 3 -""" - for modname, mod_bmks in sorted(by_module.items()): - print >> f, ' vb_%s' % modname - modpath = os.path.join(RST_BASE, 'vb_%s.rst' % modname) - with open(modpath, 'w') as mh: - header = '%s\n%s\n\n' % (modname, '=' * len(modname)) - print >> mh, header - - for bmk in mod_bmks: - print >> mh, bmk.name - print >> mh, '-' * len(bmk.name) - print >> mh, '.. include:: vbench/%s.txt\n' % bmk.name diff --git a/vb_suite/test.py b/vb_suite/test.py deleted file mode 100644 index da30c3e1a5f76..0000000000000 --- a/vb_suite/test.py +++ /dev/null @@ -1,67 +0,0 @@ -from pandas import * -import matplotlib.pyplot as plt - -import sqlite3 - -from vbench.git import GitRepo - - -REPO_PATH = '/home/adam/code/pandas' -repo = GitRepo(REPO_PATH) - -con = sqlite3.connect('vb_suite/benchmarks.db') - -bmk = '36900a889961162138c140ce4ae3c205' -# bmk = '9d7b8c04b532df6c2d55ef497039b0ce' -bmk = '4481aa4efa9926683002a673d2ed3dac' -bmk = '00593cd8c03d769669d7b46585161726' -bmk = '3725ab7cd0a0657d7ae70f171c877cea' -bmk = '3cd376d6d6ef802cdea49ac47a67be21' -bmk2 = '459225186023853494bc345fd180f395' -bmk = 'c22ca82e0cfba8dc42595103113c7da3' -bmk = 'e0e651a8e9fbf0270ab68137f8b9df5f' -bmk = '96bda4b9a60e17acf92a243580f2a0c3' - - -def get_results(bmk): - results = con.execute( - "select * from results where checksum='%s'" % bmk).fetchall() - x = Series(dict((t[1], t[3]) for t in results)) - x.index = x.index.map(repo.timestamps.get) - x = x.sort_index() - return x - -x = get_results(bmk) - - -def graph1(): - dm_getitem = 
get_results('459225186023853494bc345fd180f395') - dm_getvalue = get_results('c22ca82e0cfba8dc42595103113c7da3') - - plt.figure() - ax = plt.gca() - - dm_getitem.plot(label='df[col][idx]', ax=ax) - dm_getvalue.plot(label='df.get_value(idx, col)', ax=ax) - - plt.ylabel('ms') - plt.legend(loc='best') - - -def graph2(): - bm = get_results('96bda4b9a60e17acf92a243580f2a0c3') - plt.figure() - ax = plt.gca() - - bm.plot(ax=ax) - plt.ylabel('ms') - -bm = get_results('36900a889961162138c140ce4ae3c205') -fig = plt.figure() -ax = plt.gca() -bm.plot(ax=ax) -fig.autofmt_xdate() - -plt.xlim([bm.dropna().index[0] - datetools.MonthEnd(), - bm.dropna().index[-1] + datetools.MonthEnd()]) -plt.ylabel('ms') diff --git a/vb_suite/test_perf.py b/vb_suite/test_perf.py deleted file mode 100755 index be546b72f9465..0000000000000 --- a/vb_suite/test_perf.py +++ /dev/null @@ -1,616 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -What ----- -vbench is a library which can be used to benchmark the performance -of a codebase over time. -Although vbench can collect data over many commites, generate plots -and other niceties, for Pull-Requests the important thing is the -performance of the HEAD commit against a known-good baseline. - -This script tries to automate the process of comparing these -two commits, and is meant to run out of the box on a fresh -clone. - -How ---- -These are the steps taken: -1) create a temp directory into which vbench will clone the temporary repo. -2) instantiate a vbench runner, using the local repo as the source repo. -3) perform a vbench run for the baseline commit, then the target commit. -4) pull the results for both commits from the db. use pandas to align -everything and calculate a ration for the timing information. -5) print the results to the log file and to stdout. - -""" - -# IMPORTANT NOTE -# -# This script should run on pandas versions at least as far back as 0.9.1. 
-# devs should be able to use the latest version of this script with -# any dusty old commit and expect it to "just work". -# One way in which this is useful is when collecting historical data, -# where writing some logic around this script may prove easier -# in some cases then running vbench directly (think perf bisection). -# -# *please*, when you modify this script for whatever reason, -# make sure you do not break its functionality when running under older -# pandas versions. -# Note that depreaction warnings are turned off in main(), so there's -# no need to change the actual code to supress such warnings. - -import shutil -import os -import sys -import argparse -import tempfile -import time -import re - -import random -import numpy as np - -import pandas as pd -from pandas import DataFrame, Series - -from suite import REPO_PATH -VB_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -DEFAULT_MIN_DURATION = 0.01 -HEAD_COL="head[ms]" -BASE_COL="base[ms]" - -try: - import git # gitpython -except Exception: - print("Error: Please install the `gitpython` package\n") - sys.exit(1) - -class RevParseAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - import subprocess - cmd = 'git rev-parse --short -verify {0}^{{commit}}'.format(values) - rev_parse = subprocess.check_output(cmd, shell=True) - setattr(namespace, self.dest, rev_parse.strip()) - - -parser = argparse.ArgumentParser(description='Use vbench to measure and compare the performance of commits.') -parser.add_argument('-H', '--head', - help='Execute vbenches using the currently checked out copy.', - dest='head', - action='store_true', - default=False) -parser.add_argument('-b', '--base-commit', - help='The commit serving as performance baseline ', - type=str, action=RevParseAction) -parser.add_argument('-t', '--target-commit', - help='The commit to compare against the baseline (default: HEAD).', - type=str, action=RevParseAction) 
-parser.add_argument('--base-pickle', - help='name of pickle file with timings data generated by a former `-H -d FILE` run. '\ - 'filename must be of the form -*.* or specify --base-commit seperately', - type=str) -parser.add_argument('--target-pickle', - help='name of pickle file with timings data generated by a former `-H -d FILE` run '\ - 'filename must be of the form -*.* or specify --target-commit seperately', - type=str) -parser.add_argument('-m', '--min-duration', - help='Minimum duration (in ms) of baseline test for inclusion in report (default: %.3f).' % DEFAULT_MIN_DURATION, - type=float, - default=0.01) -parser.add_argument('-o', '--output', - metavar="", - dest='log_file', - help='Path of file in which to save the textual report (default: vb_suite.log).') -parser.add_argument('-d', '--outdf', - metavar="FNAME", - dest='outdf', - default=None, - help='Name of file to df.save() the result table into. Will overwrite') -parser.add_argument('-r', '--regex', - metavar="REGEX", - dest='regex', - default="", - help='Regex pat, only tests whose name matches the regext will be run.') -parser.add_argument('-s', '--seed', - metavar="SEED", - dest='seed', - default=1234, - type=int, - help='Integer value to seed PRNG with') -parser.add_argument('-n', '--repeats', - metavar="N", - dest='repeats', - default=3, - type=int, - help='Number of times to run each vbench, result value is the best of') -parser.add_argument('-c', '--ncalls', - metavar="N", - dest='ncalls', - default=3, - type=int, - help='Number of calls to in each repetition of a vbench') -parser.add_argument('-N', '--hrepeats', - metavar="N", - dest='hrepeats', - default=1, - type=int, - help='implies -H, number of times to run the vbench suite on the head commit.\n' - 'Each iteration will yield another column in the output' ) -parser.add_argument('-a', '--affinity', - metavar="a", - dest='affinity', - default=1, - type=int, - help='set processor affinity of process by default bind to cpu/core #1 only. 
' - 'Requires the "affinity" or "psutil" python module, will raise Warning otherwise') -parser.add_argument('-u', '--burnin', - metavar="u", - dest='burnin', - default=1, - type=int, - help='Number of extra iteration per benchmark to perform first, then throw away. ' ) - -parser.add_argument('-S', '--stats', - default=False, - action='store_true', - help='when specified with -N, prints the output of describe() per vbench results. ' ) - -parser.add_argument('--temp-dir', - metavar="PATH", - default=None, - help='Specify temp work dir to use. ccache depends on builds being invoked from consistent directory.' ) - -parser.add_argument('-q', '--quiet', - default=False, - action='store_true', - help='Suppress report output to stdout. ' ) - -def get_results_df(db, rev): - """Takes a git commit hash and returns a Dataframe of benchmark results - """ - bench = DataFrame(db.get_benchmarks()) - results = DataFrame(map(list,db.get_rev_results(rev).values())) - - # Sinch vbench.db._reg_rev_results returns an unlabeled dict, - # we have to break encapsulation a bit. - results.columns = db._results.c.keys() - results = results.join(bench['name'], on='checksum').set_index("checksum") - return results - - -def prprint(s): - print("*** %s" % s) - -def pre_hook(): - import gc - gc.disable() - -def post_hook(): - import gc - gc.enable() - -def profile_comparative(benchmarks): - - from vbench.api import BenchmarkRunner - from vbench.db import BenchmarkDB - from vbench.git import GitRepo - from suite import BUILD, DB_PATH, PREPARE, dependencies - - TMP_DIR = args.temp_dir or tempfile.mkdtemp() - - try: - - prprint("Opening DB at '%s'...\n" % DB_PATH) - db = BenchmarkDB(DB_PATH) - - prprint("Initializing Runner...") - - # all in a good cause... 
- GitRepo._parse_commit_log = _parse_wrapper(args.base_commit) - - runner = BenchmarkRunner( - benchmarks, REPO_PATH, REPO_PATH, BUILD, DB_PATH, - TMP_DIR, PREPARE, always_clean=True, - # run_option='eod', start_date=START_DATE, - module_dependencies=dependencies) - - repo = runner.repo # (steal the parsed git repo used by runner) - h_head = args.target_commit or repo.shas[-1] - h_baseline = args.base_commit - - # ARGH. reparse the repo, without discarding any commits, - # then overwrite the previous parse results - # prprint("Slaughtering kittens...") - (repo.shas, repo.messages, - repo.timestamps, repo.authors) = _parse_commit_log(None,REPO_PATH, - args.base_commit) - - prprint('Target [%s] : %s\n' % (h_head, repo.messages.get(h_head, ""))) - prprint('Baseline [%s] : %s\n' % (h_baseline, - repo.messages.get(h_baseline, ""))) - - prprint("Removing any previous measurements for the commits.") - db.delete_rev_results(h_baseline) - db.delete_rev_results(h_head) - - # TODO: we could skip this, but we need to make sure all - # results are in the DB, which is a little tricky with - # start dates and so on. - prprint("Running benchmarks for baseline [%s]" % h_baseline) - runner._run_and_write_results(h_baseline) - - prprint("Running benchmarks for target [%s]" % h_head) - runner._run_and_write_results(h_head) - - prprint('Processing results...') - - head_res = get_results_df(db, h_head) - baseline_res = get_results_df(db, h_baseline) - - report_comparative(head_res,baseline_res) - - finally: - # print("Disposing of TMP_DIR: %s" % TMP_DIR) - shutil.rmtree(TMP_DIR) - -def prep_pickle_for_total(df, agg_name='median'): - """ - accepts a datafram resulting from invocation with -H -d o.pickle - If multiple data columns are present (-N was used), the - `agg_name` attr of the datafram will be used to reduce - them to a single value per vbench, df.median is used by defa - ult. 
- - Returns a datadrame of the form expected by prep_totals - """ - def prep(df): - agg = getattr(df,agg_name) - df = DataFrame(agg(1)) - cols = list(df.columns) - cols[0]='timing' - df.columns=cols - df['name'] = list(df.index) - return df - - return prep(df) - -def prep_totals(head_res, baseline_res): - """ - Each argument should be a dataframe with 'timing' and 'name' columns - where name is the name of the vbench. - - returns a 'totals' dataframe, suitable as input for print_report. - """ - head_res, baseline_res = head_res.align(baseline_res) - ratio = head_res['timing'] / baseline_res['timing'] - totals = DataFrame({HEAD_COL:head_res['timing'], - BASE_COL:baseline_res['timing'], - 'ratio':ratio, - 'name':baseline_res.name}, - columns=[HEAD_COL, BASE_COL, "ratio", "name"]) - totals = totals.ix[totals[HEAD_COL] > args.min_duration] - # ignore below threshold - totals = totals.dropna( - ).sort("ratio").set_index('name') # sort in ascending order - return totals - -def report_comparative(head_res,baseline_res): - try: - r=git.Repo(VB_DIR) - except: - import pdb - pdb.set_trace() - - totals = prep_totals(head_res,baseline_res) - - h_head = args.target_commit - h_baseline = args.base_commit - h_msg = b_msg = "Unknown" - try: - h_msg = r.commit(h_head).message.strip() - except git.exc.BadObject: - pass - try: - b_msg = r.commit(h_baseline).message.strip() - except git.exc.BadObject: - pass - - - print_report(totals,h_head=h_head,h_msg=h_msg, - h_baseline=h_baseline,b_msg=b_msg) - - if args.outdf: - prprint("The results DataFrame was written to '%s'\n" % args.outdf) - totals.save(args.outdf) - -def profile_head_single(benchmark): - import gc - results = [] - - # just in case - gc.collect() - - try: - from ctypes import cdll, CDLL - cdll.LoadLibrary("libc.so.6") - libc = CDLL("libc.so.6") - libc.malloc_trim(0) - except: - pass - - - N = args.hrepeats + args.burnin - - results = [] - try: - for i in range(N): - gc.disable() - d=dict() - - try: - d = benchmark.run() - - 
except KeyboardInterrupt: - raise - except Exception as e: # if a single vbench bursts into flames, don't die. - err="" - try: - err = d.get("traceback") - if err is None: - err = str(e) - except: - pass - print("%s died with:\n%s\nSkipping...\n" % (benchmark.name, err)) - - results.append(d.get('timing',np.nan)) - gc.enable() - gc.collect() - - finally: - gc.enable() - - if results: - # throw away the burn_in - results = results[args.burnin:] - sys.stdout.write('.') - sys.stdout.flush() - return Series(results, name=benchmark.name) - - # df = DataFrame(results) - # df.columns = ["name",HEAD_COL] - # return df.set_index("name")[HEAD_COL] - -def profile_head(benchmarks): - print( "Performing %d benchmarks (%d runs each)" % ( len(benchmarks), args.hrepeats)) - - ss= [profile_head_single(b) for b in benchmarks] - print("\n") - - results = DataFrame(ss) - results.columns=[ "#%d" %i for i in range(args.hrepeats)] - # results.index = ["#%d" % i for i in range(len(ss))] - # results = results.T - - shas, messages, _,_ = _parse_commit_log(None,REPO_PATH,base_commit="HEAD^") - print_report(results,h_head=shas[-1],h_msg=messages[-1]) - - - if args.outdf: - prprint("The results DataFrame was written to '%s'\n" % args.outdf) - DataFrame(results).save(args.outdf) - -def print_report(df,h_head=None,h_msg="",h_baseline=None,b_msg=""): - - name_width=45 - col_width = 10 - - hdr = ("{:%s}" % name_width).format("Test name") - hdr += ("|{:^%d}" % col_width)* len(df.columns) - hdr += "|" - hdr = hdr.format(*df.columns) - hdr = "-"*len(hdr) + "\n" + hdr + "\n" + "-"*len(hdr) + "\n" - ftr=hdr - s = "\n" - s+= "Invoked with :\n" - s+= "--ncalls: %s\n" % (args.ncalls or 'Auto') - s+= "--repeats: %s\n" % (args.repeats) - s+= "\n\n" - - s += hdr - # import ipdb - # ipdb.set_trace() - for i in range(len(df)): - lfmt = ("{:%s}" % name_width) - lfmt += ("| {:%d.4f} " % (col_width-2))* len(df.columns) - lfmt += "|\n" - s += lfmt.format(df.index[i],*list(df.iloc[i].values)) - - s+= ftr + "\n" - - 
s += "Ratio < 1.0 means the target commit is faster then the baseline.\n" - s += "Seed used: %d\n\n" % args.seed - - if h_head: - s += 'Target [%s] : %s\n' % (h_head, h_msg) - if h_baseline: - s += 'Base [%s] : %s\n\n' % ( - h_baseline, b_msg) - - stats_footer = "\n" - if args.stats : - try: - pd.options.display.expand_frame_repr=False - except: - pass - stats_footer += str(df.T.describe().T) + "\n\n" - - s+= stats_footer - logfile = open(args.log_file, 'w') - logfile.write(s) - logfile.close() - - if not args.quiet: - prprint(s) - - if args.stats and args.quiet: - prprint(stats_footer) - - prprint("Results were also written to the logfile at '%s'" % - args.log_file) - - - -def main(): - from suite import benchmarks - - if not args.log_file: - args.log_file = os.path.abspath( - os.path.join(REPO_PATH, 'vb_suite.log')) - - saved_dir = os.path.curdir - if args.outdf: - # not bullet-proof but enough for us - args.outdf = os.path.realpath(args.outdf) - - if args.log_file: - # not bullet-proof but enough for us - args.log_file = os.path.realpath(args.log_file) - - random.seed(args.seed) - np.random.seed(args.seed) - - if args.base_pickle and args.target_pickle: - baseline_res = prep_pickle_for_total(pd.load(args.base_pickle)) - target_res = prep_pickle_for_total(pd.load(args.target_pickle)) - - report_comparative(target_res, baseline_res) - sys.exit(0) - - if args.affinity is not None: - try: # use psutil rather then stale affinity module. 
Thanks @yarikoptic - import psutil - if hasattr(psutil.Process, 'set_cpu_affinity'): - psutil.Process(os.getpid()).set_cpu_affinity([args.affinity]) - print("CPU affinity set to %d" % args.affinity) - except ImportError: - print("-a/--affinity specified, but the 'psutil' module is not available, aborting.\n") - sys.exit(1) - - print("\n") - prprint("LOG_FILE = %s" % args.log_file) - if args.outdf: - prprint("PICKE_FILE = %s" % args.outdf) - - print("\n") - - # move away from the pandas root dir, to avoid possible import - # surprises - os.chdir(os.path.dirname(os.path.abspath(__file__))) - - benchmarks = [x for x in benchmarks if re.search(args.regex,x.name)] - - for b in benchmarks: - b.repeat = args.repeats - if args.ncalls: - b.ncalls = args.ncalls - - if benchmarks: - if args.head: - profile_head(benchmarks) - else: - profile_comparative(benchmarks) - else: - print( "No matching benchmarks") - - os.chdir(saved_dir) - -# hack , vbench.git ignores some commits, but we -# need to be able to reference any commit. -# modified from vbench.git -def _parse_commit_log(this,repo_path,base_commit=None): - from vbench.git import _convert_timezones - from pandas import Series - from dateutil import parser as dparser - - git_cmd = 'git --git-dir=%s/.git --work-tree=%s ' % (repo_path, repo_path) - githist = git_cmd + ('log --graph --pretty=format:'+ - '\"::%h::%cd::%s::%an\"'+ - ('%s..' 
% base_commit)+ - '> githist.txt') - os.system(githist) - githist = open('githist.txt').read() - os.remove('githist.txt') - - shas = [] - timestamps = [] - messages = [] - authors = [] - for line in githist.split('\n'): - if '*' not in line.split("::")[0]: # skip non-commit lines - continue - - _, sha, stamp, message, author = line.split('::', 4) - - # parse timestamp into datetime object - stamp = dparser.parse(stamp) - - shas.append(sha) - timestamps.append(stamp) - messages.append(message) - authors.append(author) - - # to UTC for now - timestamps = _convert_timezones(timestamps) - - shas = Series(shas, timestamps) - messages = Series(messages, shas) - timestamps = Series(timestamps, shas) - authors = Series(authors, shas) - return shas[::-1], messages[::-1], timestamps[::-1], authors[::-1] - -# even worse, monkey patch vbench -def _parse_wrapper(base_commit): - def inner(repo_path): - return _parse_commit_log(repo_path,base_commit) - return inner - -if __name__ == '__main__': - args = parser.parse_args() - if (not args.head - and not (args.base_commit and args.target_commit) - and not (args.base_pickle and args.target_pickle)): - parser.print_help() - sys.exit(1) - elif ((args.base_pickle or args.target_pickle) and not - (args.base_pickle and args.target_pickle)): - print("Must specify Both --base-pickle and --target-pickle.") - sys.exit(1) - - if ((args.base_pickle or args.target_pickle) and not - (args.base_commit and args.target_commit)): - if not args.base_commit: - print("base_commit not specified, Assuming base_pickle is named -foo.*") - args.base_commit = args.base_pickle.split('-')[0] - if not args.target_commit: - print("target_commit not specified, Assuming target_pickle is named -foo.*") - args.target_commit = args.target_pickle.split('-')[0] - - import warnings - warnings.filterwarnings('ignore',category=FutureWarning) - warnings.filterwarnings('ignore',category=DeprecationWarning) - - if args.base_commit and args.target_commit: - print("Verifying 
specified commits exist in repo...") - r=git.Repo(VB_DIR) - for c in [ args.base_commit, args.target_commit ]: - try: - msg = r.commit(c).message.strip() - except git.BadObject: - print("The commit '%s' was not found, aborting..." % c) - sys.exit(1) - else: - print("%s: %s" % (c,msg)) - - main() diff --git a/vb_suite/timedelta.py b/vb_suite/timedelta.py deleted file mode 100644 index 378968ea1379a..0000000000000 --- a/vb_suite/timedelta.py +++ /dev/null @@ -1,32 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime - -common_setup = """from .pandas_vb_common import * -from pandas import to_timedelta -""" - -#---------------------------------------------------------------------- -# conversion - -setup = common_setup + """ -arr = np.random.randint(0,1000,size=10000) -""" - -stmt = "to_timedelta(arr,unit='s')" -timedelta_convert_int = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) - -setup = common_setup + """ -arr = np.random.randint(0,1000,size=10000) -arr = [ '{0} days'.format(i) for i in arr ] -""" - -stmt = "to_timedelta(arr)" -timedelta_convert_string = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) - -setup = common_setup + """ -arr = np.random.randint(0,60,size=10000) -arr = [ '00:00:{0:02d}'.format(i) for i in arr ] -""" - -stmt = "to_timedelta(arr)" -timedelta_convert_string_seconds = Benchmark(stmt, setup, start_date=datetime(2014, 1, 1)) diff --git a/vb_suite/timeseries.py b/vb_suite/timeseries.py deleted file mode 100644 index 15bc89d62305f..0000000000000 --- a/vb_suite/timeseries.py +++ /dev/null @@ -1,445 +0,0 @@ -from vbench.api import Benchmark -from datetime import datetime -from pandas import * - -N = 100000 -try: - rng = date_range(start='1/1/2000', periods=N, freq='min') -except NameError: - rng = DatetimeIndex(start='1/1/2000', periods=N, freq='T') - def date_range(start=None, end=None, periods=None, freq=None): - return DatetimeIndex(start=start, end=end, periods=periods, offset=freq) - - -common_setup = 
"""from .pandas_vb_common import * -from datetime import timedelta -N = 100000 - -rng = date_range(start='1/1/2000', periods=N, freq='T') - -if hasattr(Series, 'convert'): - Series.resample = Series.convert - -ts = Series(np.random.randn(N), index=rng) -""" - -#---------------------------------------------------------------------- -# Lookup value in large time series, hash map population - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=1500000, freq='S') -ts = Series(1, index=rng) -""" - -stmt = "ts[ts.index[len(ts) // 2]]; ts.index._cleanup()" -timeseries_large_lookup_value = Benchmark(stmt, setup, - start_date=datetime(2012, 1, 1)) - -#---------------------------------------------------------------------- -# Test slice minutely series - -timeseries_slice_minutely = Benchmark('ts[:10000]', common_setup) - -#---------------------------------------------------------------------- -# Test conversion - -setup = common_setup + """ - -""" - -timeseries_1min_5min_ohlc = Benchmark( - "ts[:10000].resample('5min', how='ohlc')", - common_setup, - start_date=datetime(2012, 5, 1)) - -timeseries_1min_5min_mean = Benchmark( - "ts[:10000].resample('5min', how='mean')", - common_setup, - start_date=datetime(2012, 5, 1)) - -#---------------------------------------------------------------------- -# Irregular alignment - -setup = common_setup + """ -lindex = np.random.permutation(N)[:N // 2] -rindex = np.random.permutation(N)[:N // 2] -left = Series(ts.values.take(lindex), index=ts.index.take(lindex)) -right = Series(ts.values.take(rindex), index=ts.index.take(rindex)) -""" - -timeseries_add_irregular = Benchmark('left + right', setup) - -#---------------------------------------------------------------------- -# Sort large irregular time series - -setup = common_setup + """ -N = 100000 -rng = date_range(start='1/1/2000', periods=N, freq='s') -rng = rng.take(np.random.permutation(N)) -ts = Series(np.random.randn(N), index=rng) -""" - -timeseries_sort_index = 
Benchmark('ts.sort_index()', setup, - start_date=datetime(2012, 4, 1)) - -#---------------------------------------------------------------------- -# Shifting, add offset - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=10000, freq='T') -""" - -datetimeindex_add_offset = Benchmark('rng + timedelta(minutes=2)', setup, - start_date=datetime(2012, 4, 1)) - -setup = common_setup + """ -N = 10000 -rng = date_range(start='1/1/1990', periods=N, freq='53s') -ts = Series(np.random.randn(N), index=rng) -dates = date_range(start='1/1/1990', periods=N * 10, freq='5s') -""" -timeseries_asof_single = Benchmark('ts.asof(dates[0])', setup, - start_date=datetime(2012, 4, 27)) - -timeseries_asof = Benchmark('ts.asof(dates)', setup, - start_date=datetime(2012, 4, 27)) - -setup = setup + 'ts[250:5000] = np.nan' - -timeseries_asof_nan = Benchmark('ts.asof(dates)', setup, - start_date=datetime(2012, 4, 27)) - -#---------------------------------------------------------------------- -# Time zone - -setup = common_setup + """ -rng = date_range(start='1/1/2000', end='3/1/2000', tz='US/Eastern') -""" - -timeseries_timestamp_tzinfo_cons = \ - Benchmark('rng[0]', setup, start_date=datetime(2012, 5, 5)) - -#---------------------------------------------------------------------- -# Resampling period - -setup = common_setup + """ -rng = period_range(start='1/1/2000', end='1/1/2001', freq='T') -ts = Series(np.random.randn(len(rng)), index=rng) -""" - -timeseries_period_downsample_mean = \ - Benchmark("ts.resample('D', how='mean')", setup, - start_date=datetime(2012, 4, 25)) - -setup = common_setup + """ -rng = date_range(start='1/1/2000', end='1/1/2001', freq='T') -ts = Series(np.random.randn(len(rng)), index=rng) -""" - -timeseries_timestamp_downsample_mean = \ - Benchmark("ts.resample('D', how='mean')", setup, - start_date=datetime(2012, 4, 25)) - -# GH 7754 -setup = common_setup + """ -rng = date_range(start='2000-01-01 00:00:00', - end='2000-01-01 10:00:00', 
freq='555000U') -int_ts = Series(5, rng, dtype='int64') -ts = int_ts.astype('datetime64[ns]') -""" - -timeseries_resample_datetime64 = Benchmark("ts.resample('1S', how='last')", setup) - -#---------------------------------------------------------------------- -# to_datetime - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=20000, freq='H') -strings = [x.strftime('%Y-%m-%d %H:%M:%S') for x in rng] -""" - -timeseries_to_datetime_iso8601 = \ - Benchmark('to_datetime(strings)', setup, - start_date=datetime(2012, 7, 11)) - -timeseries_to_datetime_iso8601_format = \ - Benchmark("to_datetime(strings, format='%Y-%m-%d %H:%M:%S')", setup, - start_date=datetime(2012, 7, 11)) - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=10000, freq='D') -strings = Series(rng.year*10000+rng.month*100+rng.day,dtype=np.int64).apply(str) -""" - -timeseries_to_datetime_YYYYMMDD = \ - Benchmark('to_datetime(strings,format="%Y%m%d")', setup, - start_date=datetime(2012, 7, 1)) - -setup = common_setup + """ -s = Series(['19MAY11','19MAY11:00:00:00']*100000) -""" -timeseries_with_format_no_exact = Benchmark("to_datetime(s,format='%d%b%y',exact=False)", \ - setup, start_date=datetime(2014, 11, 26)) -timeseries_with_format_replace = Benchmark("to_datetime(s.str.replace(':\S+$',''),format='%d%b%y')", \ - setup, start_date=datetime(2014, 11, 26)) - -# ---- infer_freq -# infer_freq - -setup = common_setup + """ -from pandas.tseries.frequencies import infer_freq -rng = date_range(start='1/1/1700', freq='D', periods=100000) -a = rng[:50000].append(rng[50002:]) -""" - -timeseries_infer_freq = \ - Benchmark('infer_freq(a)', setup, start_date=datetime(2012, 7, 1)) - -# setitem PeriodIndex - -setup = common_setup + """ -rng = period_range(start='1/1/1990', freq='S', periods=20000) -df = DataFrame(index=range(len(rng))) -""" - -period_setitem = \ - Benchmark("df['col'] = rng", setup, - start_date=datetime(2012, 8, 1)) - -setup = common_setup + """ -rng = 
date_range(start='1/1/2000 9:30', periods=10000, freq='S', tz='US/Eastern') -""" - -datetimeindex_normalize = \ - Benchmark('rng.normalize()', setup, - start_date=datetime(2012, 9, 1)) - -setup = common_setup + """ -from pandas.tseries.offsets import Second -s1 = date_range(start='1/1/2000', periods=100, freq='S') -curr = s1[-1] -slst = [] -for i in range(100): - slst.append(curr + Second()), periods=100, freq='S') - curr = slst[-1][-1] -""" - -# dti_append_tz = \ -# Benchmark('s1.append(slst)', setup, start_date=datetime(2012, 9, 1)) - - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=1000, freq='H') -df = DataFrame(np.random.randn(len(rng), 2), rng) -""" - -dti_reset_index = \ - Benchmark('df.reset_index()', setup, start_date=datetime(2012, 9, 1)) - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=1000, freq='H', - tz='US/Eastern') -df = DataFrame(np.random.randn(len(rng), 2), index=rng) -""" - -dti_reset_index_tz = \ - Benchmark('df.reset_index()', setup, start_date=datetime(2012, 9, 1)) - -setup = common_setup + """ -rng = date_range(start='1/1/2000', periods=1000, freq='T') -index = rng.repeat(10) -""" - -datetimeindex_unique = Benchmark('index.unique()', setup, - start_date=datetime(2012, 7, 1)) - -# tz_localize with infer argument. This is an attempt to emulate the results -# of read_csv with duplicated data. 
Not passing infer_dst will fail -setup = common_setup + """ -dst_rng = date_range(start='10/29/2000 1:00:00', - end='10/29/2000 1:59:59', freq='S') -index = date_range(start='10/29/2000', end='10/29/2000 00:59:59', freq='S') -index = index.append(dst_rng) -index = index.append(dst_rng) -index = index.append(date_range(start='10/29/2000 2:00:00', - end='10/29/2000 3:00:00', freq='S')) -""" - -datetimeindex_infer_dst = \ -Benchmark('index.tz_localize("US/Eastern", infer_dst=True)', - setup, start_date=datetime(2013, 9, 30)) - - -#---------------------------------------------------------------------- -# Resampling: fast-path various functions - -setup = common_setup + """ -rng = date_range(start='20130101',periods=100000,freq='50L') -df = DataFrame(np.random.randn(100000,2),index=rng) -""" - -dataframe_resample_mean_string = \ - Benchmark("df.resample('1s', how='mean')", setup) - -dataframe_resample_mean_numpy = \ - Benchmark("df.resample('1s', how=np.mean)", setup) - -dataframe_resample_min_string = \ - Benchmark("df.resample('1s', how='min')", setup) - -dataframe_resample_min_numpy = \ - Benchmark("df.resample('1s', how=np.min)", setup) - -dataframe_resample_max_string = \ - Benchmark("df.resample('1s', how='max')", setup) - -dataframe_resample_max_numpy = \ - Benchmark("df.resample('1s', how=np.max)", setup) - - -#---------------------------------------------------------------------- -# DatetimeConverter - -setup = common_setup + """ -from pandas.tseries.converter import DatetimeConverter -""" - -datetimeindex_converter = \ - Benchmark('DatetimeConverter.convert(rng, None, None)', - setup, start_date=datetime(2013, 1, 1)) - -# Adding custom business day -setup = common_setup + """ -import datetime as dt -import pandas as pd -try: - import pandas.tseries.holiday -except ImportError: - pass -import numpy as np - -date = dt.datetime(2011,1,1) -dt64 = np.datetime64('2011-01-01 09:00Z') -hcal = pd.tseries.holiday.USFederalHolidayCalendar() - -day = pd.offsets.Day() 
-year = pd.offsets.YearBegin() -cday = pd.offsets.CustomBusinessDay() -cmb = pd.offsets.CustomBusinessMonthBegin(calendar=hcal) -cme = pd.offsets.CustomBusinessMonthEnd(calendar=hcal) - -cdayh = pd.offsets.CustomBusinessDay(calendar=hcal) -""" -timeseries_day_incr = Benchmark("date + day",setup) - -timeseries_day_apply = Benchmark("day.apply(date)",setup) - -timeseries_year_incr = Benchmark("date + year",setup) - -timeseries_year_apply = Benchmark("year.apply(date)",setup) - -timeseries_custom_bday_incr = \ - Benchmark("date + cday",setup) - -timeseries_custom_bday_decr = \ - Benchmark("date - cday",setup) - -timeseries_custom_bday_apply = \ - Benchmark("cday.apply(date)",setup) - -timeseries_custom_bday_apply_dt64 = \ - Benchmark("cday.apply(dt64)",setup) - -timeseries_custom_bday_cal_incr = \ - Benchmark("date + 1 * cdayh",setup) - -timeseries_custom_bday_cal_decr = \ - Benchmark("date - 1 * cdayh",setup) - -timeseries_custom_bday_cal_incr_n = \ - Benchmark("date + 10 * cdayh",setup) - -timeseries_custom_bday_cal_incr_neg_n = \ - Benchmark("date - 10 * cdayh",setup) - -# Increment custom business month -timeseries_custom_bmonthend_incr = \ - Benchmark("date + cme",setup) - -timeseries_custom_bmonthend_incr_n = \ - Benchmark("date + 10 * cme",setup) - -timeseries_custom_bmonthend_decr_n = \ - Benchmark("date - 10 * cme",setup) - -timeseries_custom_bmonthbegin_incr_n = \ - Benchmark("date + 10 * cmb",setup) - -timeseries_custom_bmonthbegin_decr_n = \ - Benchmark("date - 10 * cmb",setup) - - -#---------------------------------------------------------------------- -# month/quarter/year start/end accessors - -setup = common_setup + """ -N = 10000 -rng = date_range(start='1/1/1', periods=N, freq='B') -""" - -timeseries_is_month_start = Benchmark('rng.is_month_start', setup, - start_date=datetime(2014, 4, 1)) - -#---------------------------------------------------------------------- -# iterate over DatetimeIndex/PeriodIndex -setup = common_setup + """ -N = 1000000 -M = 
10000 -idx1 = date_range(start='20140101', freq='T', periods=N) -idx2 = period_range(start='20140101', freq='T', periods=N) - -def iter_n(iterable, n=None): - i = 0 - for _ in iterable: - i += 1 - if n is not None and i > n: - break -""" - -timeseries_iter_datetimeindex = Benchmark('iter_n(idx1)', setup) - -timeseries_iter_periodindex = Benchmark('iter_n(idx2)', setup) - -timeseries_iter_datetimeindex_preexit = Benchmark('iter_n(idx1, M)', setup) - -timeseries_iter_periodindex_preexit = Benchmark('iter_n(idx2, M)', setup) - - -#---------------------------------------------------------------------- -# apply an Offset to a DatetimeIndex -setup = common_setup + """ -N = 100000 -idx1 = date_range(start='20140101', freq='T', periods=N) -delta_offset = pd.offsets.Day() -fast_offset = pd.offsets.DateOffset(months=2, days=2) -slow_offset = pd.offsets.BusinessDay() - -""" - -timeseries_datetimeindex_offset_delta = Benchmark('idx1 + delta_offset', setup) -timeseries_datetimeindex_offset_fast = Benchmark('idx1 + fast_offset', setup) -timeseries_datetimeindex_offset_slow = Benchmark('idx1 + slow_offset', setup) - -# apply an Offset to a Series containing datetime64 values -setup = common_setup + """ -N = 100000 -s = Series(date_range(start='20140101', freq='T', periods=N)) -delta_offset = pd.offsets.Day() -fast_offset = pd.offsets.DateOffset(months=2, days=2) -slow_offset = pd.offsets.BusinessDay() - -""" - -timeseries_series_offset_delta = Benchmark('s + delta_offset', setup) -timeseries_series_offset_fast = Benchmark('s + fast_offset', setup) -timeseries_series_offset_slow = Benchmark('s + slow_offset', setup) From e0cbc37ccc02c3ee539a19d9043b541ace0d2733 Mon Sep 17 00:00:00 2001 From: Pietro Battiston Date: Mon, 17 Apr 2017 22:44:24 +0200 Subject: [PATCH 47/56] TST: partial indexing with __getitem__ and integer labels (#16029) closes #12416 --- pandas/tests/indexing/test_multiindex.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git 
a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py index 07786b9fb4b72..c39e25a1f1d74 100644 --- a/pandas/tests/indexing/test_multiindex.py +++ b/pandas/tests/indexing/test_multiindex.py @@ -275,6 +275,30 @@ def test_loc_multiindex(self): xp = mi_int.ix[4] tm.assert_frame_equal(rs, xp) + def test_getitem_partial_int(self): + # GH 12416 + # with single item + l1 = [10, 20] + l2 = ['a', 'b'] + df = DataFrame(index=range(2), + columns=pd.MultiIndex.from_product([l1, l2])) + expected = DataFrame(index=range(2), + columns=l2) + result = df[20] + tm.assert_frame_equal(result, expected) + + # with list + expected = DataFrame(index=range(2), + columns=pd.MultiIndex.from_product([l1[1:], l2])) + result = df[[20]] + tm.assert_frame_equal(result, expected) + + # missing item: + with tm.assertRaisesRegexp(KeyError, '1'): + df[1] + with tm.assertRaisesRegexp(KeyError, "'\[1\] not in index'"): + df[[1]] + def test_loc_multiindex_indexer_none(self): # GH6788 From a65492f884653c81d684786ce6098f88def3b0fe Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Mon, 17 Apr 2017 19:58:04 -0400 Subject: [PATCH 48/56] TST: skip 32bit platform on test_get_indexer_closer for interval index --- pandas/tests/indexes/test_interval.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index d99ef9538c5b1..825e508174374 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -4,7 +4,8 @@ import numpy as np from pandas import (Interval, IntervalIndex, Index, isnull, - interval_range, Timestamp, Timedelta) + interval_range, Timestamp, Timedelta, + compat) from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base import pandas.util.testing as tm @@ -778,6 +779,8 @@ def test_get_loc_closed(self): self.assert_numpy_array_equal(tree.get_loc(p), np.array([0], dtype='int64')) + 
@pytest.mark.skipif(compat.is_platform_32bit(), + reason="int type mistmach on 32bit") def test_get_indexer_closed(self): x = np.arange(1000, dtype='float64') found = x.astype('intp') From cd1031f28664928c7e258dbb04d1f5861bad6b37 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 17 Apr 2017 19:24:28 -0500 Subject: [PATCH 49/56] BUG: Handle iterable of arrays in convert (#16026) * BUG: Handle iterable of arrays in convert DatetimeConverter.convert can take an array or iterable of arrays. Fixed the converter to detect which case we're in and then re-use the existing logic. --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/core/dtypes/inference.py | 44 ++++++++++++++++++++++ pandas/plotting/_converter.py | 24 +++++++++++- pandas/tests/dtypes/test_inference.py | 22 +++++++++++ pandas/tests/plotting/test_converter.py | 13 +++++++ pandas/tests/plotting/test_datetimelike.py | 8 ++++ 6 files changed, 110 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 9df82b8ac7338..0b95bf98b401d 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1566,6 +1566,7 @@ Plotting - Bug in ``DataFrame.hist`` where ``plt.tight_layout`` caused an ``AttributeError`` (use ``matplotlib >= 2.0.1``) (:issue:`9351`) - Bug in ``DataFrame.boxplot`` where ``fontsize`` was not applied to the tick labels on both axes (:issue:`15108`) +- Bug in the date and time converters pandas registers with matplotlib not handling multiple dimensions (:issue:`16026`) - Bug in ``pd.scatter_matrix()`` could accept either ``color`` or ``c``, but not both (:issue:`14855`) Groupby/Resample/Rolling diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index b0a93d24228af..66f4d87aa8e33 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -273,6 +273,50 @@ def is_list_like(obj): not isinstance(obj, string_and_binary_types)) +def is_nested_list_like(obj): + """ + 
Check if the object is list-like, and that all of its elements + are also list-like. + + .. versionadded:: 0.20.0 + + Parameters + ---------- + obj : The object to check. + + Returns + ------- + is_list_like : bool + Whether `obj` has list-like properties. + + Examples + -------- + >>> is_nested_list_like([[1, 2, 3]]) + True + >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) + True + >>> is_nested_list_like(["foo"]) + False + >>> is_nested_list_like([]) + False + >>> is_nested_list_like([[1, 2, 3], 1]) + False + + Notes + ----- + This won't reliably detect whether a consumable iterator (e. g. + a generator) is a nested-list-like without consuming the iterator. + To avoid consuming it, we always return False if the outer container + doesn't define `__len__`. + + See Also + -------- + is_list_like + """ + return (is_list_like(obj) and hasattr(obj, '__len__') and + len(obj) > 0 and all(is_list_like(item) for item in obj)) + + def is_dict_like(obj): """ Check if the object is dict-like. diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 0e51e95057be2..9621ee3d0cad4 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -10,13 +10,14 @@ from matplotlib.ticker import Formatter, AutoLocator, Locator from matplotlib.transforms import nonsingular - from pandas.core.dtypes.common import ( is_float, is_integer, is_integer_dtype, is_float_dtype, is_datetime64_ns_dtype, - is_period_arraylike) + is_period_arraylike, + is_nested_list_like +) from pandas.compat import lrange import pandas.compat as compat @@ -127,6 +128,15 @@ class PeriodConverter(dates.DateConverter): @staticmethod def convert(values, units, axis): + if is_nested_list_like(values): + values = [PeriodConverter._convert_1d(v, units, axis) + for v in values] + else: + values = PeriodConverter._convert_1d(values, units, axis) + return values + + @staticmethod + def _convert_1d(values, units, axis): if not hasattr(axis, 'freq'): raise TypeError('Axis must have 
`freq` set to convert to Periods') valid_types = (compat.string_types, datetime, @@ -178,6 +188,16 @@ class DatetimeConverter(dates.DateConverter): @staticmethod def convert(values, unit, axis): + # values might be a 1-d array, or a list-like of arrays. + if is_nested_list_like(values): + values = [DatetimeConverter._convert_1d(v, unit, axis) + for v in values] + else: + values = DatetimeConverter._convert_1d(values, unit, axis) + return values + + @staticmethod + def _convert_1d(values, unit, axis): def try_parse(values): try: return _dt_to_float_ordinal(tools.to_datetime(values)) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 94d1d21d59d88..dd8f65a8e48ff 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -11,6 +11,7 @@ from datetime import datetime, date, timedelta, time import numpy as np import pytz +import pytest import pandas as pd from pandas._libs import tslib, lib @@ -66,6 +67,27 @@ def test_is_list_like(): assert not inference.is_list_like(f) +@pytest.mark.parametrize('inner', [ + [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]), + Series([]), Series(['a']).str, (x for x in range(5)) +]) +@pytest.mark.parametrize('outer', [ + list, Series, np.array, tuple +]) +def test_is_nested_list_like_passes(inner, outer): + result = outer([inner for _ in range(5)]) + assert inference.is_list_like(result) + + +@pytest.mark.parametrize('obj', [ + 'abc', [], [1], (1,), ['a'], 'a', {'a'}, + [1, 2, 3], Series([1]), DataFrame({"A": [1]}), + ([1, 2] for _ in range(5)), +]) +def test_is_nested_list_like_fails(obj): + assert not inference.is_nested_list_like(obj) + + def test_is_dict_like(): passes = [{}, {'A': 1}, Series([1])] fails = ['1', 1, [1, 2], (1, 2), range(2), Index([1])] diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index 683f4ee89687f..30eb3ef24fe30 100644 --- a/pandas/tests/plotting/test_converter.py +++ 
b/pandas/tests/plotting/test_converter.py @@ -138,6 +138,13 @@ def _assert_less(ts1, ts2): _assert_less(ts, ts + Milli()) _assert_less(ts, ts + Micro(50)) + def test_convert_nested(self): + inner = [Timestamp('2017-01-01', Timestamp('2017-01-02'))] + data = [inner, inner] + result = self.dtc.convert(data, None, None) + expected = [self.dtc.convert(x, None, None) for x in data] + assert result == expected + class TestPeriodConverter(tm.TestCase): @@ -196,3 +203,9 @@ def test_integer_passthrough(self): rs = self.pc.convert([0, 1], None, self.axis) xp = [0, 1] self.assertEqual(rs, xp) + + def test_convert_nested(self): + data = ['2012-1-1', '2012-1-2'] + r1 = self.pc.convert([data, data], None, self.axis) + r2 = [self.pc.convert(data, None, self.axis) for _ in range(2)] + assert r1 == r2 diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 547770ebcf6e5..4beb804acacc5 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1334,6 +1334,14 @@ def test_timedelta_plot(self): s = Series(np.random.randn(len(index)), index) _check_plot_works(s.plot) + def test_hist(self): + # https://github.com/matplotlib/matplotlib/issues/8459 + rng = date_range('1/1/2011', periods=10, freq='H') + x = rng + w1 = np.arange(0, 1, .1) + w2 = np.arange(0, 1, .1)[::-1] + self.plt.hist([x, x], weights=[w1, w2]) + def _check_plot_works(f, freq=None, series=None, *args, **kwargs): import matplotlib.pyplot as plt From f53d38b94d963cff081b4fe0a1e7242e8d5eb221 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Tue, 18 Apr 2017 10:34:36 +0200 Subject: [PATCH 50/56] BUG: show series length in repr when truncated (GH15962) (#15974) --- doc/source/whatsnew/v0.20.0.txt | 3 ++ pandas/core/series.py | 40 ++++++++------------------ pandas/io/formats/format.py | 3 +- pandas/tests/io/formats/test_format.py | 33 +++++++++++++++++---- pandas/tests/sparse/test_format.py | 10 ++++--- 
pandas/tests/test_categorical.py | 2 +- 6 files changed, 51 insertions(+), 40 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 0b95bf98b401d..4583e0d6eb836 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -518,6 +518,8 @@ Other Enhancements - :method:`~MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. (:issue:`15694`) - ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`) - ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`) +- The ``display.show_dimensions`` option can now also be used to specify + whether the length of a ``Series`` should be shown in its repr (:issue:`7117`). - ``parallel_coordinates()`` has gained a ``sort_labels`` keyword arg that sorts class labels and the colours assigned to them (:issue:`15908`) @@ -1560,6 +1562,7 @@ I/O - Bug in ``pd.read_hdf()`` passing a ``Timestamp`` to the ``where`` parameter with a non date column (:issue:`15492`) - Bug in ``DataFrame.to_stata()`` and ``StataWriter`` which produces incorrectly formatted files to be produced for some locales (:issue:`13856`) - Bug in ``StataReader`` and ``StataWriter`` which allows invalid encodings (:issue:`15723`) +- Bug in the ``Series`` repr not showing the length when the output was truncated (:issue:`15962`). 
Plotting ^^^^^^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index 9022bff092ac3..2a99481274e9e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -980,9 +980,10 @@ def __unicode__(self): width, height = get_terminal_size() max_rows = (height if get_option("display.max_rows") == 0 else get_option("display.max_rows")) + show_dimensions = get_option("display.show_dimensions") self.to_string(buf=buf, name=self.name, dtype=self.dtype, - max_rows=max_rows) + max_rows=max_rows, length=show_dimensions) result = buf.getvalue() return result @@ -1021,31 +1022,6 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, formatted : string (if not buffer passed) """ - the_repr = self._get_repr(float_format=float_format, na_rep=na_rep, - header=header, index=index, length=length, - dtype=dtype, name=name, max_rows=max_rows) - - # catch contract violations - if not isinstance(the_repr, compat.text_type): - raise AssertionError("result must be of type unicode, type" - " of result is {0!r}" - "".format(the_repr.__class__.__name__)) - - if buf is None: - return the_repr - else: - try: - buf.write(the_repr) - except AttributeError: - with open(buf, 'w') as f: - f.write(the_repr) - - def _get_repr(self, name=False, header=True, index=True, length=True, - dtype=True, na_rep='NaN', float_format=None, max_rows=None): - """ - - Internal function, should always return unicode string - """ formatter = fmt.SeriesFormatter(self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, @@ -1053,12 +1029,20 @@ def _get_repr(self, name=False, header=True, index=True, length=True, max_rows=max_rows) result = formatter.to_string() - # TODO: following check prob. not neces. 
+ # catch contract violations if not isinstance(result, compat.text_type): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(result.__class__.__name__)) - return result + + if buf is None: + return result + else: + try: + buf.write(result) + except AttributeError: + with open(buf, 'w') as f: + f.write(result) def __iter__(self): """ provide iteration over the values of the Series diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 20df60eb96299..ae0814d5566a8 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -199,7 +199,8 @@ def _get_footer(self): escape_chars=('\t', '\r', '\n')) footer += ("Name: %s" % series_name) if name is not None else "" - if self.length: + if (self.length is True or + (self.length == 'truncate' and self.truncate_v)): if footer: footer += ', ' footer += 'Length: %d' % len(self.series) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 354ce99f567ea..20fbaf781d72f 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1770,12 +1770,14 @@ def test_east_asian_unicode_series(self): name=u'おおおおおおお') expected = (u"0 あ\n ... \n" - u"3 ええええ\nName: おおおおおおお, dtype: object") + u"3 ええええ\n" + u"Name: おおおおおおお, Length: 4, dtype: object") self.assertEqual(_rep(s), expected) s.index = [u'ああ', u'いいいい', u'う', u'えええ'] expected = (u"ああ あ\n ... \n" - u"えええ ええええ\nName: おおおおおおお, dtype: object") + u"えええ ええええ\n" + u"Name: おおおおおおお, Length: 4, dtype: object") self.assertEqual(_rep(s), expected) # Emable Unicode option ----------------------------------------- @@ -1846,14 +1848,15 @@ def test_east_asian_unicode_series(self): s = Series([u'あ', u'いい', u'ううう', u'ええええ'], name=u'おおおおおおお') expected = (u"0 あ\n ... 
\n" - u"3 ええええ\nName: おおおおおおお, dtype: object") + u"3 ええええ\n" + u"Name: おおおおおおお, Length: 4, dtype: object") self.assertEqual(_rep(s), expected) s.index = [u'ああ', u'いいいい', u'う', u'えええ'] expected = (u"ああ あ\n" u" ... \n" u"えええ ええええ\n" - u"Name: おおおおおおお, dtype: object") + u"Name: おおおおおおお, Length: 4, dtype: object") self.assertEqual(_rep(s), expected) # ambiguous unicode @@ -2021,7 +2024,8 @@ def test_max_multi_index_display(self): # Make sure #8532 is fixed def test_consistent_format(self): s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10) - with option_context("display.max_rows", 10): + with option_context("display.max_rows", 10, + "display.show_dimensions", False): res = repr(s) exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 ' '1.0000\n4 1.0000\n ... \n125 ' @@ -2040,7 +2044,8 @@ def chck_ncols(self, s): def test_format_explicit(self): test_sers = gen_series_formatting() - with option_context("display.max_rows", 4): + with option_context("display.max_rows", 4, + "display.show_dimensions", False): res = repr(test_sers['onel']) exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object' self.assertEqual(exp, res) @@ -2087,6 +2092,22 @@ def getndots(s): strrepr = repr(s).replace('\n', '') self.assertEqual(getndots(strrepr), 3) + def test_show_dimensions(self): + # gh-7117 + s = Series(range(5)) + + assert 'Length' not in repr(s) + + with option_context("display.max_rows", 4): + assert 'Length' in repr(s) + + with option_context("display.show_dimensions", True): + assert 'Length' in repr(s) + + with option_context("display.max_rows", 4, + "display.show_dimensions", False): + assert 'Length' not in repr(s) + def test_to_string_name(self): s = Series(range(100), dtype='int64') s.name = 'myser' diff --git a/pandas/tests/sparse/test_format.py b/pandas/tests/sparse/test_format.py index ba870a2c33801..eafb493319e40 100644 --- a/pandas/tests/sparse/test_format.py +++ b/pandas/tests/sparse/test_format.py @@ -33,7 +33,7 @@ def test_sparse_max_row(self): # GH 10560 result = repr(s) 
exp = ("0 1.0\n ... \n4 NaN\n" - "dtype: float64\nBlockIndex\n" + "Length: 5, dtype: float64\nBlockIndex\n" "Block locations: array([0, 3]{0})\n" "Block lengths: array([1, 1]{0})".format(dfm)) self.assertEqual(result, exp) @@ -52,7 +52,8 @@ def test_sparse_mi_max_row(self): "Block lengths: array([1, 1]{0})".format(dfm)) self.assertEqual(result, exp) - with option_context("display.max_rows", 3): + with option_context("display.max_rows", 3, + "display.show_dimensions", False): # GH 13144 result = repr(s) exp = ("A 0 1.0\n ... \nC 2 NaN\n" @@ -77,7 +78,7 @@ def test_sparse_bool(self): with option_context("display.max_rows", 3): result = repr(s) exp = ("0 True\n ... \n5 False\n" - "dtype: bool\nBlockIndex\n" + "Length: 6, dtype: bool\nBlockIndex\n" "Block locations: array([0, 3]{0})\n" "Block lengths: array([1, 1]{0})".format(dtype)) self.assertEqual(result, exp) @@ -94,7 +95,8 @@ def test_sparse_int(self): "Block lengths: array([1, 1]{0})".format(dtype)) self.assertEqual(result, exp) - with option_context("display.max_rows", 3): + with option_context("display.max_rows", 3, + "display.show_dimensions", False): result = repr(s) exp = ("0 0\n ..\n5 0\n" "dtype: int64\nBlockIndex\n" diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 3296673e96316..17f55b41970b1 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -2088,7 +2088,7 @@ def test_repr(self): a = pd.Series(pd.Categorical(["a", "b"] * 25)) exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" + - "dtype: category\nCategories (2, object): [a, b]") + "Length: 50, dtype: category\nCategories (2, object): [a, b]") with option_context("display.max_rows", 5): self.assertEqual(exp, repr(a)) From 0ba305b15713aac0ada62e13768732485e374902 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Tue, 18 Apr 2017 12:01:04 +0200 Subject: [PATCH 51/56] ENH: level keyword in rename (GH4160) (#13766) --- doc/source/whatsnew/v0.20.0.txt | 2 + 
pandas/core/generic.py | 9 +++- pandas/core/internals.py | 14 ++++-- pandas/tests/frame/test_alter_axes.py | 67 ++++++++++++++++++++++++--- 4 files changed, 81 insertions(+), 11 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 4583e0d6eb836..44c79fd131705 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -490,6 +490,8 @@ Other Enhancements - ``DataFrame.plot`` now prints a title above each subplot if ``suplots=True`` and ``title`` is a list of strings (:issue:`14753`) - ``DataFrame.plot`` can pass the matplotlib 2.0 default color cycle as a single string as color parameter, see `here `__. (:issue:`15516`) - ``Series.interpolate()`` now supports timedelta as an index type with ``method='time'`` (:issue:`6424`) +- Addition of a ``level`` keyword to ``DataFrame/Series.rename`` to rename + labels in the specified level of a MultiIndex (:issue:`4160`). - ``Timedelta.isoformat`` method added for formatting Timedeltas as an `ISO 8601 duration`_. See the :ref:`Timedelta docs ` (:issue:`15136`) - ``.select_dtypes()`` now allows the string ``datetimetz`` to generically select datetimes with tz (:issue:`14910`) - The ``.to_latex()`` method will now accept ``multicolumn`` and ``multirow`` arguments to use the accompanying LaTeX enhancements diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5f0c65ddfb9c3..841df3727e5a6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -645,6 +645,9 @@ def swaplevel(self, i=-2, j=-1, axis=0): inplace : boolean, default False Whether to return a new %(klass)s. If True then value of copy is ignored. + level : int or level name, default None + In case of a MultiIndex, only rename labels in the specified + level. 
Returns ------- @@ -701,6 +704,7 @@ def rename(self, *args, **kwargs): axes, kwargs = self._construct_axes_from_arguments(args, kwargs) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) + level = kwargs.pop('level', None) if kwargs: raise TypeError('rename() got an unexpected keyword ' @@ -734,7 +738,10 @@ def f(x): f = _get_rename_function(v) baxis = self._get_block_manager_axis(axis) - result._data = result._data.rename_axis(f, axis=baxis, copy=copy) + if level is not None: + level = self.axes[axis]._get_level_number(level) + result._data = result._data.rename_axis(f, axis=baxis, copy=copy, + level=level) result._clear_item_cache() if inplace: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index c698bcb9fa5ee..5a87574455a63 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2837,7 +2837,7 @@ def set_axis(self, axis, new_labels): self.axes[axis] = new_labels - def rename_axis(self, mapper, axis, copy=True): + def rename_axis(self, mapper, axis, copy=True, level=None): """ Rename one of axes. @@ -2846,10 +2846,11 @@ def rename_axis(self, mapper, axis, copy=True): mapper : unary callable axis : int copy : boolean, default True + level : int, default None """ obj = self.copy(deep=copy) - obj.set_axis(axis, _transform_index(self.axes[axis], mapper)) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) return obj def add_prefix(self, prefix): @@ -4735,15 +4736,20 @@ def _safe_reshape(arr, new_shape): return arr -def _transform_index(index, func): +def _transform_index(index, func, level=None): """ Apply function to all values found in index. This includes transforming multiindex entries separately. + Only apply function to one level of the MultiIndex if level is specified. 
""" if isinstance(index, MultiIndex): - items = [tuple(func(y) for y in x) for x in index] + if level is not None: + items = [tuple(func(y) if i == level else y + for i, y in enumerate(x)) for x in index] + else: + items = [tuple(func(y) for y in x) for x in index] return MultiIndex.from_tuples(items, names=index.names) else: items = [func(x) for x in index] diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 9add944d2293e..ce4dd6d38eeeb 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -415,15 +415,20 @@ def test_rename(self): pd.Index(['bar', 'foo'], name='name')) self.assertEqual(renamed.index.name, renamer.index.name) - # MultiIndex + def test_rename_multiindex(self): + tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')] tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')] index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar']) columns = MultiIndex.from_tuples( tuples_columns, names=['fizz', 'buzz']) - renamer = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) - renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, - columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) + df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) + + # + # without specifying level -> accross all levels + + renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, + columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')], names=['foo', 'bar']) @@ -432,8 +437,58 @@ def test_rename(self): names=['fizz', 'buzz']) self.assert_index_equal(renamed.index, new_index) self.assert_index_equal(renamed.columns, new_columns) - self.assertEqual(renamed.index.names, renamer.index.names) - self.assertEqual(renamed.columns.names, renamer.columns.names) + self.assertEqual(renamed.index.names, df.index.names) + self.assertEqual(renamed.columns.names, df.columns.names) + + # + # with specifying a level (GH13766) 
+ + # dict + new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), + ('fizz2', 'buzz2')], + names=['fizz', 'buzz']) + renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, + level=0) + self.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, + level='fizz') + self.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'), + ('fizz2', 'buzz3')], + names=['fizz', 'buzz']) + renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, + level=1) + self.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}, + level='buzz') + self.assert_index_equal(renamed.columns, new_columns) + + # function + func = str.upper + new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'), + ('FIZZ2', 'buzz2')], + names=['fizz', 'buzz']) + renamed = df.rename(columns=func, level=0) + self.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level='fizz') + self.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'), + ('fizz2', 'BUZZ2')], + names=['fizz', 'buzz']) + renamed = df.rename(columns=func, level=1) + self.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level='buzz') + self.assert_index_equal(renamed.columns, new_columns) + + # index + new_index = MultiIndex.from_tuples([('foo3', 'bar1'), + ('foo2', 'bar2')], + names=['foo', 'bar']) + renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, + level=0) + self.assert_index_equal(renamed.index, new_index) def test_rename_nocopy(self): renamed = self.frame.rename(columns={'C': 'foo'}, copy=False) From d16cce89521c84fcb9c7b7bb2e95629a6fe7acb7 Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Tue, 18 Apr 2017 06:10:54 -0400 Subject: [PATCH 52/56] CLN: move/reorg pandas.tools -> pandas.core.reshape xref #13634 
Author: Jeff Reback Closes #16032 from jreback/move_tools and squashes the following commits: 376cef5 [Jeff Reback] move to_numeric cc6e059 [Jeff Reback] CLN: move/reorg pandas.tools -> pandas.core.reshape --- doc/source/whatsnew/v0.20.0.txt | 1 + pandas/__init__.py | 9 +- pandas/core/algorithms.py | 2 +- pandas/core/api.py | 6 +- pandas/core/base.py | 4 +- pandas/core/categorical.py | 2 +- pandas/core/computation/expr.py | 2 +- pandas/core/dtypes/cast.py | 164 +- pandas/core/frame.py | 20 +- pandas/core/groupby.py | 20 +- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/multi.py | 2 +- pandas/core/panel.py | 4 +- .../{tests/tools => core/reshape}/__init__.py | 0 pandas/core/reshape/api.py | 8 + pandas/{tools => core/reshape}/concat.py | 0 pandas/core/reshape/merge.py | 1481 ++++++++++++ pandas/{tools => core/reshape}/pivot.py | 5 +- pandas/core/{ => reshape}/reshape.py | 2 +- pandas/{tools => core/reshape}/tile.py | 0 pandas/core/reshape/util.py | 76 + pandas/core/series.py | 4 +- pandas/io/formats/format.py | 4 +- pandas/plotting/_core.py | 2 +- pandas/tests/dtypes/test_cast.py | 370 ++- pandas/tests/dtypes/test_convert.py | 0 pandas/tests/reshape/__init__.py | 0 .../data/allow_exact_matches.csv | 0 .../allow_exact_matches_and_tolerance.csv | 0 pandas/tests/{tools => reshape}/data/asof.csv | 0 .../tests/{tools => reshape}/data/asof2.csv | 0 .../{tools => reshape}/data/cut_data.csv | 0 .../tests/{tools => reshape}/data/quotes.csv | 0 .../tests/{tools => reshape}/data/quotes2.csv | 0 .../{tools => reshape}/data/tolerance.csv | 0 .../tests/{tools => reshape}/data/trades.csv | 0 .../tests/{tools => reshape}/data/trades2.csv | 0 .../tests/{tools => reshape}/test_concat.py | 0 .../tests/{tools => reshape}/test_hashing.py | 0 pandas/tests/{tools => reshape}/test_join.py | 2 +- pandas/tests/{tools => reshape}/test_merge.py | 4 +- .../{tools => reshape}/test_merge_asof.py | 2 +- .../{tools => reshape}/test_merge_ordered.py | 0 pandas/tests/{tools => 
reshape}/test_pivot.py | 2 +- pandas/tests/{ => reshape}/test_reshape.py | 5 +- pandas/tests/{tools => reshape}/test_tile.py | 2 +- .../test_union_categoricals.py | 0 pandas/tests/reshape/test_util.py | 49 + pandas/tests/sparse/test_series.py | 2 +- pandas/tests/test_algos.py | 2 +- pandas/tests/test_generic.py | 2076 ----------------- pandas/tests/test_panel.py | 6 +- pandas/tests/test_util.py | 78 +- pandas/tests/tools/test_util.py | 485 ---- pandas/tools/merge.py | 1482 +----------- pandas/tools/util.py | 245 -- setup.py | 4 +- 57 files changed, 2281 insertions(+), 4355 deletions(-) rename pandas/{tests/tools => core/reshape}/__init__.py (100%) create mode 100644 pandas/core/reshape/api.py rename pandas/{tools => core/reshape}/concat.py (100%) create mode 100644 pandas/core/reshape/merge.py rename pandas/{tools => core/reshape}/pivot.py (99%) rename pandas/core/{ => reshape}/reshape.py (99%) rename pandas/{tools => core/reshape}/tile.py (100%) create mode 100644 pandas/core/reshape/util.py create mode 100644 pandas/tests/dtypes/test_convert.py create mode 100644 pandas/tests/reshape/__init__.py rename pandas/tests/{tools => reshape}/data/allow_exact_matches.csv (100%) rename pandas/tests/{tools => reshape}/data/allow_exact_matches_and_tolerance.csv (100%) rename pandas/tests/{tools => reshape}/data/asof.csv (100%) rename pandas/tests/{tools => reshape}/data/asof2.csv (100%) rename pandas/tests/{tools => reshape}/data/cut_data.csv (100%) rename pandas/tests/{tools => reshape}/data/quotes.csv (100%) rename pandas/tests/{tools => reshape}/data/quotes2.csv (100%) rename pandas/tests/{tools => reshape}/data/tolerance.csv (100%) rename pandas/tests/{tools => reshape}/data/trades.csv (100%) rename pandas/tests/{tools => reshape}/data/trades2.csv (100%) rename pandas/tests/{tools => reshape}/test_concat.py (100%) rename pandas/tests/{tools => reshape}/test_hashing.py (100%) rename pandas/tests/{tools => reshape}/test_join.py (99%) rename pandas/tests/{tools => 
reshape}/test_merge.py (99%) rename pandas/tests/{tools => reshape}/test_merge_asof.py (99%) rename pandas/tests/{tools => reshape}/test_merge_ordered.py (100%) rename pandas/tests/{tools => reshape}/test_pivot.py (99%) rename pandas/tests/{ => reshape}/test_reshape.py (99%) rename pandas/tests/{tools => reshape}/test_tile.py (99%) rename pandas/tests/{tools => reshape}/test_union_categoricals.py (100%) create mode 100644 pandas/tests/reshape/test_util.py delete mode 100644 pandas/tests/test_generic.py delete mode 100644 pandas/tests/tools/test_util.py delete mode 100644 pandas/tools/util.py diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 44c79fd131705..9fe0b66028ac5 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1353,6 +1353,7 @@ If indicated, a deprecation warning will be issued if you reference theses modul "pandas.parser", "pandas.io.libparsers", "X" "pandas.formats", "pandas.io.formats", "" "pandas.sparse", "pandas.core.sparse", "" + "pandas.tools", "pandas.core.reshape", "" "pandas.types", "pandas.core.dtypes", "" "pandas.io.sas.saslib", "pandas.io.sas.libsas", "" "pandas._join", "pandas._libs.join", "" diff --git a/pandas/__init__.py b/pandas/__init__.py index 5f6d54fd904b1..43fa362b66ed5 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -44,11 +44,7 @@ from pandas.stats.api import * from pandas.tseries.api import * from pandas.core.computation.api import * - -from pandas.tools.concat import concat -from pandas.tools.merge import (merge, ordered_merge, - merge_ordered, merge_asof) -from pandas.tools.pivot import pivot_table, crosstab +from pandas.core.reshape.api import * # deprecate tools.plotting, plot_params and scatter_matrix on the top namespace import pandas.tools.plotting @@ -58,9 +54,6 @@ 'pandas.scatter_matrix', pandas.plotting.scatter_matrix, 'pandas.plotting.scatter_matrix') -from pandas.tools.tile import cut, qcut -from pandas.tools.util import to_numeric -from 
pandas.core.reshape import melt from pandas.util.print_versions import show_versions from pandas.io.api import * from pandas.util._tester import test diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6df7fce631a3c..63df4b3d94bc8 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -605,7 +605,7 @@ def value_counts(values, sort=True, ascending=False, normalize=False, if bins is not None: try: - from pandas.tools.tile import cut + from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: diff --git a/pandas/core/api.py b/pandas/core/api.py index 865fe367873d8..f3191283b85eb 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -18,10 +18,12 @@ from pandas.core.frame import DataFrame from pandas.core.panel import Panel, WidePanel from pandas.core.panel4d import Panel4D -from pandas.core.reshape import (pivot_simple as pivot, get_dummies, - lreshape, wide_to_long) +from pandas.core.reshape.reshape import ( + pivot_simple as pivot, get_dummies, + lreshape, wide_to_long) from pandas.core.indexing import IndexSlice +from pandas.core.dtypes.cast import to_numeric from pandas.tseries.offsets import DateOffset from pandas.tseries.tools import to_datetime from pandas.tseries.index import (DatetimeIndex, Timestamp, diff --git a/pandas/core/base.py b/pandas/core/base.py index e30751a6582f9..87c649c5fbd79 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -522,7 +522,7 @@ def nested_renaming_depr(level=4): len(obj.columns.intersection(keys)) != len(keys)): nested_renaming_depr() - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat def _agg_1dim(name, how, subset=None): """ @@ -671,7 +671,7 @@ def is_any_frame(): return result, True def _aggregate_multiple_funcs(self, arg, _level, _axis): - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat if _axis != 0: raise 
NotImplementedError("axis other than 0 is not supported") diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index a12cec33fb350..a3667e9322959 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1995,7 +1995,7 @@ def describe(self): counts = self.value_counts(dropna=False) freqs = counts / float(counts.sum()) - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = ['counts', 'freqs'] result.index.name = 'categories' diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 51785ebcd9ec8..73c27f4d772ca 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -13,7 +13,7 @@ from pandas.core.base import StringMixin from pandas.core import common as com import pandas.io.formats.printing as printing -from pandas.tools.util import compose +from pandas.core.reshape.util import compose from pandas.core.computation.ops import ( _cmp_ops_syms, _bool_ops_syms, _arith_ops_syms, _unary_ops_syms, is_term) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 3954fb5c93da8..3c1f480787d3a 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -5,6 +5,7 @@ import numpy as np import warnings +import pandas as pd from pandas._libs import tslib, lib from pandas._libs.tslib import iNaT from pandas.compat import string_types, text_type, PY3 @@ -18,6 +19,8 @@ is_integer_dtype, is_datetime_or_timedelta_dtype, is_bool_dtype, is_scalar, + is_numeric_dtype, is_decimal, + is_number, _string_dtypes, _coerce_to_dtype, _ensure_int8, _ensure_int16, @@ -25,7 +28,8 @@ _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE, _POSSIBLY_CAST_DTYPES) from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype -from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries +from .generic import (ABCDatetimeIndex, ABCPeriodIndex, + ABCSeries, ABCIndexClass) from .missing import isnull, 
notnull from .inference import is_list_like @@ -1025,3 +1029,161 @@ def find_common_type(types): return np.object return np.find_common_type(types, []) + + +def to_numeric(arg, errors='raise', downcast=None): + """ + Convert argument to a numeric type. + + Parameters + ---------- + arg : list, tuple, 1-d array, or Series + errors : {'ignore', 'raise', 'coerce'}, default 'raise' + - If 'raise', then invalid parsing will raise an exception + - If 'coerce', then invalid parsing will be set as NaN + - If 'ignore', then invalid parsing will return the input + downcast : {'integer', 'signed', 'unsigned', 'float'} , default None + If not None, and if the data has been successfully cast to a + numerical dtype (or if the data was numeric to begin with), + downcast that resulting data to the smallest numerical dtype + possible according to the following rules: + + - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) + - 'unsigned': smallest unsigned int dtype (min.: np.uint8) + - 'float': smallest float dtype (min.: np.float32) + + As this behaviour is separate from the core conversion to + numeric values, any errors raised during the downcasting + will be surfaced regardless of the value of the 'errors' input. + + In addition, downcasting will only occur if the size + of the resulting data's dtype is strictly larger than + the dtype it is to be cast to, so if none of the dtypes + checked satisfy that specification, no downcasting will be + performed on the data. + + .. versionadded:: 0.19.0 + + Returns + ------- + ret : numeric if parsing succeeded. + Return type depends on input. 
Series if Series, otherwise ndarray + + Examples + -------- + Take separate series and convert to numeric, coercing when told to + + >>> import pandas as pd + >>> s = pd.Series(['1.0', '2', -3]) + >>> pd.to_numeric(s) + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float64 + >>> pd.to_numeric(s, downcast='float') + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float32 + >>> pd.to_numeric(s, downcast='signed') + 0 1 + 1 2 + 2 -3 + dtype: int8 + >>> s = pd.Series(['apple', '1.0', '2', -3]) + >>> pd.to_numeric(s, errors='ignore') + 0 apple + 1 1.0 + 2 2 + 3 -3 + dtype: object + >>> pd.to_numeric(s, errors='coerce') + 0 NaN + 1 1.0 + 2 2.0 + 3 -3.0 + dtype: float64 + """ + if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): + raise ValueError('invalid downcasting method provided') + + is_series = False + is_index = False + is_scalars = False + + if isinstance(arg, ABCSeries): + is_series = True + values = arg.values + elif isinstance(arg, ABCIndexClass): + is_index = True + values = arg.asi8 + if values is None: + values = arg.values + elif isinstance(arg, (list, tuple)): + values = np.array(arg, dtype='O') + elif is_scalar(arg): + if is_decimal(arg): + return float(arg) + if is_number(arg): + return arg + is_scalars = True + values = np.array([arg], dtype='O') + elif getattr(arg, 'ndim', 1) > 1: + raise TypeError('arg must be a list, tuple, 1-d array, or Series') + else: + values = arg + + try: + if is_numeric_dtype(values): + pass + elif is_datetime_or_timedelta_dtype(values): + values = values.astype(np.int64) + else: + values = _ensure_object(values) + coerce_numeric = False if errors in ('ignore', 'raise') else True + values = lib.maybe_convert_numeric(values, set(), + coerce_numeric=coerce_numeric) + + except Exception: + if errors == 'raise': + raise + + # attempt downcast only if the data has been successfully converted + # to a numerical dtype and if a downcast method has been specified + if downcast is not None and is_numeric_dtype(values): + typecodes = None + + if 
downcast in ('integer', 'signed'): + typecodes = np.typecodes['Integer'] + elif downcast == 'unsigned' and np.min(values) >= 0: + typecodes = np.typecodes['UnsignedInteger'] + elif downcast == 'float': + typecodes = np.typecodes['Float'] + + # pandas support goes only to np.float32, + # as float dtypes smaller than that are + # extremely rare and not well supported + float_32_char = np.dtype(np.float32).char + float_32_ind = typecodes.index(float_32_char) + typecodes = typecodes[float_32_ind:] + + if typecodes is not None: + # from smallest to largest + for dtype in typecodes: + if np.dtype(dtype).itemsize <= values.dtype.itemsize: + values = maybe_downcast_to_dtype(values, dtype) + + # successful conversion + if values.dtype == dtype: + break + + if is_series: + return pd.Series(values, index=arg.index, name=arg.name) + elif is_index: + # because we want to coerce to numeric if possible, + # do not use _shallow_copy_with_infer + return pd.Index(values, name=arg.name) + elif is_scalars: + return values[0] + else: + return values diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 732ce7ce695b0..9b9039455b948 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3956,7 +3956,7 @@ def pivot(self, index=None, columns=None, values=None): """ - from pandas.core.reshape import pivot + from pandas.core.reshape.reshape import pivot return pivot(self, index=index, columns=columns, values=values) def stack(self, level=-1, dropna=True): @@ -3992,7 +3992,7 @@ def stack(self, level=-1, dropna=True): ------- stacked : DataFrame or Series """ - from pandas.core.reshape import stack, stack_multiple + from pandas.core.reshape.reshape import stack, stack_multiple if isinstance(level, (tuple, list)): return stack_multiple(self, level, dropna=dropna) @@ -4057,7 +4057,7 @@ def unstack(self, level=-1, fill_value=None): ------- unstacked : DataFrame or Series """ - from pandas.core.reshape import unstack + from pandas.core.reshape.reshape import unstack return 
unstack(self, level, fill_value) _shared_docs['melt'] = (""" @@ -4159,7 +4159,7 @@ def unstack(self, level=-1, fill_value=None): other='melt')) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None): - from pandas.core.reshape import melt + from pandas.core.reshape.reshape import melt return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level) @@ -4609,7 +4609,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.loc[:, self.columns] - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: @@ -4741,8 +4741,8 @@ def join(self, other, on=None, how='left', lsuffix='', rsuffix='', def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False): - from pandas.tools.merge import merge - from pandas.tools.concat import concat + from pandas.core.reshape.merge import merge + from pandas.core.reshape.concat import concat if isinstance(other, Series): if other.name is None: @@ -4786,7 +4786,7 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='', def merge(self, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False): - from pandas.tools.merge import merge + from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, @@ -4846,7 +4846,7 @@ def round(self, decimals=0, *args, **kwargs): Series.round """ - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.iteritems(): @@ -5523,7 +5523,7 @@ def isin(self, values): """ if 
isinstance(values, dict): from collections import defaultdict - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat values = defaultdict(list, values) return concat((self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns)), axis=1) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 3fd41f3456732..8f788aed3950d 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -870,7 +870,7 @@ def _wrap_applied_output(self, *args, **kwargs): raise AbstractMethodError(self) def _concat_objects(self, keys, values, not_indexed_same=False): - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat def reset_identity(values): # reset the identities of the components @@ -2985,7 +2985,7 @@ def transform(self, func, *args, **kwargs): s = klass(res, indexer) results.append(s) - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat result = concat(results).sort_index() # we will only try to coerce the result type if @@ -3126,8 +3126,8 @@ def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): from functools import partial - from pandas.tools.tile import cut - from pandas.tools.merge import _get_join_indexers + from pandas.core.reshape.tile import cut + from pandas.core.reshape.merge import _get_join_indexers if bins is not None and not np.iterable(bins): # scalar bins cannot be done at top level @@ -3509,7 +3509,7 @@ def _decide_output_index(self, output, labels): def _wrap_applied_output(self, keys, values, not_indexed_same=False): from pandas.core.index import _all_indexes_same - from pandas.tools.util import to_numeric + from pandas.core.dtypes.cast import to_numeric if len(keys) == 0: return DataFrame(index=keys) @@ -3600,7 +3600,7 @@ def first_non_None_value(values): # still a series # path added as of GH 5545 elif all_indexed_same: - from pandas.tools.concat import concat + from 
pandas.core.reshape.concat import concat return concat(values) if not all_indexed_same: @@ -3633,7 +3633,7 @@ def first_non_None_value(values): else: # GH5788 instead of stacking; concat gets the # dtypes correct - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat result = concat(values, keys=key_index, names=key_index.names, axis=self.axis).unstack() @@ -3684,7 +3684,7 @@ def first_non_None_value(values): not_indexed_same=not_indexed_same) def _transform_general(self, func, *args, **kwargs): - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat applied = [] obj = self._obj_with_exclusions @@ -4071,7 +4071,7 @@ def _iterate_column_groupbys(self): exclusions=self.exclusions) def _apply_to_column_groupbys(self, func): - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat return concat( (func(col_groupby) for _, col_groupby in self._iterate_column_groupbys()), @@ -4151,7 +4151,7 @@ def groupby_series(obj, col=None): if isinstance(obj, Series): results = groupby_series(obj) else: - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5149d45514e2e..705b7a186dced 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3064,7 +3064,7 @@ def _join_multi(self, other, how, return_indexers=True): "implemented") def _join_non_unique(self, other, how='left', return_indexers=False): - from pandas.tools.merge import _get_join_indexers + from pandas.core.reshape.merge import _get_join_indexers left_idx, right_idx = _get_join_indexers([self.values], [other._values], how=how, diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 40e7118ca0f6a..6d9a9aa691f66 100644 --- a/pandas/core/indexes/multi.py +++ 
b/pandas/core/indexes/multi.py @@ -1170,7 +1170,7 @@ def from_product(cls, iterables, sortorder=None, names=None): MultiIndex.from_tuples : Convert list of tuples to MultiIndex """ from pandas.core.categorical import _factorize_from_iterables - from pandas.tools.util import cartesian_product + from pandas.core.reshape.util import cartesian_product labels, levels = _factorize_from_iterables(iterables) labels = cartesian_product(labels) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index fefe75163d033..39d2ebdeec3ac 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -33,7 +33,7 @@ create_block_manager_from_blocks) from pandas.core.ops import _op_descriptions from pandas.core.series import Series -from pandas.tools.util import cartesian_product +from pandas.core.reshape.util import cartesian_product from pandas.util.decorators import (deprecate, Appender) _shared_doc_kwargs = dict( @@ -1294,7 +1294,7 @@ def join(self, other, how='left', lsuffix='', rsuffix=''): ------- joined : Panel """ - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat if isinstance(other, Panel): join_major, join_minor = self._get_join_index(other, how) diff --git a/pandas/tests/tools/__init__.py b/pandas/core/reshape/__init__.py similarity index 100% rename from pandas/tests/tools/__init__.py rename to pandas/core/reshape/__init__.py diff --git a/pandas/core/reshape/api.py b/pandas/core/reshape/api.py new file mode 100644 index 0000000000000..c75e0341918bb --- /dev/null +++ b/pandas/core/reshape/api.py @@ -0,0 +1,8 @@ +# flake8: noqa + +from pandas.core.reshape.concat import concat +from pandas.core.reshape.reshape import melt +from pandas.core.reshape.merge import ( + merge, ordered_merge, merge_ordered, merge_asof) +from pandas.core.reshape.pivot import pivot_table, crosstab +from pandas.core.reshape.tile import cut, qcut diff --git a/pandas/tools/concat.py b/pandas/core/reshape/concat.py similarity index 100% rename from 
pandas/tools/concat.py rename to pandas/core/reshape/concat.py diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py new file mode 100644 index 0000000000000..1ca3786ecc174 --- /dev/null +++ b/pandas/core/reshape/merge.py @@ -0,0 +1,1481 @@ +""" +SQL-style merge routines +""" + +import copy +import warnings +import string + +import numpy as np +from pandas.compat import range, lzip, zip, map, filter +import pandas.compat as compat + +from pandas import (Categorical, Series, DataFrame, + Index, MultiIndex, Timedelta) +from pandas.core.frame import _merge_doc +from pandas.core.dtypes.common import ( + is_datetime64tz_dtype, + is_datetime64_dtype, + needs_i8_conversion, + is_int64_dtype, + is_categorical_dtype, + is_integer_dtype, + is_float_dtype, + is_numeric_dtype, + is_integer, + is_int_or_datetime_dtype, + is_dtype_equal, + is_bool, + is_list_like, + _ensure_int64, + _ensure_float64, + _ensure_object, + _get_dtype) +from pandas.core.dtypes.missing import na_value_for_dtype +from pandas.core.internals import (items_overlap_with_suffix, + concatenate_block_managers) +from pandas.util.decorators import Appender, Substitution + +from pandas.core.sorting import is_int64_overflow_possible +import pandas.core.algorithms as algos +import pandas.core.common as com +from pandas._libs import hashtable as libhashtable, join as libjoin, lib + + +@Substitution('\nleft : DataFrame') +@Appender(_merge_doc, indents=0) +def merge(left, right, how='inner', on=None, left_on=None, right_on=None, + left_index=False, right_index=False, sort=False, + suffixes=('_x', '_y'), copy=True, indicator=False): + op = _MergeOperation(left, right, how=how, on=on, left_on=left_on, + right_on=right_on, left_index=left_index, + right_index=right_index, sort=sort, suffixes=suffixes, + copy=copy, indicator=indicator) + return op.get_result() + + +if __debug__: + merge.__doc__ = _merge_doc % '\nleft : DataFrame' + + +class MergeError(ValueError): + pass + + +def _groupby_and_merge(by, 
on, left, right, _merge_pieces, + check_duplicates=True): + """ + groupby & merge; we are always performing a left-by type operation + + Parameters + ---------- + by: field to group + on: duplicates field + left: left frame + right: right frame + _merge_pieces: function for merging + check_duplicates: boolean, default True + should we check & clean duplicates + """ + + pieces = [] + if not isinstance(by, (list, tuple)): + by = [by] + + lby = left.groupby(by, sort=False) + + # if we can groupby the rhs + # then we can get vastly better perf + try: + + # we will check & remove duplicates if indicated + if check_duplicates: + if on is None: + on = [] + elif not isinstance(on, (list, tuple)): + on = [on] + + if right.duplicated(by + on).any(): + right = right.drop_duplicates(by + on, keep='last') + rby = right.groupby(by, sort=False) + except KeyError: + rby = None + + for key, lhs in lby: + + if rby is None: + rhs = right + else: + try: + rhs = right.take(rby.indices[key]) + except KeyError: + # key doesn't exist in left + lcols = lhs.columns.tolist() + cols = lcols + [r for r in right.columns + if r not in set(lcols)] + merged = lhs.reindex(columns=cols) + merged.index = range(len(merged)) + pieces.append(merged) + continue + + merged = _merge_pieces(lhs, rhs) + + # make sure join keys are in the merged + # TODO, should _merge_pieces do this? 
+ for k in by: + try: + if k in merged: + merged[k] = key + except: + pass + + pieces.append(merged) + + # preserve the original order + # if we have a missing piece this can be reset + from pandas.core.reshape.concat import concat + result = concat(pieces, ignore_index=True) + result = result.reindex(columns=pieces[0].columns, copy=False) + return result, lby + + +def ordered_merge(left, right, on=None, + left_on=None, right_on=None, + left_by=None, right_by=None, + fill_method=None, suffixes=('_x', '_y')): + + warnings.warn("ordered_merge is deprecated and replaced by merge_ordered", + FutureWarning, stacklevel=2) + return merge_ordered(left, right, on=on, + left_on=left_on, right_on=right_on, + left_by=left_by, right_by=right_by, + fill_method=fill_method, suffixes=suffixes) + + +def merge_ordered(left, right, on=None, + left_on=None, right_on=None, + left_by=None, right_by=None, + fill_method=None, suffixes=('_x', '_y'), + how='outer'): + """Perform merge with optional filling/interpolation designed for ordered + data like time series data. Optionally perform group-wise merge (see + examples) + + Parameters + ---------- + left : DataFrame + right : DataFrame + on : label or list + Field names to join on. Must be found in both DataFrames. + left_on : label or list, or array-like + Field names to join on in left DataFrame. 
Can be a vector or list of + vectors of the length of the DataFrame to use a particular vector as + the join key instead of columns + right_on : label or list, or array-like + Field names to join on in right DataFrame or vector/list of vectors per + left_on docs + left_by : column name or list of column names + Group left DataFrame by group columns and merge piece by piece with + right DataFrame + right_by : column name or list of column names + Group right DataFrame by group columns and merge piece by piece with + left DataFrame + fill_method : {'ffill', None}, default None + Interpolation method for data + suffixes : 2-length sequence (tuple, list, ...) + Suffix to apply to overlapping column names in the left and right + side, respectively + how : {'left', 'right', 'outer', 'inner'}, default 'outer' + * left: use only keys from left frame (SQL: left outer join) + * right: use only keys from right frame (SQL: right outer join) + * outer: use union of keys from both frames (SQL: full outer join) + * inner: use intersection of keys from both frames (SQL: inner join) + + .. versionadded:: 0.19.0 + + Examples + -------- + >>> A >>> B + key lvalue group key rvalue + 0 a 1 a 0 b 1 + 1 c 2 a 1 c 2 + 2 e 3 a 2 d 3 + 3 a 1 b + 4 c 2 b + 5 e 3 b + + >>> ordered_merge(A, B, fill_method='ffill', left_by='group') + key lvalue group rvalue + 0 a 1 a NaN + 1 b 1 a 1 + 2 c 2 a 2 + 3 d 2 a 3 + 4 e 3 a 3 + 5 f 3 a 4 + 6 a 1 b NaN + 7 b 1 b 1 + 8 c 2 b 2 + 9 d 2 b 3 + 10 e 3 b 3 + 11 f 3 b 4 + + Returns + ------- + merged : DataFrame + The output type will the be same as 'left', if it is a subclass + of DataFrame. 
+ + See also + -------- + merge + merge_asof + + """ + def _merger(x, y): + # perform the ordered merge operation + op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on, + suffixes=suffixes, fill_method=fill_method, + how=how) + return op.get_result() + + if left_by is not None and right_by is not None: + raise ValueError('Can only group either left or right frames') + elif left_by is not None: + result, _ = _groupby_and_merge(left_by, on, left, right, + lambda x, y: _merger(x, y), + check_duplicates=False) + elif right_by is not None: + result, _ = _groupby_and_merge(right_by, on, right, left, + lambda x, y: _merger(y, x), + check_duplicates=False) + else: + result = _merger(left, right) + return result + + +ordered_merge.__doc__ = merge_ordered.__doc__ + + +def merge_asof(left, right, on=None, + left_on=None, right_on=None, + left_index=False, right_index=False, + by=None, left_by=None, right_by=None, + suffixes=('_x', '_y'), + tolerance=None, + allow_exact_matches=True, + direction='backward'): + """Perform an asof merge. This is similar to a left-join except that we + match on nearest key rather than equal keys. + + Both DataFrames must be sorted by the key. + + For each row in the left DataFrame: + + - A "backward" search selects the last row in the right DataFrame whose + 'on' key is less than or equal to the left's key. + + - A "forward" search selects the first row in the right DataFrame whose + 'on' key is greater than or equal to the left's key. + + - A "nearest" search selects the row in the right DataFrame whose 'on' + key is closest in absolute distance to the left's key. + + The default is "backward" and is compatible in versions below 0.20.0. + The direction parameter was added in version 0.20.0 and introduces + "forward" and "nearest". + + Optionally match on equivalent keys with 'by' before searching with 'on'. + + .. 
versionadded:: 0.19.0 + + Parameters + ---------- + left : DataFrame + right : DataFrame + on : label + Field name to join on. Must be found in both DataFrames. + The data MUST be ordered. Furthermore this must be a numeric column, + such as datetimelike, integer, or float. On or left_on/right_on + must be given. + left_on : label + Field name to join on in left DataFrame. + right_on : label + Field name to join on in right DataFrame. + left_index : boolean + Use the index of the left DataFrame as the join key. + + .. versionadded:: 0.19.2 + + right_index : boolean + Use the index of the right DataFrame as the join key. + + .. versionadded:: 0.19.2 + + by : column name or list of column names + Match on these columns before performing merge operation. + left_by : column name + Field names to match on in the left DataFrame. + + .. versionadded:: 0.19.2 + + right_by : column name + Field names to match on in the right DataFrame. + + .. versionadded:: 0.19.2 + + suffixes : 2-length sequence (tuple, list, ...) + Suffix to apply to overlapping column names in the left and right + side, respectively. + tolerance : integer or Timedelta, optional, default None + Select asof tolerance within this range; must be compatible + with the merge index. + allow_exact_matches : boolean, default True + + - If True, allow matching with the same 'on' value + (i.e. less-than-or-equal-to / greater-than-or-equal-to) + - If False, don't match the same 'on' value + (i.e., stricly less-than / strictly greater-than) + + direction : 'backward' (default), 'forward', or 'nearest' + Whether to search for prior, subsequent, or closest matches. + + .. 
versionadded:: 0.20.0 + + Returns + ------- + merged : DataFrame + + Examples + -------- + >>> left + a left_val + 0 1 a + 1 5 b + 2 10 c + + >>> right + a right_val + 0 1 1 + 1 2 2 + 2 3 3 + 3 6 6 + 4 7 7 + + >>> pd.merge_asof(left, right, on='a') + a left_val right_val + 0 1 a 1 + 1 5 b 3 + 2 10 c 7 + + >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False) + a left_val right_val + 0 1 a NaN + 1 5 b 3.0 + 2 10 c 7.0 + + >>> pd.merge_asof(left, right, on='a', direction='forward') + a left_val right_val + 0 1 a 1.0 + 1 5 b 6.0 + 2 10 c NaN + + >>> pd.merge_asof(left, right, on='a', direction='nearest') + a left_val right_val + 0 1 a 1 + 1 5 b 6 + 2 10 c 7 + + We can use indexed DataFrames as well. + + >>> left + left_val + 1 a + 5 b + 10 c + + >>> right + right_val + 1 1 + 2 2 + 3 3 + 6 6 + 7 7 + + >>> pd.merge_asof(left, right, left_index=True, right_index=True) + left_val right_val + 1 a 1 + 5 b 3 + 10 c 7 + + Here is a real-world times-series example + + >>> quotes + time ticker bid ask + 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 + 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 + 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 + 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 + 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 + 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 + 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 + 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 + + >>> trades + time ticker price quantity + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 + + By default we are taking the asof of the quotes + + >>> pd.merge_asof(trades, quotes, + ... on='time', + ... 
by='ticker') + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 2ms betwen the quote time and the trade time + + >>> pd.merge_asof(trades, quotes, + ... on='time', + ... by='ticker', + ... tolerance=pd.Timedelta('2ms')) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 10ms betwen the quote time and the trade time + and we exclude exact matches on time. However *prior* data will + propogate forward + + >>> pd.merge_asof(trades, quotes, + ... on='time', + ... by='ticker', + ... tolerance=pd.Timedelta('10ms'), + ... allow_exact_matches=False) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + See also + -------- + merge + merge_ordered + + """ + op = _AsOfMerge(left, right, + on=on, left_on=left_on, right_on=right_on, + left_index=left_index, right_index=right_index, + by=by, left_by=left_by, right_by=right_by, + suffixes=suffixes, + how='asof', tolerance=tolerance, + allow_exact_matches=allow_exact_matches, + direction=direction) + return op.get_result() + + +# TODO: transformations?? 
+# TODO: only copy DataFrames when modification necessary +class _MergeOperation(object): + """ + Perform a database (SQL) merge operation between two DataFrame objects + using either columns as keys or their row indexes + """ + _merge_type = 'merge' + + def __init__(self, left, right, how='inner', on=None, + left_on=None, right_on=None, axis=1, + left_index=False, right_index=False, sort=True, + suffixes=('_x', '_y'), copy=True, indicator=False): + self.left = self.orig_left = left + self.right = self.orig_right = right + self.how = how + self.axis = axis + + self.on = com._maybe_make_list(on) + self.left_on = com._maybe_make_list(left_on) + self.right_on = com._maybe_make_list(right_on) + + self.copy = copy + self.suffixes = suffixes + self.sort = sort + + self.left_index = left_index + self.right_index = right_index + + self.indicator = indicator + + if isinstance(self.indicator, compat.string_types): + self.indicator_name = self.indicator + elif isinstance(self.indicator, bool): + self.indicator_name = '_merge' if self.indicator else None + else: + raise ValueError( + 'indicator option can only accept boolean or string arguments') + + if not isinstance(left, DataFrame): + raise ValueError( + 'can not merge DataFrame with instance of ' + 'type {0}'.format(type(left))) + if not isinstance(right, DataFrame): + raise ValueError( + 'can not merge DataFrame with instance of ' + 'type {0}'.format(type(right))) + + if not is_bool(left_index): + raise ValueError( + 'left_index parameter must be of type bool, not ' + '{0}'.format(type(left_index))) + if not is_bool(right_index): + raise ValueError( + 'right_index parameter must be of type bool, not ' + '{0}'.format(type(right_index))) + + # warn user when merging between different levels + if left.columns.nlevels != right.columns.nlevels: + msg = ('merging between different levels can give an unintended ' + 'result ({0} levels on the left, {1} on the right)') + msg = msg.format(left.columns.nlevels, 
right.columns.nlevels) + warnings.warn(msg, UserWarning) + + self._validate_specification() + + # note this function has side effects + (self.left_join_keys, + self.right_join_keys, + self.join_names) = self._get_merge_keys() + + # validate the merge keys dtypes. We may need to coerce + # to avoid incompat dtypes + self._maybe_coerce_merge_keys() + + def get_result(self): + if self.indicator: + self.left, self.right = self._indicator_pre_merge( + self.left, self.right) + + join_index, left_indexer, right_indexer = self._get_join_info() + + ldata, rdata = self.left._data, self.right._data + lsuf, rsuf = self.suffixes + + llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, + rdata.items, rsuf) + + lindexers = {1: left_indexer} if left_indexer is not None else {} + rindexers = {1: right_indexer} if right_indexer is not None else {} + + result_data = concatenate_block_managers( + [(ldata, lindexers), (rdata, rindexers)], + axes=[llabels.append(rlabels), join_index], + concat_axis=0, copy=self.copy) + + typ = self.left._constructor + result = typ(result_data).__finalize__(self, method=self._merge_type) + + if self.indicator: + result = self._indicator_post_merge(result) + + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + return result + + def _indicator_pre_merge(self, left, right): + + columns = left.columns.union(right.columns) + + for i in ['_left_indicator', '_right_indicator']: + if i in columns: + raise ValueError("Cannot use `indicator=True` option when " + "data contains a column named {}".format(i)) + if self.indicator_name in columns: + raise ValueError( + "Cannot use name of an existing column for indicator column") + + left = left.copy() + right = right.copy() + + left['_left_indicator'] = 1 + left['_left_indicator'] = left['_left_indicator'].astype('int8') + + right['_right_indicator'] = 2 + right['_right_indicator'] = right['_right_indicator'].astype('int8') + + return left, right + + def _indicator_post_merge(self, result): 
+ + result['_left_indicator'] = result['_left_indicator'].fillna(0) + result['_right_indicator'] = result['_right_indicator'].fillna(0) + + result[self.indicator_name] = Categorical((result['_left_indicator'] + + result['_right_indicator']), + categories=[1, 2, 3]) + result[self.indicator_name] = ( + result[self.indicator_name] + .cat.rename_categories(['left_only', 'right_only', 'both'])) + + result = result.drop(labels=['_left_indicator', '_right_indicator'], + axis=1) + return result + + def _maybe_add_join_keys(self, result, left_indexer, right_indexer): + + left_has_missing = None + right_has_missing = None + + keys = zip(self.join_names, self.left_on, self.right_on) + for i, (name, lname, rname) in enumerate(keys): + if not _should_fill(lname, rname): + continue + + take_left, take_right = None, None + + if name in result: + + if left_indexer is not None and right_indexer is not None: + if name in self.left: + + if left_has_missing is None: + left_has_missing = (left_indexer == -1).any() + + if left_has_missing: + take_right = self.right_join_keys[i] + + if not is_dtype_equal(result[name].dtype, + self.left[name].dtype): + take_left = self.left[name]._values + + elif name in self.right: + + if right_has_missing is None: + right_has_missing = (right_indexer == -1).any() + + if right_has_missing: + take_left = self.left_join_keys[i] + + if not is_dtype_equal(result[name].dtype, + self.right[name].dtype): + take_right = self.right[name]._values + + elif left_indexer is not None \ + and isinstance(self.left_join_keys[i], np.ndarray): + + take_left = self.left_join_keys[i] + take_right = self.right_join_keys[i] + + if take_left is not None or take_right is not None: + + if take_left is None: + lvals = result[name]._values + else: + lfill = na_value_for_dtype(take_left.dtype) + lvals = algos.take_1d(take_left, left_indexer, + fill_value=lfill) + + if take_right is None: + rvals = result[name]._values + else: + rfill = na_value_for_dtype(take_right.dtype) + rvals = 
algos.take_1d(take_right, right_indexer, + fill_value=rfill) + + # if we have an all missing left_indexer + # make sure to just use the right values + mask = left_indexer == -1 + if mask.all(): + key_col = rvals + else: + key_col = Index(lvals).where(~mask, rvals) + + if name in result: + result[name] = key_col + else: + result.insert(i, name or 'key_%d' % i, key_col) + + def _get_join_indexers(self): + """ return the join indexers """ + return _get_join_indexers(self.left_join_keys, + self.right_join_keys, + sort=self.sort, + how=self.how) + + def _get_join_info(self): + left_ax = self.left._data.axes[self.axis] + right_ax = self.right._data.axes[self.axis] + + if self.left_index and self.right_index and self.how != 'asof': + join_index, left_indexer, right_indexer = \ + left_ax.join(right_ax, how=self.how, return_indexers=True, + sort=self.sort) + elif self.right_index and self.how == 'left': + join_index, left_indexer, right_indexer = \ + _left_join_on_index(left_ax, right_ax, self.left_join_keys, + sort=self.sort) + + elif self.left_index and self.how == 'right': + join_index, right_indexer, left_indexer = \ + _left_join_on_index(right_ax, left_ax, self.right_join_keys, + sort=self.sort) + else: + (left_indexer, + right_indexer) = self._get_join_indexers() + + if self.right_index: + if len(self.left) > 0: + join_index = self.left.index.take(left_indexer) + else: + join_index = self.right.index.take(right_indexer) + left_indexer = np.array([-1] * len(join_index)) + elif self.left_index: + if len(self.right) > 0: + join_index = self.right.index.take(right_indexer) + else: + join_index = self.left.index.take(left_indexer) + right_indexer = np.array([-1] * len(join_index)) + else: + join_index = Index(np.arange(len(left_indexer))) + + if len(join_index) == 0: + join_index = join_index.astype(object) + return join_index, left_indexer, right_indexer + + def _get_merge_keys(self): + """ + Note: has side effects (copy/delete key columns) + + Parameters + ---------- + 
left + right + on + + Returns + ------- + left_keys, right_keys + """ + left_keys = [] + right_keys = [] + join_names = [] + right_drop = [] + left_drop = [] + left, right = self.left, self.right + + is_lkey = lambda x: isinstance( + x, (np.ndarray, Series)) and len(x) == len(left) + is_rkey = lambda x: isinstance( + x, (np.ndarray, Series)) and len(x) == len(right) + + # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A + # user could, for example, request 'left_index' and 'left_by'. In a + # regular pd.merge(), users cannot specify both 'left_index' and + # 'left_on'. (Instead, users have a MultiIndex). That means the + # self.left_on in this function is always empty in a pd.merge(), but + # a pd.merge_asof(left_index=True, left_by=...) will result in a + # self.left_on array with a None in the middle of it. This requires + # a work-around as designated in the code below. + # See _validate_specification() for where this happens. + + # ugh, spaghetti re #733 + if _any(self.left_on) and _any(self.right_on): + for lk, rk in zip(self.left_on, self.right_on): + if is_lkey(lk): + left_keys.append(lk) + if is_rkey(rk): + right_keys.append(rk) + join_names.append(None) # what to do? 
+ else: + if rk is not None: + right_keys.append(right[rk]._values) + join_names.append(rk) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index) + join_names.append(right.index.name) + else: + if not is_rkey(rk): + if rk is not None: + right_keys.append(right[rk]._values) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index) + if lk is not None and lk == rk: + # avoid key upcast in corner case (length-0) + if len(left) > 0: + right_drop.append(rk) + else: + left_drop.append(lk) + else: + right_keys.append(rk) + if lk is not None: + left_keys.append(left[lk]._values) + join_names.append(lk) + else: + # work-around for merge_asof(left_index=True) + left_keys.append(left.index) + join_names.append(left.index.name) + elif _any(self.left_on): + for k in self.left_on: + if is_lkey(k): + left_keys.append(k) + join_names.append(None) + else: + left_keys.append(left[k]._values) + join_names.append(k) + if isinstance(self.right.index, MultiIndex): + right_keys = [lev._values.take(lab) + for lev, lab in zip(self.right.index.levels, + self.right.index.labels)] + else: + right_keys = [self.right.index.values] + elif _any(self.right_on): + for k in self.right_on: + if is_rkey(k): + right_keys.append(k) + join_names.append(None) + else: + right_keys.append(right[k]._values) + join_names.append(k) + if isinstance(self.left.index, MultiIndex): + left_keys = [lev._values.take(lab) + for lev, lab in zip(self.left.index.levels, + self.left.index.labels)] + else: + left_keys = [self.left.index.values] + + if left_drop: + self.left = self.left.drop(left_drop, axis=1) + + if right_drop: + self.right = self.right.drop(right_drop, axis=1) + + return left_keys, right_keys, join_names + + def _maybe_coerce_merge_keys(self): + # we have valid mergee's but we may have to further + # coerce these if they are originally incompatible types + # + # for example if these are categorical, but are not dtype_equal + # or if we 
have object and integer dtypes + + for lk, rk, name in zip(self.left_join_keys, + self.right_join_keys, + self.join_names): + if (len(lk) and not len(rk)) or (not len(lk) and len(rk)): + continue + + # if either left or right is a categorical + # then the must match exactly in categories & ordered + if is_categorical_dtype(lk) and is_categorical_dtype(rk): + if lk.is_dtype_equal(rk): + continue + elif is_categorical_dtype(lk) or is_categorical_dtype(rk): + pass + + elif is_dtype_equal(lk.dtype, rk.dtype): + continue + + # if we are numeric, then allow differing + # kinds to proceed, eg. int64 and int8 + # further if we are object, but we infer to + # the same, then proceed + if (is_numeric_dtype(lk) and is_numeric_dtype(rk)): + if lk.dtype.kind == rk.dtype.kind: + continue + + # let's infer and see if we are ok + if lib.infer_dtype(lk) == lib.infer_dtype(rk): + continue + + # Houston, we have a problem! + # let's coerce to object + if name in self.left.columns: + self.left = self.left.assign( + **{name: self.left[name].astype(object)}) + if name in self.right.columns: + self.right = self.right.assign( + **{name: self.right[name].astype(object)}) + + def _validate_specification(self): + # Hm, any way to make this logic less complicated?? 
+ if self.on is None and self.left_on is None and self.right_on is None: + + if self.left_index and self.right_index: + self.left_on, self.right_on = (), () + elif self.left_index: + if self.right_on is None: + raise MergeError('Must pass right_on or right_index=True') + elif self.right_index: + if self.left_on is None: + raise MergeError('Must pass left_on or left_index=True') + else: + # use the common columns + common_cols = self.left.columns.intersection( + self.right.columns) + if len(common_cols) == 0: + raise MergeError('No common columns to perform merge on') + if not common_cols.is_unique: + raise MergeError("Data columns not unique: %s" + % repr(common_cols)) + self.left_on = self.right_on = common_cols + elif self.on is not None: + if self.left_on is not None or self.right_on is not None: + raise MergeError('Can only pass argument "on" OR "left_on" ' + 'and "right_on", not a combination of both.') + self.left_on = self.right_on = self.on + elif self.left_on is not None: + n = len(self.left_on) + if self.right_index: + if len(self.left_on) != self.right.index.nlevels: + raise ValueError('len(left_on) must equal the number ' + 'of levels in the index of "right"') + self.right_on = [None] * n + elif self.right_on is not None: + n = len(self.right_on) + if self.left_index: + if len(self.right_on) != self.left.index.nlevels: + raise ValueError('len(right_on) must equal the number ' + 'of levels in the index of "left"') + self.left_on = [None] * n + if len(self.right_on) != len(self.left_on): + raise ValueError("len(right_on) must equal len(left_on)") + + +def _get_join_indexers(left_keys, right_keys, sort=False, how='inner', + **kwargs): + """ + + Parameters + ---------- + left_keys: ndarray, Index, Series + right_keys: ndarray, Index, Series + sort: boolean, default False + how: string {'inner', 'outer', 'left', 'right'}, default 'inner' + + Returns + ------- + tuple of (left_indexer, right_indexer) + indexers into the left_keys, right_keys + + """ + from 
functools import partial + + assert len(left_keys) == len(right_keys), \ + 'left_key and right_keys must be the same length' + + # bind `sort` arg. of _factorize_keys + fkeys = partial(_factorize_keys, sort=sort) + + # get left & right join labels and num. of levels at each location + llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys))) + + # get flat i8 keys from label lists + lkey, rkey = _get_join_keys(llab, rlab, shape, sort) + + # factorize keys to a dense i8 space + # `count` is the num. of unique keys + # set(lkey) | set(rkey) == range(count) + lkey, rkey, count = fkeys(lkey, rkey) + + # preserve left frame order if how == 'left' and sort == False + kwargs = copy.copy(kwargs) + if how == 'left': + kwargs['sort'] = sort + join_func = _join_functions[how] + + return join_func(lkey, rkey, count, **kwargs) + + +class _OrderedMerge(_MergeOperation): + _merge_type = 'ordered_merge' + + def __init__(self, left, right, on=None, left_on=None, right_on=None, + left_index=False, right_index=False, axis=1, + suffixes=('_x', '_y'), copy=True, + fill_method=None, how='outer'): + + self.fill_method = fill_method + _MergeOperation.__init__(self, left, right, on=on, left_on=left_on, + left_index=left_index, + right_index=right_index, + right_on=right_on, axis=axis, + how=how, suffixes=suffixes, + sort=True # factorize sorts + ) + + def get_result(self): + join_index, left_indexer, right_indexer = self._get_join_info() + + # this is a bit kludgy + ldata, rdata = self.left._data, self.right._data + lsuf, rsuf = self.suffixes + + llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, + rdata.items, rsuf) + + if self.fill_method == 'ffill': + left_join_indexer = libjoin.ffill_indexer(left_indexer) + right_join_indexer = libjoin.ffill_indexer(right_indexer) + else: + left_join_indexer = left_indexer + right_join_indexer = right_indexer + + lindexers = { + 1: left_join_indexer} if left_join_indexer is not None else {} + rindexers = { + 1: 
right_join_indexer} if right_join_indexer is not None else {} + + result_data = concatenate_block_managers( + [(ldata, lindexers), (rdata, rindexers)], + axes=[llabels.append(rlabels), join_index], + concat_axis=0, copy=self.copy) + + typ = self.left._constructor + result = typ(result_data).__finalize__(self, method=self._merge_type) + + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + return result + + +def _asof_function(direction, on_type): + return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None) + + +def _asof_by_function(direction, on_type, by_type): + return getattr(libjoin, 'asof_join_%s_%s_by_%s' % + (direction, on_type, by_type), None) + + +_type_casters = { + 'int64_t': _ensure_int64, + 'double': _ensure_float64, + 'object': _ensure_object, +} + +_cython_types = { + 'uint8': 'uint8_t', + 'uint32': 'uint32_t', + 'uint16': 'uint16_t', + 'uint64': 'uint64_t', + 'int8': 'int8_t', + 'int32': 'int32_t', + 'int16': 'int16_t', + 'int64': 'int64_t', + 'float16': 'error', + 'float32': 'float', + 'float64': 'double', +} + + +def _get_cython_type(dtype): + """ Given a dtype, return a C name like 'int64_t' or 'double' """ + type_name = _get_dtype(dtype).name + ctype = _cython_types.get(type_name, 'object') + if ctype == 'error': + raise MergeError('unsupported type: ' + type_name) + return ctype + + +def _get_cython_type_upcast(dtype): + """ Upcast a dtype to 'int64_t', 'double', or 'object' """ + if is_integer_dtype(dtype): + return 'int64_t' + elif is_float_dtype(dtype): + return 'double' + else: + return 'object' + + +class _AsOfMerge(_OrderedMerge): + _merge_type = 'asof_merge' + + def __init__(self, left, right, on=None, left_on=None, right_on=None, + left_index=False, right_index=False, + by=None, left_by=None, right_by=None, + axis=1, suffixes=('_x', '_y'), copy=True, + fill_method=None, + how='asof', tolerance=None, + allow_exact_matches=True, + direction='backward'): + + self.by = by + self.left_by = left_by + self.right_by 
= right_by + self.tolerance = tolerance + self.allow_exact_matches = allow_exact_matches + self.direction = direction + + _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on, + right_on=right_on, left_index=left_index, + right_index=right_index, axis=axis, + how=how, suffixes=suffixes, + fill_method=fill_method) + + def _validate_specification(self): + super(_AsOfMerge, self)._validate_specification() + + # we only allow on to be a single item for on + if len(self.left_on) != 1 and not self.left_index: + raise MergeError("can only asof on a key for left") + + if len(self.right_on) != 1 and not self.right_index: + raise MergeError("can only asof on a key for right") + + if self.left_index and isinstance(self.left.index, MultiIndex): + raise MergeError("left can only have one index") + + if self.right_index and isinstance(self.right.index, MultiIndex): + raise MergeError("right can only have one index") + + # set 'by' columns + if self.by is not None: + if self.left_by is not None or self.right_by is not None: + raise MergeError('Can only pass by OR left_by ' + 'and right_by') + self.left_by = self.right_by = self.by + if self.left_by is None and self.right_by is not None: + raise MergeError('missing left_by') + if self.left_by is not None and self.right_by is None: + raise MergeError('missing right_by') + + # add 'by' to our key-list so we can have it in the + # output as a key + if self.left_by is not None: + if not is_list_like(self.left_by): + self.left_by = [self.left_by] + if not is_list_like(self.right_by): + self.right_by = [self.right_by] + + if len(self.left_by) != len(self.right_by): + raise MergeError('left_by and right_by must be same length') + + self.left_on = self.left_by + list(self.left_on) + self.right_on = self.right_by + list(self.right_on) + + # check 'direction' is valid + if self.direction not in ['backward', 'forward', 'nearest']: + raise MergeError('direction invalid: ' + self.direction) + + @property + def _asof_key(self): + 
""" This is our asof key, the 'on' """ + return self.left_on[-1] + + def _get_merge_keys(self): + + # note this function has side effects + (left_join_keys, + right_join_keys, + join_names) = super(_AsOfMerge, self)._get_merge_keys() + + # validate index types are the same + for lk, rk in zip(left_join_keys, right_join_keys): + if not is_dtype_equal(lk.dtype, rk.dtype): + raise MergeError("incompatible merge keys, " + "must be the same type") + + # validate tolerance; must be a Timedelta if we have a DTI + if self.tolerance is not None: + + if self.left_index: + lt = self.left.index + else: + lt = left_join_keys[-1] + + msg = "incompatible tolerance, must be compat " \ + "with type {0}".format(type(lt)) + + if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): + if not isinstance(self.tolerance, Timedelta): + raise MergeError(msg) + if self.tolerance < Timedelta(0): + raise MergeError("tolerance must be positive") + + elif is_int64_dtype(lt): + if not is_integer(self.tolerance): + raise MergeError(msg) + if self.tolerance < 0: + raise MergeError("tolerance must be positive") + + else: + raise MergeError("key must be integer or timestamp") + + # validate allow_exact_matches + if not is_bool(self.allow_exact_matches): + raise MergeError("allow_exact_matches must be boolean, " + "passed {0}".format(self.allow_exact_matches)) + + return left_join_keys, right_join_keys, join_names + + def _get_join_indexers(self): + """ return the join indexers """ + + def flip(xs): + """ unlike np.transpose, this returns an array of tuples """ + labels = list(string.ascii_lowercase[:len(xs)]) + dtypes = [x.dtype for x in xs] + labeled_dtypes = list(zip(labels, dtypes)) + return np.array(lzip(*xs), labeled_dtypes) + + # values to compare + left_values = (self.left.index.values if self.left_index else + self.left_join_keys[-1]) + right_values = (self.right.index.values if self.right_index else + self.right_join_keys[-1]) + tolerance = self.tolerance + + # we required sortedness in the 
join keys + msg = " keys must be sorted" + if not Index(left_values).is_monotonic: + raise ValueError('left' + msg) + if not Index(right_values).is_monotonic: + raise ValueError('right' + msg) + + # initial type conversion as needed + if needs_i8_conversion(left_values): + left_values = left_values.view('i8') + right_values = right_values.view('i8') + if tolerance is not None: + tolerance = tolerance.value + + # a "by" parameter requires special handling + if self.left_by is not None: + # remove 'on' parameter from values if one existed + if self.left_index and self.right_index: + left_by_values = self.left_join_keys + right_by_values = self.right_join_keys + else: + left_by_values = self.left_join_keys[0:-1] + right_by_values = self.right_join_keys[0:-1] + + # get tuple representation of values if more than one + if len(left_by_values) == 1: + left_by_values = left_by_values[0] + right_by_values = right_by_values[0] + else: + left_by_values = flip(left_by_values) + right_by_values = flip(right_by_values) + + # upcast 'by' parameter because HashTable is limited + by_type = _get_cython_type_upcast(left_by_values.dtype) + by_type_caster = _type_casters[by_type] + left_by_values = by_type_caster(left_by_values) + right_by_values = by_type_caster(right_by_values) + + # choose appropriate function by type + on_type = _get_cython_type(left_values.dtype) + func = _asof_by_function(self.direction, on_type, by_type) + return func(left_values, + right_values, + left_by_values, + right_by_values, + self.allow_exact_matches, + tolerance) + else: + # choose appropriate function by type + on_type = _get_cython_type(left_values.dtype) + func = _asof_function(self.direction, on_type) + return func(left_values, + right_values, + self.allow_exact_matches, + tolerance) + + +def _get_multiindex_indexer(join_keys, index, sort): + from functools import partial + + # bind `sort` argument + fkeys = partial(_factorize_keys, sort=sort) + + # left & right join labels and num. 
of levels at each location + rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys))) + if sort: + rlab = list(map(np.take, rlab, index.labels)) + else: + i8copy = lambda a: a.astype('i8', subok=False, copy=True) + rlab = list(map(i8copy, index.labels)) + + # fix right labels if there were any nulls + for i in range(len(join_keys)): + mask = index.labels[i] == -1 + if mask.any(): + # check if there already was any nulls at this location + # if there was, it is factorized to `shape[i] - 1` + a = join_keys[i][llab[i] == shape[i] - 1] + if a.size == 0 or not a[0] != a[0]: + shape[i] += 1 + + rlab[i][mask] = shape[i] - 1 + + # get flat i8 join keys + lkey, rkey = _get_join_keys(llab, rlab, shape, sort) + + # factorize keys to a dense i8 space + lkey, rkey, count = fkeys(lkey, rkey) + + return libjoin.left_outer_join(lkey, rkey, count, sort=sort) + + +def _get_single_indexer(join_key, index, sort=False): + left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) + + left_indexer, right_indexer = libjoin.left_outer_join( + _ensure_int64(left_key), + _ensure_int64(right_key), + count, sort=sort) + + return left_indexer, right_indexer + + +def _left_join_on_index(left_ax, right_ax, join_keys, sort=False): + if len(join_keys) > 1: + if not ((isinstance(right_ax, MultiIndex) and + len(join_keys) == right_ax.nlevels)): + raise AssertionError("If more than one join key is given then " + "'right_ax' must be a MultiIndex and the " + "number of join keys must be the number of " + "levels in right_ax") + + left_indexer, right_indexer = \ + _get_multiindex_indexer(join_keys, right_ax, sort=sort) + else: + jkey = join_keys[0] + + left_indexer, right_indexer = \ + _get_single_indexer(jkey, right_ax, sort=sort) + + if sort or len(left_ax) != len(left_indexer): + # if asked to sort or there are 1-to-many matches + join_index = left_ax.take(left_indexer) + return join_index, left_indexer, right_indexer + + # left frame preserves order & length of its 
index + return left_ax, None, right_indexer + + +def _right_outer_join(x, y, max_groups): + right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups) + return left_indexer, right_indexer + + +_join_functions = { + 'inner': libjoin.inner_join, + 'left': libjoin.left_outer_join, + 'right': _right_outer_join, + 'outer': libjoin.full_outer_join, +} + + +def _factorize_keys(lk, rk, sort=True): + if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk): + lk = lk.values + rk = rk.values + + # if we exactly match in categories, allow us to use codes + if (is_categorical_dtype(lk) and + is_categorical_dtype(rk) and + lk.is_dtype_equal(rk)): + return lk.codes, rk.codes, len(lk.categories) + + if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): + klass = libhashtable.Int64Factorizer + lk = _ensure_int64(com._values_from_object(lk)) + rk = _ensure_int64(com._values_from_object(rk)) + else: + klass = libhashtable.Factorizer + lk = _ensure_object(lk) + rk = _ensure_object(rk) + + rizer = klass(max(len(lk), len(rk))) + + llab = rizer.factorize(lk) + rlab = rizer.factorize(rk) + + count = rizer.get_count() + + if sort: + uniques = rizer.uniques.to_array() + llab, rlab = _sort_labels(uniques, llab, rlab) + + # NA group + lmask = llab == -1 + lany = lmask.any() + rmask = rlab == -1 + rany = rmask.any() + + if lany or rany: + if lany: + np.putmask(llab, lmask, count) + if rany: + np.putmask(rlab, rmask, count) + count += 1 + + return llab, rlab, count + + +def _sort_labels(uniques, left, right): + if not isinstance(uniques, np.ndarray): + # tuplesafe + uniques = Index(uniques).values + + l = len(left) + labels = np.concatenate([left, right]) + + _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1) + new_labels = _ensure_int64(new_labels) + new_left, new_right = new_labels[:l], new_labels[l:] + + return new_left, new_right + + +def _get_join_keys(llab, rlab, shape, sort): + + # how many levels can be done without overflow + pred = lambda i: 
not is_int64_overflow_possible(shape[:i]) + nlev = next(filter(pred, range(len(shape), 0, -1))) + + # get keys for the first `nlev` levels + stride = np.prod(shape[1:nlev], dtype='i8') + lkey = stride * llab[0].astype('i8', subok=False, copy=False) + rkey = stride * rlab[0].astype('i8', subok=False, copy=False) + + for i in range(1, nlev): + stride //= shape[i] + lkey += llab[i] * stride + rkey += rlab[i] * stride + + if nlev == len(shape): # all done! + return lkey, rkey + + # densify current keys to avoid overflow + lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) + + llab = [lkey] + llab[nlev:] + rlab = [rkey] + rlab[nlev:] + shape = [count] + shape[nlev:] + + return _get_join_keys(llab, rlab, shape, sort) + + +def _should_fill(lname, rname): + if (not isinstance(lname, compat.string_types) or + not isinstance(rname, compat.string_types)): + return True + return lname == rname + + +def _any(x): + return x is not None and len(x) > 0 and any([y is not None for y in x]) diff --git a/pandas/tools/pivot.py b/pandas/core/reshape/pivot.py similarity index 99% rename from pandas/tools/pivot.py rename to pandas/core/reshape/pivot.py index 11ca2e548f171..1c5250615d410 100644 --- a/pandas/tools/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -2,9 +2,10 @@ from pandas.core.dtypes.common import is_list_like, is_scalar -from pandas import Series, DataFrame, MultiIndex, Index, concat +from pandas.core.reshape.concat import concat +from pandas import Series, DataFrame, MultiIndex, Index from pandas.core.groupby import Grouper -from pandas.tools.util import cartesian_product +from pandas.core.reshape.util import cartesian_product from pandas.compat import range, lrange, zip from pandas import compat import pandas.core.common as com diff --git a/pandas/core/reshape.py b/pandas/core/reshape/reshape.py similarity index 99% rename from pandas/core/reshape.py rename to pandas/core/reshape/reshape.py index b3a06d85967f2..bfd5320af13fb 100644 --- a/pandas/core/reshape.py +++ 
b/pandas/core/reshape/reshape.py @@ -1151,7 +1151,7 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, -------- Series.str.get_dummies """ - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat from itertools import cycle if isinstance(data, DataFrame): diff --git a/pandas/tools/tile.py b/pandas/core/reshape/tile.py similarity index 100% rename from pandas/tools/tile.py rename to pandas/core/reshape/tile.py diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py new file mode 100644 index 0000000000000..2fe82e5d6bc57 --- /dev/null +++ b/pandas/core/reshape/util.py @@ -0,0 +1,76 @@ +import numpy as np + +from pandas.core.dtypes.common import is_list_like + +from pandas.compat import reduce +from pandas.core.index import Index +from pandas.core import common as com + + +def match(needles, haystack): + haystack = Index(haystack) + needles = Index(needles) + return haystack.get_indexer(needles) + + +def cartesian_product(X): + """ + Numpy version of itertools.product or pandas.compat.product. + Sometimes faster (for large inputs)... + + Parameters + ---------- + X : list-like of list-likes + + Returns + ------- + product : list of ndarrays + + Examples + -------- + >>> cartesian_product([list('ABC'), [1, 2]]) + [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'), + array([1, 2, 1, 2, 1, 2])] + + See also + -------- + itertools.product : Cartesian product of input iterables. Equivalent to + nested for-loops. + pandas.compat.product : An alias for itertools.product. 
+ """ + msg = "Input must be a list-like of list-likes" + if not is_list_like(X): + raise TypeError(msg) + for x in X: + if not is_list_like(x): + raise TypeError(msg) + + if len(X) == 0: + return [] + + lenX = np.fromiter((len(x) for x in X), dtype=np.intp) + cumprodX = np.cumproduct(lenX) + + a = np.roll(cumprodX, 1) + a[0] = 1 + + if cumprodX[-1] != 0: + b = cumprodX[-1] / cumprodX + else: + # if any factor is empty, the cartesian product is empty + b = np.zeros_like(cumprodX) + + return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), + np.product(a[i])) + for i, x in enumerate(X)] + + +def _compose2(f, g): + """Compose 2 callables""" + return lambda *args, **kwargs: f(g(*args, **kwargs)) + + +def compose(*funcs): + """Compose 2 or more callables""" + assert len(funcs) > 1, 'At least 2 callables must be passed to compose' + return reduce(_compose2, funcs) diff --git a/pandas/core/series.py b/pandas/core/series.py index 2a99481274e9e..69a2b35d88460 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1541,7 +1541,7 @@ def append(self, to_append, ignore_index=False, verify_integrity=False): """ - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append @@ -2019,7 +2019,7 @@ def unstack(self, level=-1, fill_value=None): ------- unstacked : DataFrame """ - from pandas.core.reshape import unstack + from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index ae0814d5566a8..6fbcbe7d645e1 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -168,7 +168,7 @@ def __init__(self, series, buf=None, length=True, header=True, index=True, self._chk_truncate() def _chk_truncate(self): - from pandas.tools.concat import concat + from 
pandas.core.reshape.concat import concat max_rows = self.max_rows truncate_v = max_rows and (len(self.series) > max_rows) series = self.series @@ -410,7 +410,7 @@ def _chk_truncate(self): Checks whether the frame should be truncated. If so, slices the frame up. """ - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat # Column of which first element is used to determine width of a dot col self.tr_size_col = -1 diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 374244acfe173..934c05ba5f130 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2317,7 +2317,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) else: - from pandas.tools.concat import concat + from pandas.core.reshape.concat import concat keys, frames = zip(*grouped) if grouped.axis == 0: df = concat(frames, keys=keys, axis=1) diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index a1490426ebf9d..e59784d233367 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -6,10 +6,14 @@ """ import pytest +import decimal from datetime import datetime, timedelta, date import numpy as np -from pandas import Timedelta, Timestamp, DatetimeIndex +import pandas as pd +from pandas import (Timedelta, Timestamp, DatetimeIndex, + to_numeric, _np_version_under1p9) + from pandas.core.dtypes.cast import ( maybe_downcast_to_dtype, maybe_convert_objects, @@ -24,6 +28,8 @@ PeriodDtype) from pandas.util import testing as tm +from numpy import iinfo + class TestMaybeDowncast(tm.TestCase): @@ -321,3 +327,365 @@ def test_period_dtype(self): np.dtype('datetime64[ns]'), np.object, np.int64]: self.assertEqual(find_common_type([dtype, dtype2]), np.object) self.assertEqual(find_common_type([dtype2, dtype]), np.object) + + +class TestToNumeric(tm.TestCase): + + def test_series(self): + s = 
pd.Series(['1', '-3.14', '7']) + res = to_numeric(s) + expected = pd.Series([1, -3.14, 7]) + tm.assert_series_equal(res, expected) + + s = pd.Series(['1', '-3.14', 7]) + res = to_numeric(s) + tm.assert_series_equal(res, expected) + + def test_series_numeric(self): + s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + # bool is regarded as numeric + s = pd.Series([True, False, True, True], + index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + def test_error(self): + s = pd.Series([1, -3.14, 'apple']) + msg = 'Unable to parse string "apple" at position 2' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + res = to_numeric(s, errors='ignore') + expected = pd.Series([1, -3.14, 'apple']) + tm.assert_series_equal(res, expected) + + res = to_numeric(s, errors='coerce') + expected = pd.Series([1, -3.14, np.nan]) + tm.assert_series_equal(res, expected) + + s = pd.Series(['orange', 1, -3.14, 'apple']) + msg = 'Unable to parse string "orange" at position 0' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + def test_error_seen_bool(self): + s = pd.Series([True, False, 'apple']) + msg = 'Unable to parse string "apple" at position 2' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + res = to_numeric(s, errors='ignore') + expected = pd.Series([True, False, 'apple']) + tm.assert_series_equal(res, expected) + + # coerces to float + res = to_numeric(s, errors='coerce') + expected = pd.Series([1., 0., np.nan]) + tm.assert_series_equal(res, expected) + + def test_list(self): + s = ['1', '-3.14', '7'] + res = to_numeric(s) + expected = np.array([1, -3.14, 7]) + tm.assert_numpy_array_equal(res, expected) + + def test_list_numeric(self): + s = [1, 3, 4, 5] + res = 
to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64)) + + s = [1., 3., 4., 5.] + res = to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s)) + + # bool is regarded as numeric + s = [True, False, True, True] + res = to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s)) + + def test_numeric(self): + s = pd.Series([1, -3.14, 7], dtype='O') + res = to_numeric(s) + expected = pd.Series([1, -3.14, 7]) + tm.assert_series_equal(res, expected) + + s = pd.Series([1, -3.14, 7]) + res = to_numeric(s) + tm.assert_series_equal(res, expected) + + # GH 14827 + df = pd.DataFrame(dict( + a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'], + b=[1.0, 2.0, 3.0, 4.0], + )) + expected = pd.DataFrame(dict( + a=[1.2, 3.14, np.inf, 0.1], + b=[1.0, 2.0, 3.0, 4.0], + )) + + # Test to_numeric over one column + df_copy = df.copy() + df_copy['a'] = df_copy['a'].apply(to_numeric) + tm.assert_frame_equal(df_copy, expected) + + # Test to_numeric over multiple columns + df_copy = df.copy() + df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric) + tm.assert_frame_equal(df_copy, expected) + + def test_numeric_lists_and_arrays(self): + # Test to_numeric with embedded lists and arrays + df = pd.DataFrame(dict( + a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1] + )) + df['a'] = df['a'].apply(to_numeric) + expected = pd.DataFrame(dict( + a=[[3.14, 1.0], 1.6, 0.1], + )) + tm.assert_frame_equal(df, expected) + + df = pd.DataFrame(dict( + a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1] + )) + df['a'] = df['a'].apply(to_numeric) + expected = pd.DataFrame(dict( + a=[[3.14, 1.0], 0.1], + )) + tm.assert_frame_equal(df, expected) + + def test_all_nan(self): + s = pd.Series(['a', 'b', 'c']) + res = to_numeric(s, errors='coerce') + expected = pd.Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(res, expected) + + def test_type_check(self): + # GH 11776 + df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']}) + with 
tm.assertRaisesRegexp(TypeError, "1-d array"): + to_numeric(df) + for errors in ['ignore', 'raise', 'coerce']: + with tm.assertRaisesRegexp(TypeError, "1-d array"): + to_numeric(df, errors=errors) + + def test_scalar(self): + self.assertEqual(pd.to_numeric(1), 1) + self.assertEqual(pd.to_numeric(1.1), 1.1) + + self.assertEqual(pd.to_numeric('1'), 1) + self.assertEqual(pd.to_numeric('1.1'), 1.1) + + with tm.assertRaises(ValueError): + to_numeric('XX', errors='raise') + + self.assertEqual(to_numeric('XX', errors='ignore'), 'XX') + self.assertTrue(np.isnan(to_numeric('XX', errors='coerce'))) + + def test_numeric_dtypes(self): + idx = pd.Index([1, 2, 3], name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, idx) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.values) + + idx = pd.Index([1., np.nan, 3., np.nan], name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, idx) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.values) + + def test_str(self): + idx = pd.Index(['1', '2', '3'], name='xxx') + exp = np.array([1, 2, 3], dtype='int64') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(exp, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(exp, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, exp) + + idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx') + exp = np.array([1.5, 2.7, 3.4]) + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(exp, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(exp, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, exp) + + def 
test_datetimelike(self): + for tz in [None, 'US/Eastern', 'Asia/Tokyo']: + idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.asi8) + + def test_timedelta(self): + idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.asi8) + + def test_period(self): + idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + # ToDo: enable when we can support native PeriodDtype + # res = pd.to_numeric(pd.Series(idx, name='xxx')) + # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + def test_non_hashable(self): + # Test for Bug #13324 + s = pd.Series([[10.0, 2], 1.0, 'apple']) + res = pd.to_numeric(s, errors='coerce') + tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan])) + + res = pd.to_numeric(s, errors='ignore') + tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple'])) + + with self.assertRaisesRegexp(TypeError, "Invalid object type"): + pd.to_numeric(s) + + def test_downcast(self): + # see gh-13352 + mixed_data = ['1', 2, 3] + int_data = [1, 2, 3] + date_data = np.array(['1970-01-02', '1970-01-03', + '1970-01-04'], dtype='datetime64[D]') + + invalid_downcast = 'unsigned-integer' + msg = 'invalid downcasting method provided' + + smallest_int_dtype = np.dtype(np.typecodes['Integer'][0]) + smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0]) + + # support below 
np.float32 is rare and far between + float_32_char = np.dtype(np.float32).char + smallest_float_dtype = float_32_char + + for data in (mixed_data, int_data, date_data): + with self.assertRaisesRegexp(ValueError, msg): + pd.to_numeric(data, downcast=invalid_downcast) + + expected = np.array([1, 2, 3], dtype=np.int64) + + res = pd.to_numeric(data) + tm.assert_numpy_array_equal(res, expected) + + res = pd.to_numeric(data, downcast=None) + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_int_dtype) + + for signed_downcast in ('integer', 'signed'): + res = pd.to_numeric(data, downcast=signed_downcast) + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_uint_dtype) + res = pd.to_numeric(data, downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_float_dtype) + res = pd.to_numeric(data, downcast='float') + tm.assert_numpy_array_equal(res, expected) + + # if we can't successfully cast the given + # data to a numeric dtype, do not bother + # with the downcast parameter + data = ['foo', 2, 3] + expected = np.array(data, dtype=object) + res = pd.to_numeric(data, errors='ignore', + downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + # cannot cast to an unsigned integer because + # we have a negative number + data = ['-1', 2, 3] + expected = np.array([-1, 2, 3], dtype=np.int64) + res = pd.to_numeric(data, downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + # cannot cast to an integer (signed or unsigned) + # because we have a float number + data = (['1.1', 2, 3], + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00]) + expected = (np.array([1.1, 2, 3], dtype=np.float64), + np.array([10000.0, 20000, 3000, + 40000.36, 50000, 50000.00], dtype=np.float64)) + + for _data, _expected in zip(data, expected): + for downcast in ('integer', 'signed', 'unsigned'): + res = pd.to_numeric(_data, 
downcast=downcast) + tm.assert_numpy_array_equal(res, _expected) + + # the smallest integer dtype need not be np.(u)int8 + data = ['256', 257, 258] + + for downcast, expected_dtype in zip( + ['integer', 'signed', 'unsigned'], + [np.int16, np.int16, np.uint16]): + expected = np.array([256, 257, 258], dtype=expected_dtype) + res = pd.to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + def test_downcast_limits(self): + # Test the limits of each downcast. Bug: #14401. + # Check to make sure numpy is new enough to run this test. + if _np_version_under1p9: + pytest.skip("Numpy version is under 1.9") + + i = 'integer' + u = 'unsigned' + dtype_downcast_min_max = [ + ('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]), + ('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]), + ('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]), + ('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]), + ('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]), + ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]), + ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]), + ('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]), + ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]), + ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]), + ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]), + ('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]), + ('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]), + ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]), + ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), + ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), + ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]) + ] + + for dtype, downcast, min_max in dtype_downcast_min_max: + series = pd.to_numeric(pd.Series(min_max), downcast=downcast) + assert series.dtype == dtype diff --git a/pandas/tests/dtypes/test_convert.py 
b/pandas/tests/dtypes/test_convert.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/reshape/__init__.py b/pandas/tests/reshape/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/tools/data/allow_exact_matches.csv b/pandas/tests/reshape/data/allow_exact_matches.csv similarity index 100% rename from pandas/tests/tools/data/allow_exact_matches.csv rename to pandas/tests/reshape/data/allow_exact_matches.csv diff --git a/pandas/tests/tools/data/allow_exact_matches_and_tolerance.csv b/pandas/tests/reshape/data/allow_exact_matches_and_tolerance.csv similarity index 100% rename from pandas/tests/tools/data/allow_exact_matches_and_tolerance.csv rename to pandas/tests/reshape/data/allow_exact_matches_and_tolerance.csv diff --git a/pandas/tests/tools/data/asof.csv b/pandas/tests/reshape/data/asof.csv similarity index 100% rename from pandas/tests/tools/data/asof.csv rename to pandas/tests/reshape/data/asof.csv diff --git a/pandas/tests/tools/data/asof2.csv b/pandas/tests/reshape/data/asof2.csv similarity index 100% rename from pandas/tests/tools/data/asof2.csv rename to pandas/tests/reshape/data/asof2.csv diff --git a/pandas/tests/tools/data/cut_data.csv b/pandas/tests/reshape/data/cut_data.csv similarity index 100% rename from pandas/tests/tools/data/cut_data.csv rename to pandas/tests/reshape/data/cut_data.csv diff --git a/pandas/tests/tools/data/quotes.csv b/pandas/tests/reshape/data/quotes.csv similarity index 100% rename from pandas/tests/tools/data/quotes.csv rename to pandas/tests/reshape/data/quotes.csv diff --git a/pandas/tests/tools/data/quotes2.csv b/pandas/tests/reshape/data/quotes2.csv similarity index 100% rename from pandas/tests/tools/data/quotes2.csv rename to pandas/tests/reshape/data/quotes2.csv diff --git a/pandas/tests/tools/data/tolerance.csv b/pandas/tests/reshape/data/tolerance.csv similarity index 100% rename from pandas/tests/tools/data/tolerance.csv rename to 
pandas/tests/reshape/data/tolerance.csv diff --git a/pandas/tests/tools/data/trades.csv b/pandas/tests/reshape/data/trades.csv similarity index 100% rename from pandas/tests/tools/data/trades.csv rename to pandas/tests/reshape/data/trades.csv diff --git a/pandas/tests/tools/data/trades2.csv b/pandas/tests/reshape/data/trades2.csv similarity index 100% rename from pandas/tests/tools/data/trades2.csv rename to pandas/tests/reshape/data/trades2.csv diff --git a/pandas/tests/tools/test_concat.py b/pandas/tests/reshape/test_concat.py similarity index 100% rename from pandas/tests/tools/test_concat.py rename to pandas/tests/reshape/test_concat.py diff --git a/pandas/tests/tools/test_hashing.py b/pandas/tests/reshape/test_hashing.py similarity index 100% rename from pandas/tests/tools/test_hashing.py rename to pandas/tests/reshape/test_hashing.py diff --git a/pandas/tests/tools/test_join.py b/pandas/tests/reshape/test_join.py similarity index 99% rename from pandas/tests/tools/test_join.py rename to pandas/tests/reshape/test_join.py index 8571a1ff16701..51e5beadee8a7 100644 --- a/pandas/tests/tools/test_join.py +++ b/pandas/tests/reshape/test_join.py @@ -12,7 +12,7 @@ from pandas._libs import join as libjoin import pandas.util.testing as tm -from pandas.tests.tools.test_merge import get_test_data, N, NGROUPS +from pandas.tests.reshape.test_merge import get_test_data, N, NGROUPS a_ = np.array diff --git a/pandas/tests/tools/test_merge.py b/pandas/tests/reshape/test_merge.py similarity index 99% rename from pandas/tests/tools/test_merge.py rename to pandas/tests/reshape/test_merge.py index cc4a97df33801..67a8c5084eef6 100644 --- a/pandas/tests/tools/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -9,8 +9,8 @@ import pandas as pd from pandas.compat import lrange, lzip -from pandas.tools.concat import concat -from pandas.tools.merge import merge, MergeError +from pandas.core.reshape.concat import concat +from pandas.core.reshape.merge import merge, MergeError from 
pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype diff --git a/pandas/tests/tools/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py similarity index 99% rename from pandas/tests/tools/test_merge_asof.py rename to pandas/tests/reshape/test_merge_asof.py index c9460cc74c94a..865c413bad11e 100644 --- a/pandas/tests/tools/test_merge_asof.py +++ b/pandas/tests/reshape/test_merge_asof.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import (merge_asof, read_csv, to_datetime, Timedelta) -from pandas.tools.merge import MergeError +from pandas.core.reshape.merge import MergeError from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal diff --git a/pandas/tests/tools/test_merge_ordered.py b/pandas/tests/reshape/test_merge_ordered.py similarity index 100% rename from pandas/tests/tools/test_merge_ordered.py rename to pandas/tests/reshape/test_merge_ordered.py diff --git a/pandas/tests/tools/test_pivot.py b/pandas/tests/reshape/test_pivot.py similarity index 99% rename from pandas/tests/tools/test_pivot.py rename to pandas/tests/reshape/test_pivot.py index c8dfaf5e29bc6..88d25b9d053c3 100644 --- a/pandas/tests/tools/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import (DataFrame, Series, Index, MultiIndex, Grouper, date_range, concat) -from pandas.tools.pivot import pivot_table, crosstab +from pandas.core.reshape.pivot import pivot_table, crosstab from pandas.compat import range, product import pandas.util.testing as tm from pandas.tseries.util import pivot_annual, isleapyear diff --git a/pandas/tests/test_reshape.py b/pandas/tests/reshape/test_reshape.py similarity index 99% rename from pandas/tests/test_reshape.py rename to pandas/tests/reshape/test_reshape.py index ee255c1863b41..0eb1e5ff3cf11 100644 --- 
a/pandas/tests/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -9,7 +9,8 @@ from pandas.util.testing import assert_frame_equal -from pandas.core.reshape import (melt, lreshape, get_dummies, wide_to_long) +from pandas.core.reshape.reshape import ( + melt, lreshape, get_dummies, wide_to_long) import pandas.util.testing as tm from pandas.compat import range, u @@ -662,7 +663,7 @@ def test_preserve_categorical_dtype(self): expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], index=midx, columns=cidx) - from pandas.core.reshape import make_axis_dummies + from pandas.core.reshape.reshape import make_axis_dummies result = make_axis_dummies(df) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/tools/test_tile.py b/pandas/tests/reshape/test_tile.py similarity index 99% rename from pandas/tests/tools/test_tile.py rename to pandas/tests/reshape/test_tile.py index 742568870c3c3..1cdd87dc67bd8 100644 --- a/pandas/tests/tools/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -10,7 +10,7 @@ import pandas.util.testing as tm from pandas.core.algorithms import quantile -import pandas.tools.tile as tmod +import pandas.core.reshape.tile as tmod class TestCut(tm.TestCase): diff --git a/pandas/tests/tools/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py similarity index 100% rename from pandas/tests/tools/test_union_categoricals.py rename to pandas/tests/reshape/test_union_categoricals.py diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py new file mode 100644 index 0000000000000..fd3a683e80397 --- /dev/null +++ b/pandas/tests/reshape/test_util.py @@ -0,0 +1,49 @@ + +import numpy as np +from pandas import date_range, Index +import pandas.util.testing as tm +from pandas.core.reshape.util import cartesian_product + + +class TestCartesianProduct(tm.TestCase): + + def test_simple(self): + x, y = list('ABC'), [1, 22] + result1, result2 = cartesian_product([x, y]) + expected1 = np.array(['A', 
'A', 'B', 'B', 'C', 'C']) + expected2 = np.array([1, 22, 1, 22, 1, 22]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_datetimeindex(self): + # regression test for GitHub issue #6439 + # make sure that the ordering on datetimeindex is consistent + x = date_range('2000-01-01', periods=2) + result1, result2 = [Index(y).day for y in cartesian_product([x, x])] + expected1 = Index([1, 1, 2, 2]) + expected2 = Index([1, 2, 1, 2]) + tm.assert_index_equal(result1, expected1) + tm.assert_index_equal(result2, expected2) + + def test_empty(self): + # product of empty factors + X = [[], [0, 1], []] + Y = [[], [], ['a', 'b', 'c']] + for x, y in zip(X, Y): + expected1 = np.array([], dtype=np.asarray(x).dtype) + expected2 = np.array([], dtype=np.asarray(y).dtype) + result1, result2 = cartesian_product([x, y]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + # empty product (empty input): + result = cartesian_product([]) + expected = [] + assert result == expected + + def test_invalid_input(self): + invalid_inputs = [1, [1], [1, 2], [[1], 2], + 'a', ['a'], ['a', 'b'], [['a'], 'b']] + msg = "Input must be a list-like of list-likes" + for X in invalid_inputs: + tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X) diff --git a/pandas/tests/sparse/test_series.py b/pandas/tests/sparse/test_series.py index f5a27a8161909..b8e74073e9eb9 100644 --- a/pandas/tests/sparse/test_series.py +++ b/pandas/tests/sparse/test_series.py @@ -12,7 +12,7 @@ import pandas.util.testing as tm from pandas.compat import range from pandas import compat -from pandas.tools.util import cartesian_product +from pandas.core.reshape.util import cartesian_product import pandas.core.sparse.frame as spf diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index cd1ec915d3aeb..6f4c145d74cd1 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -583,7 +583,7 
@@ class TestValueCounts(tm.TestCase): def test_value_counts(self): np.random.seed(1234) - from pandas.tools.tile import cut + from pandas.core.reshape.tile import cut arr = np.random.randn(4) factor = cut(arr, 4) diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py deleted file mode 100644 index d740d8bd26581..0000000000000 --- a/pandas/tests/test_generic.py +++ /dev/null @@ -1,2076 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: disable-msg=E1101,W0612 - -from operator import methodcaller -from copy import copy, deepcopy -from warnings import catch_warnings - -import pytest -import numpy as np -from numpy import nan -import pandas as pd - -from distutils.version import LooseVersion -from pandas.core.dtypes.common import is_scalar -from pandas import (Index, Series, DataFrame, Panel, isnull, - date_range, period_range, Panel4D) -from pandas.core.index import MultiIndex - -import pandas.io.formats.printing as printing - -from pandas.compat import range, zip, PY3 -from pandas import compat -from pandas.util.testing import (assertRaisesRegexp, - assert_series_equal, - assert_frame_equal, - assert_panel_equal, - assert_panel4d_equal, - assert_almost_equal) - -import pandas.util.testing as tm - - -# ---------------------------------------------------------------------- -# Generic types test cases - - -class Generic(object): - - def setUp(self): - pass - - @property - def _ndim(self): - return self._typ._AXIS_LEN - - def _axes(self): - """ return the axes for my object typ """ - return self._typ._AXIS_ORDERS - - def _construct(self, shape, value=None, dtype=None, **kwargs): - """ construct an object for the given shape - if value is specified use that if its a scalar - if value is an array, repeat it as needed """ - - if isinstance(shape, int): - shape = tuple([shape] * self._ndim) - if value is not None: - if is_scalar(value): - if value == 'empty': - arr = None - - # remove the info axis - kwargs.pop(self._typ._info_axis_name, None) - else: - arr = 
np.empty(shape, dtype=dtype) - arr.fill(value) - else: - fshape = np.prod(shape) - arr = value.ravel() - new_shape = fshape / arr.shape[0] - if fshape % arr.shape[0] != 0: - raise Exception("invalid value passed in _construct") - - arr = np.repeat(arr, new_shape).reshape(shape) - else: - arr = np.random.randn(*shape) - return self._typ(arr, dtype=dtype, **kwargs) - - def _compare(self, result, expected): - self._comparator(result, expected) - - def test_rename(self): - - # single axis - idx = list('ABCD') - # relabeling values passed into self.rename - args = [ - str.lower, - {x: x.lower() for x in idx}, - Series({x: x.lower() for x in idx}), - ] - - for axis in self._axes(): - kwargs = {axis: idx} - obj = self._construct(4, **kwargs) - - for arg in args: - # rename a single axis - result = obj.rename(**{axis: arg}) - expected = obj.copy() - setattr(expected, axis, list('abcd')) - self._compare(result, expected) - - # multiple axes at once - - def test_rename_axis(self): - idx = list('ABCD') - # relabeling values passed into self.rename - args = [ - str.lower, - {x: x.lower() for x in idx}, - Series({x: x.lower() for x in idx}), - ] - - for axis in self._axes(): - kwargs = {axis: idx} - obj = self._construct(4, **kwargs) - - for arg in args: - # rename a single axis - result = obj.rename_axis(arg, axis=axis) - expected = obj.copy() - setattr(expected, axis, list('abcd')) - self._compare(result, expected) - # scalar values - for arg in ['foo', None]: - result = obj.rename_axis(arg, axis=axis) - expected = obj.copy() - getattr(expected, axis).name = arg - self._compare(result, expected) - - def test_get_numeric_data(self): - - n = 4 - kwargs = {} - for i in range(self._ndim): - kwargs[self._typ._AXIS_NAMES[i]] = list(range(n)) - - # get the numeric data - o = self._construct(n, **kwargs) - result = o._get_numeric_data() - self._compare(result, o) - - # non-inclusion - result = o._get_bool_data() - expected = self._construct(n, value='empty', **kwargs) - 
self._compare(result, expected) - - # get the bool data - arr = np.array([True, True, False, True]) - o = self._construct(n, value=arr, **kwargs) - result = o._get_numeric_data() - self._compare(result, o) - - # _get_numeric_data is includes _get_bool_data, so can't test for - # non-inclusion - - def test_get_default(self): - - # GH 7725 - d0 = "a", "b", "c", "d" - d1 = np.arange(4, dtype='int64') - others = "e", 10 - - for data, index in ((d0, d1), (d1, d0)): - s = Series(data, index=index) - for i, d in zip(index, data): - self.assertEqual(s.get(i), d) - self.assertEqual(s.get(i, d), d) - self.assertEqual(s.get(i, "z"), d) - for other in others: - self.assertEqual(s.get(other, "z"), "z") - self.assertEqual(s.get(other, other), other) - - def test_nonzero(self): - - # GH 4633 - # look at the boolean/nonzero behavior for objects - obj = self._construct(shape=4) - self.assertRaises(ValueError, lambda: bool(obj == 0)) - self.assertRaises(ValueError, lambda: bool(obj == 1)) - self.assertRaises(ValueError, lambda: bool(obj)) - - obj = self._construct(shape=4, value=1) - self.assertRaises(ValueError, lambda: bool(obj == 0)) - self.assertRaises(ValueError, lambda: bool(obj == 1)) - self.assertRaises(ValueError, lambda: bool(obj)) - - obj = self._construct(shape=4, value=np.nan) - self.assertRaises(ValueError, lambda: bool(obj == 0)) - self.assertRaises(ValueError, lambda: bool(obj == 1)) - self.assertRaises(ValueError, lambda: bool(obj)) - - # empty - obj = self._construct(shape=0) - self.assertRaises(ValueError, lambda: bool(obj)) - - # invalid behaviors - - obj1 = self._construct(shape=4, value=1) - obj2 = self._construct(shape=4, value=1) - - def f(): - if obj1: - printing.pprint_thing("this works and shouldn't") - - self.assertRaises(ValueError, f) - self.assertRaises(ValueError, lambda: obj1 and obj2) - self.assertRaises(ValueError, lambda: obj1 or obj2) - self.assertRaises(ValueError, lambda: not obj1) - - def test_numpy_1_7_compat_numeric_methods(self): - # GH 
4435 - # numpy in 1.7 tries to pass addtional arguments to pandas functions - - o = self._construct(shape=4) - for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum', - 'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax', - 'cummin', 'all', 'any']: - f = getattr(np, op, None) - if f is not None: - f(o) - - def test_downcast(self): - # test close downcasting - - o = self._construct(shape=4, value=9, dtype=np.int64) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, o) - - o = self._construct(shape=4, value=9.) - expected = o.astype(np.int64) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, expected) - - o = self._construct(shape=4, value=9.5) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - self._compare(result, o) - - # are close - o = self._construct(shape=4, value=9.000000000005) - result = o.copy() - result._data = o._data.downcast(dtypes='infer') - expected = o.astype(np.int64) - self._compare(result, expected) - - def test_constructor_compound_dtypes(self): - # GH 5191 - # compound dtypes should raise not-implementederror - - def f(dtype): - return self._construct(shape=3, dtype=dtype) - - self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"), - ("B", "str"), - ("C", "int32")]) - - # these work (though results may be unexpected) - f('int64') - f('float64') - f('M8[ns]') - - def check_metadata(self, x, y=None): - for m in x._metadata: - v = getattr(x, m, None) - if y is None: - self.assertIsNone(v) - else: - self.assertEqual(v, getattr(y, m, None)) - - def test_metadata_propagation(self): - # check that the metadata matches up on the resulting ops - - o = self._construct(shape=3) - o.name = 'foo' - o2 = self._construct(shape=3) - o2.name = 'bar' - - # TODO - # Once panel can do non-trivial combine operations - # (currently there is an a raise in the Panel arith_ops to prevent - # this, though it actually does work) - # 
can remove all of these try: except: blocks on the actual operations - - # ---------- - # preserving - # ---------- - - # simple ops with scalars - for op in ['__add__', '__sub__', '__truediv__', '__mul__']: - result = getattr(o, op)(1) - self.check_metadata(o, result) - - # ops with like - for op in ['__add__', '__sub__', '__truediv__', '__mul__']: - try: - result = getattr(o, op)(o) - self.check_metadata(o, result) - except (ValueError, AttributeError): - pass - - # simple boolean - for op in ['__eq__', '__le__', '__ge__']: - v1 = getattr(o, op)(o) - self.check_metadata(o, v1) - - try: - self.check_metadata(o, v1 & v1) - except (ValueError): - pass - - try: - self.check_metadata(o, v1 | v1) - except (ValueError): - pass - - # combine_first - try: - result = o.combine_first(o2) - self.check_metadata(o, result) - except (AttributeError): - pass - - # --------------------------- - # non-preserving (by default) - # --------------------------- - - # add non-like - try: - result = o + o2 - self.check_metadata(result) - except (ValueError, AttributeError): - pass - - # simple boolean - for op in ['__eq__', '__le__', '__ge__']: - - # this is a name matching op - v1 = getattr(o, op)(o) - - v2 = getattr(o, op)(o2) - self.check_metadata(v2) - - try: - self.check_metadata(v1 & v2) - except (ValueError): - pass - - try: - self.check_metadata(v1 | v2) - except (ValueError): - pass - - def test_head_tail(self): - # GH5370 - - o = self._construct(shape=10) - - # check all index types - for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex, - tm.makeUnicodeIndex, tm.makeDateIndex, - tm.makePeriodIndex]: - axis = o._get_axis_name(0) - setattr(o, axis, index(len(getattr(o, axis)))) - - # Panel + dims - try: - o.head() - except (NotImplementedError): - pytest.skip('not implemented on {0}'.format( - o.__class__.__name__)) - - self._compare(o.head(), o.iloc[:5]) - self._compare(o.tail(), o.iloc[-5:]) - - # 0-len - self._compare(o.head(0), o.iloc[0:0]) - 
self._compare(o.tail(0), o.iloc[0:0]) - - # bounded - self._compare(o.head(len(o) + 1), o) - self._compare(o.tail(len(o) + 1), o) - - # neg index - self._compare(o.head(-3), o.head(7)) - self._compare(o.tail(-3), o.tail(7)) - - def test_sample(self): - # Fixes issue: 2419 - - o = self._construct(shape=10) - - ### - # Check behavior of random_state argument - ### - - # Check for stability when receives seed or random state -- run 10 - # times. - for test in range(10): - seed = np.random.randint(0, 100) - self._compare( - o.sample(n=4, random_state=seed), o.sample(n=4, - random_state=seed)) - self._compare( - o.sample(frac=0.7, random_state=seed), o.sample( - frac=0.7, random_state=seed)) - - self._compare( - o.sample(n=4, random_state=np.random.RandomState(test)), - o.sample(n=4, random_state=np.random.RandomState(test))) - - self._compare( - o.sample(frac=0.7, random_state=np.random.RandomState(test)), - o.sample(frac=0.7, random_state=np.random.RandomState(test))) - - os1, os2 = [], [] - for _ in range(2): - np.random.seed(test) - os1.append(o.sample(n=4)) - os2.append(o.sample(frac=0.7)) - self._compare(*os1) - self._compare(*os2) - - # Check for error when random_state argument invalid. 
- with tm.assertRaises(ValueError): - o.sample(random_state='astring!') - - ### - # Check behavior of `frac` and `N` - ### - - # Giving both frac and N throws error - with tm.assertRaises(ValueError): - o.sample(n=3, frac=0.3) - - # Check that raises right error for negative lengths - with tm.assertRaises(ValueError): - o.sample(n=-3) - with tm.assertRaises(ValueError): - o.sample(frac=-0.3) - - # Make sure float values of `n` give error - with tm.assertRaises(ValueError): - o.sample(n=3.2) - - # Check lengths are right - self.assertTrue(len(o.sample(n=4) == 4)) - self.assertTrue(len(o.sample(frac=0.34) == 3)) - self.assertTrue(len(o.sample(frac=0.36) == 4)) - - ### - # Check weights - ### - - # Weight length must be right - with tm.assertRaises(ValueError): - o.sample(n=3, weights=[0, 1]) - - with tm.assertRaises(ValueError): - bad_weights = [0.5] * 11 - o.sample(n=3, weights=bad_weights) - - with tm.assertRaises(ValueError): - bad_weight_series = Series([0, 0, 0.2]) - o.sample(n=4, weights=bad_weight_series) - - # Check won't accept negative weights - with tm.assertRaises(ValueError): - bad_weights = [-0.1] * 10 - o.sample(n=3, weights=bad_weights) - - # Check inf and -inf throw errors: - with tm.assertRaises(ValueError): - weights_with_inf = [0.1] * 10 - weights_with_inf[0] = np.inf - o.sample(n=3, weights=weights_with_inf) - - with tm.assertRaises(ValueError): - weights_with_ninf = [0.1] * 10 - weights_with_ninf[0] = -np.inf - o.sample(n=3, weights=weights_with_ninf) - - # All zeros raises errors - zero_weights = [0] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=zero_weights) - - # All missing weights - nan_weights = [np.nan] * 10 - with tm.assertRaises(ValueError): - o.sample(n=3, weights=nan_weights) - - # Check np.nan are replaced by zeros. - weights_with_nan = [np.nan] * 10 - weights_with_nan[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6]) - - # Check None are also replaced by zeros. 
- weights_with_None = [None] * 10 - weights_with_None[5] = 0.5 - self._compare( - o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6]) - - def test_size_compat(self): - # GH8846 - # size property should be defined - - o = self._construct(shape=10) - self.assertTrue(o.size == np.prod(o.shape)) - self.assertTrue(o.size == 10 ** len(o.axes)) - - def test_split_compat(self): - # xref GH8846 - o = self._construct(shape=10) - self.assertTrue(len(np.array_split(o, 5)) == 5) - self.assertTrue(len(np.array_split(o, 2)) == 2) - - def test_unexpected_keyword(self): # GH8597 - df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe']) - ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) - ts = df['joe'].copy() - ts[2] = np.nan - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - df.drop('joe', axis=1, in_place=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - df.reindex([1, 0], inplace=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - ca.fillna(0, inplace=True) - - with assertRaisesRegexp(TypeError, 'unexpected keyword'): - ts.fillna(0, in_place=True) - - # See gh-12301 - def test_stat_unexpected_keyword(self): - obj = self._construct(5) - starwars = 'Star Wars' - errmsg = 'unexpected keyword' - - with assertRaisesRegexp(TypeError, errmsg): - obj.max(epic=starwars) # stat_function - with assertRaisesRegexp(TypeError, errmsg): - obj.var(epic=starwars) # stat_function_ddof - with assertRaisesRegexp(TypeError, errmsg): - obj.sum(epic=starwars) # cum_function - with assertRaisesRegexp(TypeError, errmsg): - obj.any(epic=starwars) # logical_function - - def test_api_compat(self): - - # GH 12021 - # compat for __name__, __qualname__ - - obj = self._construct(5) - for func in ['sum', 'cumsum', 'any', 'var']: - f = getattr(obj, func) - self.assertEqual(f.__name__, func) - if PY3: - self.assertTrue(f.__qualname__.endswith(func)) - - def test_stat_non_defaults_args(self): - obj = self._construct(5) - out = np.array([0]) - errmsg = 
"the 'out' parameter is not supported" - - with assertRaisesRegexp(ValueError, errmsg): - obj.max(out=out) # stat_function - with assertRaisesRegexp(ValueError, errmsg): - obj.var(out=out) # stat_function_ddof - with assertRaisesRegexp(ValueError, errmsg): - obj.sum(out=out) # cum_function - with assertRaisesRegexp(ValueError, errmsg): - obj.any(out=out) # logical_function - - def test_clip(self): - lower = 1 - upper = 3 - col = np.arange(5) - - obj = self._construct(len(col), value=col) - - if isinstance(obj, Panel): - msg = "clip is not supported yet for panels" - tm.assertRaisesRegexp(NotImplementedError, msg, - obj.clip, lower=lower, - upper=upper) - - else: - out = obj.clip(lower=lower, upper=upper) - expected = self._construct(len(col), value=col - .clip(lower, upper)) - self._compare(out, expected) - - bad_axis = 'foo' - msg = ('No axis named {axis} ' - 'for object').format(axis=bad_axis) - assertRaisesRegexp(ValueError, msg, obj.clip, - lower=lower, upper=upper, - axis=bad_axis) - - def test_truncate_out_of_bounds(self): - # GH11382 - - # small - shape = [int(2e3)] + ([1] * (self._ndim - 1)) - small = self._construct(shape, dtype='int8') - self._compare(small.truncate(), small) - self._compare(small.truncate(before=0, after=3e3), small) - self._compare(small.truncate(before=-1, after=2e3), small) - - # big - shape = [int(2e6)] + ([1] * (self._ndim - 1)) - big = self._construct(shape, dtype='int8') - self._compare(big.truncate(), big) - self._compare(big.truncate(before=0, after=3e6), big) - self._compare(big.truncate(before=-1, after=2e6), big) - - def test_numpy_clip(self): - lower = 1 - upper = 3 - col = np.arange(5) - - obj = self._construct(len(col), value=col) - - if isinstance(obj, Panel): - msg = "clip is not supported yet for panels" - tm.assertRaisesRegexp(NotImplementedError, msg, - np.clip, obj, - lower, upper) - else: - out = np.clip(obj, lower, upper) - expected = self._construct(len(col), value=col - .clip(lower, upper)) - self._compare(out, 
expected) - - msg = "the 'out' parameter is not supported" - tm.assertRaisesRegexp(ValueError, msg, - np.clip, obj, - lower, upper, out=col) - - def test_validate_bool_args(self): - df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) - invalid_values = [1, "True", [1, 2, 3], 5.0] - - for value in invalid_values: - with self.assertRaises(ValueError): - super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'}, - axis=1, inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).drop('a', axis=1, inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).sort_index(inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df)._consolidate(inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).fillna(value=0, inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).replace(to_replace=1, value=7, - inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).interpolate(inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df)._where(cond=df.a > 2, inplace=value) - - with self.assertRaises(ValueError): - super(DataFrame, df).mask(cond=df.a > 2, inplace=value) - - def test_copy_and_deepcopy(self): - # GH 15444 - for shape in [0, 1, 2]: - obj = self._construct(shape) - for func in [copy, - deepcopy, - lambda x: x.copy(deep=False), - lambda x: x.copy(deep=True)]: - obj_copy = func(obj) - assert obj_copy is not obj - self._compare(obj_copy, obj) - - -class TestSeries(tm.TestCase, Generic): - _typ = Series - _comparator = lambda self, x, y: assert_series_equal(x, y) - - def setUp(self): - self.ts = tm.makeTimeSeries() # Was at top level in test_series - self.ts.name = 'ts' - - self.series = tm.makeStringSeries() - self.series.name = 'series' - - def test_rename_mi(self): - s = Series([11, 21, 31], - index=MultiIndex.from_tuples( - [("A", x) for x in ["a", "B", "c"]])) - s.rename(str.lower) - - def test_set_axis_name(self): - s = Series([1, 
2, 3], index=['a', 'b', 'c']) - funcs = ['rename_axis', '_set_axis_name'] - name = 'foo' - for func in funcs: - result = methodcaller(func, name)(s) - self.assertTrue(s.index.name is None) - self.assertEqual(result.index.name, name) - - def test_set_axis_name_mi(self): - s = Series([11, 21, 31], index=MultiIndex.from_tuples( - [("A", x) for x in ["a", "B", "c"]], - names=['l1', 'l2']) - ) - funcs = ['rename_axis', '_set_axis_name'] - for func in funcs: - result = methodcaller(func, ['L1', 'L2'])(s) - self.assertTrue(s.index.name is None) - self.assertEqual(s.index.names, ['l1', 'l2']) - self.assertTrue(result.index.name is None) - self.assertTrue(result.index.names, ['L1', 'L2']) - - def test_set_axis_name_raises(self): - s = pd.Series([1]) - with tm.assertRaises(ValueError): - s._set_axis_name(name='a', axis=1) - - def test_get_numeric_data_preserve_dtype(self): - - # get the numeric data - o = Series([1, 2, 3]) - result = o._get_numeric_data() - self._compare(result, o) - - o = Series([1, '2', 3.]) - result = o._get_numeric_data() - expected = Series([], dtype=object, index=pd.Index([], dtype=object)) - self._compare(result, expected) - - o = Series([True, False, True]) - result = o._get_numeric_data() - self._compare(result, o) - - o = Series([True, False, True]) - result = o._get_bool_data() - self._compare(result, o) - - o = Series(date_range('20130101', periods=3)) - result = o._get_numeric_data() - expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object)) - self._compare(result, expected) - - def test_nonzero_single_element(self): - - # allow single item via bool method - s = Series([True]) - self.assertTrue(s.bool()) - - s = Series([False]) - self.assertFalse(s.bool()) - - # single item nan to raise - for s in [Series([np.nan]), Series([pd.NaT]), Series([True]), - Series([False])]: - self.assertRaises(ValueError, lambda: bool(s)) - - for s in [Series([np.nan]), Series([pd.NaT])]: - self.assertRaises(ValueError, lambda: s.bool()) - - # multiple 
bool are still an error - for s in [Series([True, True]), Series([False, False])]: - self.assertRaises(ValueError, lambda: bool(s)) - self.assertRaises(ValueError, lambda: s.bool()) - - # single non-bool are an error - for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]: - self.assertRaises(ValueError, lambda: bool(s)) - self.assertRaises(ValueError, lambda: s.bool()) - - def test_metadata_propagation_indiv(self): - # check that the metadata matches up on the resulting ops - - o = Series(range(3), range(3)) - o.name = 'foo' - o2 = Series(range(3), range(3)) - o2.name = 'bar' - - result = o.T - self.check_metadata(o, result) - - # resample - ts = Series(np.random.rand(1000), - index=date_range('20130101', periods=1000, freq='s'), - name='foo') - result = ts.resample('1T').mean() - self.check_metadata(ts, result) - - result = ts.resample('1T').min() - self.check_metadata(ts, result) - - result = ts.resample('1T').apply(lambda x: x.sum()) - self.check_metadata(ts, result) - - _metadata = Series._metadata - _finalize = Series.__finalize__ - Series._metadata = ['name', 'filename'] - o.filename = 'foo' - o2.filename = 'bar' - - def finalize(self, other, method=None, **kwargs): - for name in self._metadata: - if method == 'concat' and name == 'filename': - value = '+'.join([getattr( - o, name) for o in other.objs if getattr(o, name, None) - ]) - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, None)) - - return self - - Series.__finalize__ = finalize - - result = pd.concat([o, o2]) - self.assertEqual(result.filename, 'foo+bar') - self.assertIsNone(result.name) - - # reset - Series._metadata = _metadata - Series.__finalize__ = _finalize - - def test_describe(self): - self.series.describe() - self.ts.describe() - - def test_describe_objects(self): - s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a']) - result = s.describe() - expected = Series({'count': 7, 'unique': 4, - 'top': 'a', 'freq': 
3, 'second': 'b', - 'second_freq': 2}, index=result.index) - assert_series_equal(result, expected) - - dt = list(self.ts.index) - dt.append(dt[0]) - ser = Series(dt) - rs = ser.describe() - min_date = min(dt) - max_date = max(dt) - xp = Series({'count': len(dt), - 'unique': len(self.ts.index), - 'first': min_date, 'last': max_date, 'freq': 2, - 'top': min_date}, index=rs.index) - assert_series_equal(rs, xp) - - def test_describe_empty(self): - result = pd.Series().describe() - - self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) - - nanSeries = Series([np.nan]) - nanSeries.name = 'NaN' - result = nanSeries.describe() - self.assertEqual(result['count'], 0) - self.assertTrue(result.drop('count').isnull().all()) - - def test_describe_none(self): - noneSeries = Series([None]) - noneSeries.name = 'None' - expected = Series([0, 0], index=['count', 'unique'], name='None') - assert_series_equal(noneSeries.describe(), expected) - - def test_to_xarray(self): - - tm._skip_if_no_xarray() - import xarray - from xarray import DataArray - - s = Series([]) - s.index.name = 'foo' - result = s.to_xarray() - self.assertEqual(len(result), 0) - self.assertEqual(len(result.coords), 1) - assert_almost_equal(list(result.coords.keys()), ['foo']) - self.assertIsInstance(result, DataArray) - - def testit(index, check_index_type=True, check_categorical=True): - s = Series(range(6), index=index(6)) - s.index.name = 'foo' - result = s.to_xarray() - repr(result) - self.assertEqual(len(result), 6) - self.assertEqual(len(result.coords), 1) - assert_almost_equal(list(result.coords.keys()), ['foo']) - self.assertIsInstance(result, DataArray) - - # idempotency - assert_series_equal(result.to_series(), s, - check_index_type=check_index_type, - check_categorical=check_categorical) - - l = [tm.makeFloatIndex, tm.makeIntIndex, - tm.makeStringIndex, tm.makeUnicodeIndex, - tm.makeDateIndex, tm.makePeriodIndex, - tm.makeTimedeltaIndex] - - if 
LooseVersion(xarray.__version__) >= '0.8.0': - l.append(tm.makeCategoricalIndex) - - for index in l: - testit(index) - - s = Series(range(6)) - s.index.name = 'foo' - s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)], - names=['one', 'two']) - result = s.to_xarray() - self.assertEqual(len(result), 2) - assert_almost_equal(list(result.coords.keys()), ['one', 'two']) - self.assertIsInstance(result, DataArray) - assert_series_equal(result.to_series(), s) - - -class TestDataFrame(tm.TestCase, Generic): - _typ = DataFrame - _comparator = lambda self, x, y: assert_frame_equal(x, y) - - def test_rename_mi(self): - df = DataFrame([ - 11, 21, 31 - ], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]])) - df.rename(str.lower) - - def test_set_axis_name(self): - df = pd.DataFrame([[1, 2], [3, 4]]) - funcs = ['_set_axis_name', 'rename_axis'] - for func in funcs: - result = methodcaller(func, 'foo')(df) - self.assertTrue(df.index.name is None) - self.assertEqual(result.index.name, 'foo') - - result = methodcaller(func, 'cols', axis=1)(df) - self.assertTrue(df.columns.name is None) - self.assertEqual(result.columns.name, 'cols') - - def test_set_axis_name_mi(self): - df = DataFrame( - np.empty((3, 3)), - index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]), - columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')]) - ) - - level_names = ['L1', 'L2'] - funcs = ['_set_axis_name', 'rename_axis'] - for func in funcs: - result = methodcaller(func, level_names)(df) - self.assertEqual(result.index.names, level_names) - self.assertEqual(result.columns.names, [None, None]) - - result = methodcaller(func, level_names, axis=1)(df) - self.assertEqual(result.columns.names, ["L1", "L2"]) - self.assertEqual(result.index.names, [None, None]) - - def test_nonzero_single_element(self): - - # allow single item via bool method - df = DataFrame([[True]]) - self.assertTrue(df.bool()) - - df = DataFrame([[False]]) - self.assertFalse(df.bool()) - - df = 
DataFrame([[False, False]]) - self.assertRaises(ValueError, lambda: df.bool()) - self.assertRaises(ValueError, lambda: bool(df)) - - def test_get_numeric_data_preserve_dtype(self): - - # get the numeric data - o = DataFrame({'A': [1, '2', 3.]}) - result = o._get_numeric_data() - expected = DataFrame(index=[0, 1, 2], dtype=object) - self._compare(result, expected) - - def test_describe(self): - tm.makeDataFrame().describe() - tm.makeMixedDataFrame().describe() - tm.makeTimeDataFrame().describe() - - def test_describe_percentiles_percent_or_raw(self): - msg = 'percentiles should all be in the interval \\[0, 1\\]' - - df = tm.makeDataFrame() - with tm.assertRaisesRegexp(ValueError, msg): - df.describe(percentiles=[10, 50, 100]) - - with tm.assertRaisesRegexp(ValueError, msg): - df.describe(percentiles=[2]) - - with tm.assertRaisesRegexp(ValueError, msg): - df.describe(percentiles=[-2]) - - def test_describe_percentiles_equivalence(self): - df = tm.makeDataFrame() - d1 = df.describe() - d2 = df.describe(percentiles=[.25, .75]) - assert_frame_equal(d1, d2) - - def test_describe_percentiles_insert_median(self): - df = tm.makeDataFrame() - d1 = df.describe(percentiles=[.25, .75]) - d2 = df.describe(percentiles=[.25, .5, .75]) - assert_frame_equal(d1, d2) - self.assertTrue('25%' in d1.index) - self.assertTrue('75%' in d2.index) - - # none above - d1 = df.describe(percentiles=[.25, .45]) - d2 = df.describe(percentiles=[.25, .45, .5]) - assert_frame_equal(d1, d2) - self.assertTrue('25%' in d1.index) - self.assertTrue('45%' in d2.index) - - # none below - d1 = df.describe(percentiles=[.75, 1]) - d2 = df.describe(percentiles=[.5, .75, 1]) - assert_frame_equal(d1, d2) - self.assertTrue('75%' in d1.index) - self.assertTrue('100%' in d2.index) - - # edge - d1 = df.describe(percentiles=[0, 1]) - d2 = df.describe(percentiles=[0, .5, 1]) - assert_frame_equal(d1, d2) - self.assertTrue('0%' in d1.index) - self.assertTrue('100%' in d2.index) - - def 
test_describe_percentiles_insert_median_ndarray(self): - # GH14908 - df = tm.makeDataFrame() - result = df.describe(percentiles=np.array([.25, .75])) - expected = df.describe(percentiles=[.25, .75]) - assert_frame_equal(result, expected) - - def test_describe_percentiles_unique(self): - # GH13104 - df = tm.makeDataFrame() - with self.assertRaises(ValueError): - df.describe(percentiles=[0.1, 0.2, 0.4, 0.5, 0.2, 0.6]) - with self.assertRaises(ValueError): - df.describe(percentiles=[0.1, 0.2, 0.4, 0.2, 0.6]) - - def test_describe_percentiles_formatting(self): - # GH13104 - df = tm.makeDataFrame() - - # default - result = df.describe().index - expected = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%', - 'max'], - dtype='object') - tm.assert_index_equal(result, expected) - - result = df.describe(percentiles=[0.0001, 0.0005, 0.001, 0.999, - 0.9995, 0.9999]).index - expected = Index(['count', 'mean', 'std', 'min', '0.01%', '0.05%', - '0.1%', '50%', '99.9%', '99.95%', '99.99%', 'max'], - dtype='object') - tm.assert_index_equal(result, expected) - - result = df.describe(percentiles=[0.00499, 0.005, 0.25, 0.50, - 0.75]).index - expected = Index(['count', 'mean', 'std', 'min', '0.499%', '0.5%', - '25%', '50%', '75%', 'max'], - dtype='object') - tm.assert_index_equal(result, expected) - - result = df.describe(percentiles=[0.00499, 0.01001, 0.25, 0.50, - 0.75]).index - expected = Index(['count', 'mean', 'std', 'min', '0.5%', '1.0%', - '25%', '50%', '75%', 'max'], - dtype='object') - tm.assert_index_equal(result, expected) - - def test_describe_column_index_type(self): - # GH13288 - df = pd.DataFrame([1, 2, 3, 4]) - df.columns = pd.Index([0], dtype=object) - result = df.describe().columns - expected = Index([0], dtype=object) - tm.assert_index_equal(result, expected) - - df = pd.DataFrame({'A': list("BCDE"), 0: [1, 2, 3, 4]}) - result = df.describe().columns - expected = Index([0], dtype=object) - tm.assert_index_equal(result, expected) - - def 
test_describe_no_numeric(self): - df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8, - 'B': ['a', 'b', 'c', 'd'] * 6}) - desc = df.describe() - expected = DataFrame(dict((k, v.describe()) - for k, v in compat.iteritems(df)), - columns=df.columns) - assert_frame_equal(desc, expected) - - ts = tm.makeTimeSeries() - df = DataFrame({'time': ts.index}) - desc = df.describe() - self.assertEqual(desc.time['first'], min(ts.index)) - - def test_describe_empty(self): - df = DataFrame() - tm.assertRaisesRegexp(ValueError, 'DataFrame without columns', - df.describe) - - df = DataFrame(columns=['A', 'B']) - result = df.describe() - expected = DataFrame(0, columns=['A', 'B'], index=['count', 'unique']) - tm.assert_frame_equal(result, expected) - - def test_describe_empty_int_columns(self): - df = DataFrame([[0, 1], [1, 2]]) - desc = df[df[0] < 0].describe() # works - assert_series_equal(desc.xs('count'), - Series([0, 0], dtype=float, name='count')) - self.assertTrue(isnull(desc.iloc[1:]).all().all()) - - def test_describe_objects(self): - df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']}) - result = df.describe() - expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]}, - index=['count', 'unique', 'top', 'freq']) - assert_frame_equal(result, expected) - - df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D') - }) - df.loc[4] = pd.Timestamp('2010-01-04') - result = df.describe() - expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2, - pd.Timestamp('2010-01-01'), - pd.Timestamp('2010-01-04')]}, - index=['count', 'unique', 'top', 'freq', - 'first', 'last']) - assert_frame_equal(result, expected) - - # mix time and str - df['C2'] = ['a', 'a', 'b', 'c', 'a'] - result = df.describe() - expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan] - assert_frame_equal(result, expected) - - # just str - expected = DataFrame({'C2': [5, 3, 'a', 4]}, - index=['count', 'unique', 'top', 'freq']) - result = df[['C2']].describe() - - # mix of time, str, 
numeric - df['C3'] = [2, 4, 6, 8, 2] - result = df.describe() - expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]}, - index=['count', 'mean', 'std', 'min', '25%', - '50%', '75%', 'max']) - assert_frame_equal(result, expected) - assert_frame_equal(df.describe(), df[['C3']].describe()) - - assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe()) - assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe()) - - def test_describe_typefiltering(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24, dtype='int64'), - 'numD': np.arange(24.) + .5, - 'ts': tm.makeTimeSeries()[:24].index}) - - descN = df.describe() - expected_cols = ['numC', 'numD', ] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(descN, expected) - - desc = df.describe(include=['number']) - assert_frame_equal(desc, descN) - desc = df.describe(exclude=['object', 'datetime']) - assert_frame_equal(desc, descN) - desc = df.describe(include=['float']) - assert_frame_equal(desc, descN.drop('numC', 1)) - - descC = df.describe(include=['O']) - expected_cols = ['catA', 'catB'] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(descC, expected) - - descD = df.describe(include=['datetime']) - assert_series_equal(descD.ts, df.ts.describe()) - - desc = df.describe(include=['object', 'number', 'datetime']) - assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN) - assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC) - descDs = descD.sort_index() # the index order change for mixed-types - assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs) - - desc = df.loc[:, 'catA':'catB'].describe(include='all') - assert_frame_equal(desc, descC) - desc = df.loc[:, 'numC':'numD'].describe(include='all') - assert_frame_equal(desc, descN) - - desc 
= df.describe(percentiles=[], include='all') - cnt = Series(data=[4, 4, 6, 6, 6], - index=['catA', 'catB', 'numC', 'numD', 'ts']) - assert_series_equal(desc.count(), cnt) - self.assertTrue('count' in desc.index) - self.assertTrue('unique' in desc.index) - self.assertTrue('50%' in desc.index) - self.assertTrue('first' in desc.index) - - desc = df.drop("ts", 1).describe(percentiles=[], include='all') - assert_series_equal(desc.count(), cnt.drop("ts")) - self.assertTrue('first' not in desc.index) - desc = df.drop(["numC", "numD"], 1).describe(percentiles=[], - include='all') - assert_series_equal(desc.count(), cnt.drop(["numC", "numD"])) - self.assertTrue('50%' not in desc.index) - - def test_describe_typefiltering_category_bool(self): - df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8), - 'B_str': ['a', 'b', 'c', 'd'] * 6, - 'C_bool': [True] * 12 + [False] * 12, - 'D_num': np.arange(24.) + .5, - 'E_ts': tm.makeTimeSeries()[:24].index}) - - desc = df.describe() - expected_cols = ['D_num'] - expected = DataFrame(dict((k, df[k].describe()) - for k in expected_cols), - columns=expected_cols) - assert_frame_equal(desc, expected) - - desc = df.describe(include=["category"]) - self.assertTrue(desc.columns.tolist() == ["A_cat"]) - - # 'all' includes numpy-dtypes + category - desc1 = df.describe(include="all") - desc2 = df.describe(include=[np.generic, "category"]) - assert_frame_equal(desc1, desc2) - - def test_describe_timedelta(self): - df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")}) - self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta( - "8d4h")) - - def test_describe_typefiltering_dupcol(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24), - 'numD': np.arange(24.) 
+ .5, - 'ts': tm.makeTimeSeries()[:24].index}) - s = df.describe(include='all').shape[1] - df = pd.concat([df, df], axis=1) - s2 = df.describe(include='all').shape[1] - self.assertTrue(s2 == 2 * s) - - def test_describe_typefiltering_groupby(self): - df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8, - 'catB': ['a', 'b', 'c', 'd'] * 6, - 'numC': np.arange(24), - 'numD': np.arange(24.) + .5, - 'ts': tm.makeTimeSeries()[:24].index}) - G = df.groupby('catA') - self.assertTrue(G.describe(include=['number']).shape == (2, 16)) - self.assertTrue(G.describe(include=['number', 'object']).shape == (2, - 33)) - self.assertTrue(G.describe(include='all').shape == (2, 52)) - - def test_describe_multi_index_df_column_names(self): - """ Test that column names persist after the describe operation.""" - - df = pd.DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - - # GH 11517 - # test for hierarchical index - hierarchical_index_df = df.groupby(['A', 'B']).mean().T - self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B']) - self.assertTrue(hierarchical_index_df.describe().columns.names == - ['A', 'B']) - - # test for non-hierarchical index - non_hierarchical_index_df = df.groupby(['A']).mean().T - self.assertTrue(non_hierarchical_index_df.columns.names == ['A']) - self.assertTrue(non_hierarchical_index_df.describe().columns.names == - ['A']) - - def test_metadata_propagation_indiv(self): - - # groupby - df = DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - result = df.groupby('A').sum() - self.check_metadata(df, result) - - # resample - df = DataFrame(np.random.randn(1000, 2), - index=date_range('20130101', periods=1000, freq='s')) - result = df.resample('1T') - 
self.check_metadata(df, result) - - # merging with override - # GH 6923 - _metadata = DataFrame._metadata - _finalize = DataFrame.__finalize__ - - np.random.seed(10) - df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b']) - df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd']) - DataFrame._metadata = ['filename'] - df1.filename = 'fname1.csv' - df2.filename = 'fname2.csv' - - def finalize(self, other, method=None, **kwargs): - - for name in self._metadata: - if method == 'merge': - left, right = other.left, other.right - value = getattr(left, name, '') + '|' + getattr(right, - name, '') - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, '')) - - return self - - DataFrame.__finalize__ = finalize - result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner') - self.assertEqual(result.filename, 'fname1.csv|fname2.csv') - - # concat - # GH 6927 - DataFrame._metadata = ['filename'] - df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab')) - df1.filename = 'foo' - - def finalize(self, other, method=None, **kwargs): - for name in self._metadata: - if method == 'concat': - value = '+'.join([getattr( - o, name) for o in other.objs if getattr(o, name, None) - ]) - object.__setattr__(self, name, value) - else: - object.__setattr__(self, name, getattr(other, name, None)) - - return self - - DataFrame.__finalize__ = finalize - - result = pd.concat([df1, df1]) - self.assertEqual(result.filename, 'foo+foo') - - # reset - DataFrame._metadata = _metadata - DataFrame.__finalize__ = _finalize - - def test_tz_convert_and_localize(self): - l0 = date_range('20140701', periods=5, freq='D') - - # TODO: l1 should be a PeriodIndex for testing - # after GH2106 is addressed - with tm.assertRaises(NotImplementedError): - period_range('20140701', periods=1).tz_convert('UTC') - with tm.assertRaises(NotImplementedError): - period_range('20140701', periods=1).tz_localize('UTC') - # l1 = 
period_range('20140701', periods=5, freq='D') - l1 = date_range('20140701', periods=5, freq='D') - - int_idx = Index(range(5)) - - for fn in ['tz_localize', 'tz_convert']: - - if fn == 'tz_convert': - l0 = l0.tz_localize('UTC') - l1 = l1.tz_localize('UTC') - - for idx in [l0, l1]: - - l0_expected = getattr(idx, fn)('US/Pacific') - l1_expected = getattr(idx, fn)('US/Pacific') - - df1 = DataFrame(np.ones(5), index=l0) - df1 = getattr(df1, fn)('US/Pacific') - self.assert_index_equal(df1.index, l0_expected) - - # MultiIndex - # GH7846 - df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) - - df3 = getattr(df2, fn)('US/Pacific', level=0) - self.assertFalse(df3.index.levels[0].equals(l0)) - self.assert_index_equal(df3.index.levels[0], l0_expected) - self.assert_index_equal(df3.index.levels[1], l1) - self.assertFalse(df3.index.levels[1].equals(l1_expected)) - - df3 = getattr(df2, fn)('US/Pacific', level=1) - self.assert_index_equal(df3.index.levels[0], l0) - self.assertFalse(df3.index.levels[0].equals(l0_expected)) - self.assert_index_equal(df3.index.levels[1], l1_expected) - self.assertFalse(df3.index.levels[1].equals(l1)) - - df4 = DataFrame(np.ones(5), - MultiIndex.from_arrays([int_idx, l0])) - - # TODO: untested - df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa - - self.assert_index_equal(df3.index.levels[0], l0) - self.assertFalse(df3.index.levels[0].equals(l0_expected)) - self.assert_index_equal(df3.index.levels[1], l1_expected) - self.assertFalse(df3.index.levels[1].equals(l1)) - - # Bad Inputs - for fn in ['tz_localize', 'tz_convert']: - # Not DatetimeIndex / PeriodIndex - with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'): - df = DataFrame(index=int_idx) - df = getattr(df, fn)('US/Pacific') - - # Not DatetimeIndex / PeriodIndex - with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'): - df = DataFrame(np.ones(5), - MultiIndex.from_arrays([int_idx, l0])) - df = getattr(df, fn)('US/Pacific', level=0) - - # Invalid level - with 
tm.assertRaisesRegexp(ValueError, 'not valid'): - df = DataFrame(index=l0) - df = getattr(df, fn)('US/Pacific', level=1) - - def test_set_attribute(self): - # Test for consistent setattr behavior when an attribute and a column - # have the same name (Issue #8994) - df = DataFrame({'x': [1, 2, 3]}) - - df.y = 2 - df['y'] = [2, 4, 6] - df.y = 5 - - self.assertEqual(df.y, 5) - assert_series_equal(df['y'], Series([2, 4, 6], name='y')) - - def test_pct_change(self): - # GH 11150 - pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange( - 0, 40, 10)]).astype(np.float64) - pnl.iat[1, 0] = np.nan - pnl.iat[1, 1] = np.nan - pnl.iat[2, 3] = 60 - - mask = pnl.isnull() - - for axis in range(2): - expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift( - axis=axis) - 1 - expected[mask] = np.nan - result = pnl.pct_change(axis=axis, fill_method='pad') - - self.assert_frame_equal(result, expected) - - def test_to_xarray(self): - - tm._skip_if_no_xarray() - from xarray import Dataset - - df = DataFrame({'a': list('abc'), - 'b': list(range(1, 4)), - 'c': np.arange(3, 6).astype('u1'), - 'd': np.arange(4.0, 7.0, dtype='float64'), - 'e': [True, False, True], - 'f': pd.Categorical(list('abc')), - 'g': pd.date_range('20130101', periods=3), - 'h': pd.date_range('20130101', - periods=3, - tz='US/Eastern')} - ) - - df.index.name = 'foo' - result = df[0:0].to_xarray() - self.assertEqual(result.dims['foo'], 0) - self.assertIsInstance(result, Dataset) - - for index in [tm.makeFloatIndex, tm.makeIntIndex, - tm.makeStringIndex, tm.makeUnicodeIndex, - tm.makeDateIndex, tm.makePeriodIndex, - tm.makeCategoricalIndex, tm.makeTimedeltaIndex]: - df.index = index(3) - df.index.name = 'foo' - df.columns.name = 'bar' - result = df.to_xarray() - self.assertEqual(result.dims['foo'], 3) - self.assertEqual(len(result.coords), 1) - self.assertEqual(len(result.data_vars), 8) - assert_almost_equal(list(result.coords.keys()), ['foo']) - self.assertIsInstance(result, Dataset) - - # 
idempotency - # categoricals are not preserved - # datetimes w/tz are not preserved - # column names are lost - expected = df.copy() - expected['f'] = expected['f'].astype(object) - expected['h'] = expected['h'].astype('datetime64[ns]') - expected.columns.name = None - assert_frame_equal(result.to_dataframe(), expected, - check_index_type=False, check_categorical=False) - - # available in 0.7.1 - # MultiIndex - df.index = pd.MultiIndex.from_product([['a'], range(3)], - names=['one', 'two']) - result = df.to_xarray() - self.assertEqual(result.dims['one'], 1) - self.assertEqual(result.dims['two'], 3) - self.assertEqual(len(result.coords), 2) - self.assertEqual(len(result.data_vars), 8) - assert_almost_equal(list(result.coords.keys()), ['one', 'two']) - self.assertIsInstance(result, Dataset) - - result = result.to_dataframe() - expected = df.copy() - expected['f'] = expected['f'].astype(object) - expected['h'] = expected['h'].astype('datetime64[ns]') - expected.columns.name = None - assert_frame_equal(result, - expected, - check_index_type=False) - - def test_deepcopy_empty(self): - # This test covers empty frame copying with non-empty column sets - # as reported in issue GH15370 - empty_frame = DataFrame(data=[], index=[], columns=['A']) - empty_frame_copy = deepcopy(empty_frame) - - self._compare(empty_frame_copy, empty_frame) - - -class TestPanel(tm.TestCase, Generic): - _typ = Panel - _comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True) - - def test_to_xarray(self): - - tm._skip_if_no_xarray() - from xarray import DataArray - - with catch_warnings(record=True): - p = tm.makePanel() - - result = p.to_xarray() - self.assertIsInstance(result, DataArray) - self.assertEqual(len(result.coords), 3) - assert_almost_equal(list(result.coords.keys()), - ['items', 'major_axis', 'minor_axis']) - self.assertEqual(len(result.dims), 3) - - # idempotency - assert_panel_equal(result.to_pandas(), p) - - -class TestPanel4D(tm.TestCase, Generic): - _typ = Panel4D 
- _comparator = lambda self, x, y: assert_panel4d_equal(x, y, by_blocks=True) - - def test_sample(self): - pytest.skip("sample on Panel4D") - - def test_to_xarray(self): - - tm._skip_if_no_xarray() - from xarray import DataArray - - with catch_warnings(record=True): - p = tm.makePanel4D() - - result = p.to_xarray() - self.assertIsInstance(result, DataArray) - self.assertEqual(len(result.coords), 4) - assert_almost_equal(list(result.coords.keys()), - ['labels', 'items', 'major_axis', - 'minor_axis']) - self.assertEqual(len(result.dims), 4) - - # non-convertible - self.assertRaises(ValueError, lambda: result.to_pandas()) - - -# run all the tests, but wrap each in a warning catcher -for t in ['test_rename', 'test_rename_axis', 'test_get_numeric_data', - 'test_get_default', 'test_nonzero', - 'test_numpy_1_7_compat_numeric_methods', - 'test_downcast', 'test_constructor_compound_dtypes', - 'test_head_tail', - 'test_size_compat', 'test_split_compat', - 'test_unexpected_keyword', - 'test_stat_unexpected_keyword', 'test_api_compat', - 'test_stat_non_defaults_args', - 'test_clip', 'test_truncate_out_of_bounds', 'test_numpy_clip', - 'test_metadata_propagation', 'test_copy_and_deepcopy', - 'test_sample']: - - def f(): - def tester(self): - with catch_warnings(record=True): - return getattr(super(TestPanel, self), t)() - return tester - - setattr(TestPanel, t, f()) - - def f(): - def tester(self): - with catch_warnings(record=True): - return getattr(super(TestPanel4D, self), t)() - return tester - - setattr(TestPanel4D, t, f()) - - -class TestNDFrame(tm.TestCase): - # tests that don't fit elsewhere - - def test_sample(sel): - # Fixes issue: 2419 - # additional specific object based tests - - # A few dataframe test with degenerate weights. 
- easy_weight_list = [0] * 10 - easy_weight_list[5] = 1 - - df = pd.DataFrame({'col1': range(10, 20), - 'col2': range(20, 30), - 'colString': ['a'] * 10, - 'easyweights': easy_weight_list}) - sample1 = df.sample(n=1, weights='easyweights') - assert_frame_equal(sample1, df.iloc[5:6]) - - # Ensure proper error if string given as weight for Series, panel, or - # DataFrame with axis = 1. - s = Series(range(10)) - with tm.assertRaises(ValueError): - s.sample(n=3, weights='weight_column') - - with catch_warnings(record=True): - panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4], - minor_axis=[3, 4, 5]) - with tm.assertRaises(ValueError): - panel.sample(n=1, weights='weight_column') - - with tm.assertRaises(ValueError): - df.sample(n=1, weights='weight_column', axis=1) - - # Check weighting key error - with tm.assertRaises(KeyError): - df.sample(n=3, weights='not_a_real_column_name') - - # Check that re-normalizes weights that don't sum to one. - weights_less_than_1 = [0] * 10 - weights_less_than_1[0] = 0.5 - tm.assert_frame_equal( - df.sample(n=1, weights=weights_less_than_1), df.iloc[:1]) - - ### - # Test axis argument - ### - - # Test axis argument - df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10}) - second_column_weight = [0, 1] - assert_frame_equal( - df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']]) - - # Different axis arg types - assert_frame_equal(df.sample(n=1, axis='columns', - weights=second_column_weight), - df[['col2']]) - - weight = [0] * 10 - weight[5] = 0.5 - assert_frame_equal(df.sample(n=1, axis='rows', weights=weight), - df.iloc[5:6]) - assert_frame_equal(df.sample(n=1, axis='index', weights=weight), - df.iloc[5:6]) - - # Check out of range axis values - with tm.assertRaises(ValueError): - df.sample(n=1, axis=2) - - with tm.assertRaises(ValueError): - df.sample(n=1, axis='not_a_name') - - with tm.assertRaises(ValueError): - s = pd.Series(range(10)) - s.sample(n=1, axis=1) - - # Test weight length compared to correct axis - 
with tm.assertRaises(ValueError): - df.sample(n=1, axis=1, weights=[0.5] * 10) - - # Check weights with axis = 1 - easy_weight_list = [0] * 3 - easy_weight_list[2] = 1 - - df = pd.DataFrame({'col1': range(10, 20), - 'col2': range(20, 30), - 'colString': ['a'] * 10}) - sample1 = df.sample(n=1, axis=1, weights=easy_weight_list) - assert_frame_equal(sample1, df[['colString']]) - - # Test default axes - with catch_warnings(record=True): - p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6], - minor_axis=[1, 3, 5]) - assert_panel_equal( - p.sample(n=3, random_state=42), p.sample(n=3, axis=1, - random_state=42)) - assert_frame_equal( - df.sample(n=3, random_state=42), df.sample(n=3, axis=0, - random_state=42)) - - # Test that function aligns weights with frame - df = DataFrame( - {'col1': [5, 6, 7], - 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3]) - s = Series([1, 0, 0], index=[3, 5, 9]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s)) - - # Weights have index values to be dropped because not in - # sampled DataFrame - s2 = Series([0.001, 0, 10000], index=[3, 5, 10]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2)) - - # Weights have empty values to be filed with zeros - s3 = Series([0.01, 0], index=[3, 5]) - assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3)) - - # No overlap in weight and sampled DataFrame indices - s4 = Series([1, 0], index=[1, 2]) - with tm.assertRaises(ValueError): - df.sample(1, weights=s4) - - def test_squeeze(self): - # noop - for s in [tm.makeFloatSeries(), tm.makeStringSeries(), - tm.makeObjectSeries()]: - tm.assert_series_equal(s.squeeze(), s) - for df in [tm.makeTimeDataFrame()]: - tm.assert_frame_equal(df.squeeze(), df) - with catch_warnings(record=True): - for p in [tm.makePanel()]: - tm.assert_panel_equal(p.squeeze(), p) - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - tm.assert_panel4d_equal(p4d.squeeze(), p4d) - - # squeezing - df = tm.makeTimeDataFrame().reindex(columns=['A']) - 
tm.assert_series_equal(df.squeeze(), df['A']) - - with catch_warnings(record=True): - p = tm.makePanel().reindex(items=['ItemA']) - tm.assert_frame_equal(p.squeeze(), p['ItemA']) - - p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A']) - tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A']) - - with catch_warnings(record=True): - p4d = tm.makePanel4D().reindex(labels=['label1']) - tm.assert_panel_equal(p4d.squeeze(), p4d['label1']) - - with catch_warnings(record=True): - p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA']) - tm.assert_frame_equal(p4d.squeeze(), p4d.loc['label1', 'ItemA']) - - # don't fail with 0 length dimensions GH11229 & GH8999 - empty_series = Series([], name='five') - empty_frame = DataFrame([empty_series]) - with catch_warnings(record=True): - empty_panel = Panel({'six': empty_frame}) - - [tm.assert_series_equal(empty_series, higher_dim.squeeze()) - for higher_dim in [empty_series, empty_frame, empty_panel]] - - # axis argument - df = tm.makeTimeDataFrame(nper=1).iloc[:, :1] - assert df.shape == (1, 1) - tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) - tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0]) - tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) - tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0]) - assert df.squeeze() == df.iloc[0, 0] - tm.assertRaises(ValueError, df.squeeze, axis=2) - tm.assertRaises(ValueError, df.squeeze, axis='x') - - df = tm.makeTimeDataFrame(3) - tm.assert_frame_equal(df.squeeze(axis=0), df) - - def test_numpy_squeeze(self): - s = tm.makeFloatSeries() - tm.assert_series_equal(np.squeeze(s), s) - - df = tm.makeTimeDataFrame().reindex(columns=['A']) - tm.assert_series_equal(np.squeeze(df), df['A']) - - def test_transpose(self): - msg = (r"transpose\(\) got multiple values for " - r"keyword argument 'axes'") - for s in [tm.makeFloatSeries(), tm.makeStringSeries(), - tm.makeObjectSeries()]: - # calls implementation in pandas/core/base.py - 
tm.assert_series_equal(s.transpose(), s) - for df in [tm.makeTimeDataFrame()]: - tm.assert_frame_equal(df.transpose().transpose(), df) - - with catch_warnings(record=True): - for p in [tm.makePanel()]: - tm.assert_panel_equal(p.transpose(2, 0, 1) - .transpose(1, 2, 0), p) - tm.assertRaisesRegexp(TypeError, msg, p.transpose, - 2, 0, 1, axes=(2, 0, 1)) - - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - tm.assert_panel4d_equal(p4d.transpose(2, 0, 3, 1) - .transpose(1, 3, 0, 2), p4d) - tm.assertRaisesRegexp(TypeError, msg, p4d.transpose, - 2, 0, 3, 1, axes=(2, 0, 3, 1)) - - def test_numpy_transpose(self): - msg = "the 'axes' parameter is not supported" - - s = tm.makeFloatSeries() - tm.assert_series_equal( - np.transpose(s), s) - tm.assertRaisesRegexp(ValueError, msg, - np.transpose, s, axes=1) - - df = tm.makeTimeDataFrame() - tm.assert_frame_equal(np.transpose( - np.transpose(df)), df) - tm.assertRaisesRegexp(ValueError, msg, - np.transpose, df, axes=1) - - with catch_warnings(record=True): - p = tm.makePanel() - tm.assert_panel_equal(np.transpose( - np.transpose(p, axes=(2, 0, 1)), - axes=(1, 2, 0)), p) - - with catch_warnings(record=True): - p4d = tm.makePanel4D() - tm.assert_panel4d_equal(np.transpose( - np.transpose(p4d, axes=(2, 0, 3, 1)), - axes=(1, 3, 0, 2)), p4d) - - def test_take(self): - indices = [1, 5, -2, 6, 3, -1] - for s in [tm.makeFloatSeries(), tm.makeStringSeries(), - tm.makeObjectSeries()]: - out = s.take(indices) - expected = Series(data=s.values.take(indices), - index=s.index.take(indices), dtype=s.dtype) - tm.assert_series_equal(out, expected) - for df in [tm.makeTimeDataFrame()]: - out = df.take(indices) - expected = DataFrame(data=df.values.take(indices, axis=0), - index=df.index.take(indices), - columns=df.columns) - tm.assert_frame_equal(out, expected) - - indices = [-3, 2, 0, 1] - with catch_warnings(record=True): - for p in [tm.makePanel()]: - out = p.take(indices) - expected = Panel(data=p.values.take(indices, 
axis=0), - items=p.items.take(indices), - major_axis=p.major_axis, - minor_axis=p.minor_axis) - tm.assert_panel_equal(out, expected) - - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - out = p4d.take(indices) - expected = Panel4D(data=p4d.values.take(indices, axis=0), - labels=p4d.labels.take(indices), - major_axis=p4d.major_axis, - minor_axis=p4d.minor_axis, - items=p4d.items) - tm.assert_panel4d_equal(out, expected) - - def test_take_invalid_kwargs(self): - indices = [-3, 2, 0, 1] - s = tm.makeFloatSeries() - df = tm.makeTimeDataFrame() - - with catch_warnings(record=True): - p = tm.makePanel() - p4d = tm.makePanel4D() - - for obj in (s, df, p, p4d): - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assertRaisesRegexp(TypeError, msg, obj.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assertRaisesRegexp(ValueError, msg, obj.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assertRaisesRegexp(ValueError, msg, obj.take, - indices, mode='clip') - - def test_equals(self): - s1 = pd.Series([1, 2, 3], index=[0, 2, 1]) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - s1[1] = 99 - self.assertFalse(s1.equals(s2)) - - # NaNs compare as equal - s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3]) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - s2[0] = 9.9 - self.assertFalse(s1.equals(s2)) - - idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')]) - s1 = Series([1, 2, np.nan], index=idx) - s2 = s1.copy() - self.assertTrue(s1.equals(s2)) - - # Add object dtype column with nans - index = np.random.random(10) - df1 = DataFrame( - np.random.random(10, ), index=index, columns=['floats']) - df1['text'] = 'the sky is so blue. 
we could use more chocolate.'.split( - ) - df1['start'] = date_range('2000-1-1', periods=10, freq='T') - df1['end'] = date_range('2000-1-1', periods=10, freq='D') - df1['diff'] = df1['end'] - df1['start'] - df1['bool'] = (np.arange(10) % 3 == 0) - df1.loc[::2] = nan - df2 = df1.copy() - self.assertTrue(df1['text'].equals(df2['text'])) - self.assertTrue(df1['start'].equals(df2['start'])) - self.assertTrue(df1['end'].equals(df2['end'])) - self.assertTrue(df1['diff'].equals(df2['diff'])) - self.assertTrue(df1['bool'].equals(df2['bool'])) - self.assertTrue(df1.equals(df2)) - self.assertFalse(df1.equals(object)) - - # different dtype - different = df1.copy() - different['floats'] = different['floats'].astype('float32') - self.assertFalse(df1.equals(different)) - - # different index - different_index = -index - different = df2.set_index(different_index) - self.assertFalse(df1.equals(different)) - - # different columns - different = df2.copy() - different.columns = df2.columns[::-1] - self.assertFalse(df1.equals(different)) - - # DatetimeIndex - index = pd.date_range('2000-1-1', periods=10, freq='T') - df1 = df1.set_index(index) - df2 = df1.copy() - self.assertTrue(df1.equals(df2)) - - # MultiIndex - df3 = df1.set_index(['text'], append=True) - df2 = df1.set_index(['text'], append=True) - self.assertTrue(df3.equals(df2)) - - df2 = df1.set_index(['floats'], append=True) - self.assertFalse(df3.equals(df2)) - - # NaN in index - df3 = df1.set_index(['floats'], append=True) - df2 = df1.set_index(['floats'], append=True) - self.assertTrue(df3.equals(df2)) - - # GH 8437 - a = pd.Series([False, np.nan]) - b = pd.Series([False, np.nan]) - c = pd.Series(index=range(2)) - d = pd.Series(index=range(2)) - e = pd.Series(index=range(2)) - f = pd.Series(index=range(2)) - c[:-1] = d[:-1] = e[0] = f[0] = False - self.assertTrue(a.equals(a)) - self.assertTrue(a.equals(b)) - self.assertTrue(a.equals(c)) - self.assertTrue(a.equals(d)) - self.assertFalse(a.equals(e)) - 
self.assertTrue(e.equals(f)) - - def test_describe_raises(self): - with catch_warnings(record=True): - with tm.assertRaises(NotImplementedError): - tm.makePanel().describe() - - def test_pipe(self): - df = DataFrame({'A': [1, 2, 3]}) - f = lambda x, y: x ** y - result = df.pipe(f, 2) - expected = DataFrame({'A': [1, 4, 9]}) - self.assert_frame_equal(result, expected) - - result = df.A.pipe(f, 2) - self.assert_series_equal(result, expected.A) - - def test_pipe_tuple(self): - df = DataFrame({'A': [1, 2, 3]}) - f = lambda x, y: y - result = df.pipe((f, 'y'), 0) - self.assert_frame_equal(result, df) - - result = df.A.pipe((f, 'y'), 0) - self.assert_series_equal(result, df.A) - - def test_pipe_tuple_error(self): - df = DataFrame({"A": [1, 2, 3]}) - f = lambda x, y: y - with tm.assertRaises(ValueError): - df.pipe((f, 'y'), x=1, y=0) - - with tm.assertRaises(ValueError): - df.A.pipe((f, 'y'), x=1, y=0) - - def test_pipe_panel(self): - with catch_warnings(record=True): - wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})}) - f = lambda x, y: x + y - result = wp.pipe(f, 2) - expected = wp + 2 - assert_panel_equal(result, expected) - - result = wp.pipe((f, 'y'), x=1) - expected = wp + 1 - assert_panel_equal(result, expected) - - with tm.assertRaises(ValueError): - result = wp.pipe((f, 'y'), x=1, y=1) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 55e0e512169fb..69a844e2e64e4 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2584,7 +2584,7 @@ def test_truncate(self): wp.major_axis[2]) def test_axis_dummies(self): - from pandas.core.reshape import make_axis_dummies + from pandas.core.reshape.reshape import make_axis_dummies minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8) self.assertEqual(len(minor_dummies.columns), @@ -2604,7 +2604,7 @@ def test_axis_dummies(self): # TODO: test correctness def test_get_dummies(self): - from pandas.core.reshape import get_dummies, make_axis_dummies + from 
pandas.core.reshape.reshape import get_dummies, make_axis_dummies self.panel['Label'] = self.panel.index.labels[1] minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8) @@ -2655,7 +2655,7 @@ def test_join(self): def test_pivot(self): with catch_warnings(record=True): - from pandas.core.reshape import _slow_pivot + from pandas.core.reshape.reshape import _slow_pivot one, two, three = (np.array([1, 2, 3, 4, 5]), np.array(['a', 'b', 'c', 'd', 'e']), diff --git a/pandas/tests/test_util.py b/pandas/tests/test_util.py index 2793cc14df19a..1fa436df0910d 100644 --- a/pandas/tests/test_util.py +++ b/pandas/tests/test_util.py @@ -1,8 +1,12 @@ # -*- coding: utf-8 -*- -from collections import OrderedDict +import os +import locale +import codecs import sys -import unittest from uuid import uuid4 +from collections import OrderedDict + +import pytest from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util.decorators import deprecate_kwarg from pandas.util.validators import (validate_args, validate_kwargs, @@ -11,6 +15,9 @@ import pandas.util.testing as tm +CURRENT_LOCALE = locale.getlocale() +LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None) + class TestDecorators(tm.TestCase): @@ -352,9 +359,9 @@ def test_exactly_one_ref(self): # materialize as bytearray to show that it is mutable self.assertEqual(bytearray(as_stolen_buf), b'test') - @unittest.skipIf( + @pytest.mark.skipif( sys.version_info[0] > 2, - 'bytes objects cannot be interned in py3', + reason='bytes objects cannot be interned in py3', ) def test_interned(self): salt = uuid4().hex @@ -401,3 +408,66 @@ def test_numpy_errstate_is_default(): from pandas.compat import numpy # noqa # The errstate should be unchanged after that import. 
assert np.geterr() == expected + + +class TestLocaleUtils(tm.TestCase): + + @classmethod + def setUpClass(cls): + super(TestLocaleUtils, cls).setUpClass() + cls.locales = tm.get_locales() + + if not cls.locales: + pytest.skip("No locales found") + + tm._skip_if_windows() + + @classmethod + def tearDownClass(cls): + super(TestLocaleUtils, cls).tearDownClass() + del cls.locales + + def test_get_locales(self): + # all systems should have at least a single locale + assert len(tm.get_locales()) > 0 + + def test_get_locales_prefix(self): + if len(self.locales) == 1: + pytest.skip("Only a single locale found, no point in " + "trying to test filtering locale prefixes") + first_locale = self.locales[0] + assert len(tm.get_locales(prefix=first_locale[:2])) > 0 + + def test_set_locale(self): + if len(self.locales) == 1: + pytest.skip("Only a single locale found, no point in " + "trying to test setting another locale") + + if all(x is None for x in CURRENT_LOCALE): + # Not sure why, but on some travis runs with pytest, + # getlocale() returned (None, None). 
+ pytest.skip("CURRENT_LOCALE is not set.") + + if LOCALE_OVERRIDE is None: + lang, enc = 'it_CH', 'UTF-8' + elif LOCALE_OVERRIDE == 'C': + lang, enc = 'en_US', 'ascii' + else: + lang, enc = LOCALE_OVERRIDE.split('.') + + enc = codecs.lookup(enc).name + new_locale = lang, enc + + if not tm._can_set_locale(new_locale): + with tm.assertRaises(locale.Error): + with tm.set_locale(new_locale): + pass + else: + with tm.set_locale(new_locale) as normalized_locale: + new_lang, new_enc = normalized_locale.split('.') + new_enc = codecs.lookup(enc).name + normalized_locale = new_lang, new_enc + self.assertEqual(normalized_locale, new_locale) + + current_locale = locale.getlocale() + self.assertEqual(current_locale, CURRENT_LOCALE) diff --git a/pandas/tests/tools/test_util.py b/pandas/tests/tools/test_util.py deleted file mode 100644 index 3ac7d8b32516e..0000000000000 --- a/pandas/tests/tools/test_util.py +++ /dev/null @@ -1,485 +0,0 @@ -import os -import locale -import codecs -import pytest -import decimal - -import numpy as np -from numpy import iinfo - -import pandas as pd -from pandas import (date_range, Index, _np_version_under1p9) -import pandas.util.testing as tm -from pandas.tools.util import cartesian_product, to_numeric - -CURRENT_LOCALE = locale.getlocale() -LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None) - - -class TestCartesianProduct(tm.TestCase): - - def test_simple(self): - x, y = list('ABC'), [1, 22] - result1, result2 = cartesian_product([x, y]) - expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C']) - expected2 = np.array([1, 22, 1, 22, 1, 22]) - tm.assert_numpy_array_equal(result1, expected1) - tm.assert_numpy_array_equal(result2, expected2) - - def test_datetimeindex(self): - # regression test for GitHub issue #6439 - # make sure that the ordering on datetimeindex is consistent - x = date_range('2000-01-01', periods=2) - result1, result2 = [Index(y).day for y in cartesian_product([x, x])] - expected1 = Index([1, 1, 2, 2]) - expected2 = Index([1, 
2, 1, 2]) - tm.assert_index_equal(result1, expected1) - tm.assert_index_equal(result2, expected2) - - def test_empty(self): - # product of empty factors - X = [[], [0, 1], []] - Y = [[], [], ['a', 'b', 'c']] - for x, y in zip(X, Y): - expected1 = np.array([], dtype=np.asarray(x).dtype) - expected2 = np.array([], dtype=np.asarray(y).dtype) - result1, result2 = cartesian_product([x, y]) - tm.assert_numpy_array_equal(result1, expected1) - tm.assert_numpy_array_equal(result2, expected2) - - # empty product (empty input): - result = cartesian_product([]) - expected = [] - assert result == expected - - def test_invalid_input(self): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - msg = "Input must be a list-like of list-likes" - for X in invalid_inputs: - tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X) - - -class TestLocaleUtils(tm.TestCase): - - @classmethod - def setUpClass(cls): - super(TestLocaleUtils, cls).setUpClass() - cls.locales = tm.get_locales() - - if not cls.locales: - pytest.skip("No locales found") - - tm._skip_if_windows() - - @classmethod - def tearDownClass(cls): - super(TestLocaleUtils, cls).tearDownClass() - del cls.locales - - def test_get_locales(self): - # all systems should have at least a single locale - assert len(tm.get_locales()) > 0 - - def test_get_locales_prefix(self): - if len(self.locales) == 1: - pytest.skip("Only a single locale found, no point in " - "trying to test filtering locale prefixes") - first_locale = self.locales[0] - assert len(tm.get_locales(prefix=first_locale[:2])) > 0 - - def test_set_locale(self): - if len(self.locales) == 1: - pytest.skip("Only a single locale found, no point in " - "trying to test setting another locale") - - if all(x is None for x in CURRENT_LOCALE): - # Not sure why, but on some travis runs with pytest, - # getlocale() returned (None, None). 
- pytest.skip("CURRENT_LOCALE is not set.") - - if LOCALE_OVERRIDE is None: - lang, enc = 'it_CH', 'UTF-8' - elif LOCALE_OVERRIDE == 'C': - lang, enc = 'en_US', 'ascii' - else: - lang, enc = LOCALE_OVERRIDE.split('.') - - enc = codecs.lookup(enc).name - new_locale = lang, enc - - if not tm._can_set_locale(new_locale): - with tm.assertRaises(locale.Error): - with tm.set_locale(new_locale): - pass - else: - with tm.set_locale(new_locale) as normalized_locale: - new_lang, new_enc = normalized_locale.split('.') - new_enc = codecs.lookup(enc).name - normalized_locale = new_lang, new_enc - self.assertEqual(normalized_locale, new_locale) - - current_locale = locale.getlocale() - self.assertEqual(current_locale, CURRENT_LOCALE) - - -class TestToNumeric(tm.TestCase): - - def test_series(self): - s = pd.Series(['1', '-3.14', '7']) - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['1', '-3.14', 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - def test_series_numeric(self): - s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - # bool is regarded as numeric - s = pd.Series([True, False, True, True], - index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - def test_error(self): - s = pd.Series([1, -3.14, 'apple']) - msg = 'Unable to parse string "apple" at position 2' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([1, -3.14, 'apple']) - tm.assert_series_equal(res, expected) - - res = to_numeric(s, errors='coerce') - expected = pd.Series([1, -3.14, np.nan]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['orange', 1, -3.14, 'apple']) - msg = 'Unable to parse string 
"orange" at position 0' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - def test_error_seen_bool(self): - s = pd.Series([True, False, 'apple']) - msg = 'Unable to parse string "apple" at position 2' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([True, False, 'apple']) - tm.assert_series_equal(res, expected) - - # coerces to float - res = to_numeric(s, errors='coerce') - expected = pd.Series([1., 0., np.nan]) - tm.assert_series_equal(res, expected) - - def test_list(self): - s = ['1', '-3.14', '7'] - res = to_numeric(s) - expected = np.array([1, -3.14, 7]) - tm.assert_numpy_array_equal(res, expected) - - def test_list_numeric(self): - s = [1, 3, 4, 5] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64)) - - s = [1., 3., 4., 5.] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - # bool is regarded as numeric - s = [True, False, True, True] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - def test_numeric(self): - s = pd.Series([1, -3.14, 7], dtype='O') - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series([1, -3.14, 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - # GH 14827 - df = pd.DataFrame(dict( - a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'], - b=[1.0, 2.0, 3.0, 4.0], - )) - expected = pd.DataFrame(dict( - a=[1.2, 3.14, np.inf, 0.1], - b=[1.0, 2.0, 3.0, 4.0], - )) - - # Test to_numeric over one column - df_copy = df.copy() - df_copy['a'] = df_copy['a'].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - # Test to_numeric over multiple columns - df_copy = df.copy() - df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - def test_numeric_lists_and_arrays(self): - # Test to_numeric with 
embedded lists and arrays - df = pd.DataFrame(dict( - a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - a=[[3.14, 1.0], 1.6, 0.1], - )) - tm.assert_frame_equal(df, expected) - - df = pd.DataFrame(dict( - a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - a=[[3.14, 1.0], 0.1], - )) - tm.assert_frame_equal(df, expected) - - def test_all_nan(self): - s = pd.Series(['a', 'b', 'c']) - res = to_numeric(s, errors='coerce') - expected = pd.Series([np.nan, np.nan, np.nan]) - tm.assert_series_equal(res, expected) - - def test_type_check(self): - # GH 11776 - df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']}) - with tm.assertRaisesRegexp(TypeError, "1-d array"): - to_numeric(df) - for errors in ['ignore', 'raise', 'coerce']: - with tm.assertRaisesRegexp(TypeError, "1-d array"): - to_numeric(df, errors=errors) - - def test_scalar(self): - self.assertEqual(pd.to_numeric(1), 1) - self.assertEqual(pd.to_numeric(1.1), 1.1) - - self.assertEqual(pd.to_numeric('1'), 1) - self.assertEqual(pd.to_numeric('1.1'), 1.1) - - with tm.assertRaises(ValueError): - to_numeric('XX', errors='raise') - - self.assertEqual(to_numeric('XX', errors='ignore'), 'XX') - self.assertTrue(np.isnan(to_numeric('XX', errors='coerce'))) - - def test_numeric_dtypes(self): - idx = pd.Index([1, 2, 3], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.values) - - idx = pd.Index([1., np.nan, 3., np.nan], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - 
tm.assert_numpy_array_equal(res, idx.values) - - def test_str(self): - idx = pd.Index(['1', '2', '3'], name='xxx') - exp = np.array([1, 2, 3], dtype='int64') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx') - exp = np.array([1.5, 2.7, 3.4]) - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - def test_datetimelike(self): - for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_timedelta(self): - idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_period(self): - idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - # ToDo: enable when we can support native PeriodDtype - # res = pd.to_numeric(pd.Series(idx, name='xxx')) - # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - def test_non_hashable(self): - # Test 
for Bug #13324 - s = pd.Series([[10.0, 2], 1.0, 'apple']) - res = pd.to_numeric(s, errors='coerce') - tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan])) - - res = pd.to_numeric(s, errors='ignore') - tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple'])) - - with self.assertRaisesRegexp(TypeError, "Invalid object type"): - pd.to_numeric(s) - - def test_downcast(self): - # see gh-13352 - mixed_data = ['1', 2, 3] - int_data = [1, 2, 3] - date_data = np.array(['1970-01-02', '1970-01-03', - '1970-01-04'], dtype='datetime64[D]') - - invalid_downcast = 'unsigned-integer' - msg = 'invalid downcasting method provided' - - smallest_int_dtype = np.dtype(np.typecodes['Integer'][0]) - smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0]) - - # support below np.float32 is rare and far between - float_32_char = np.dtype(np.float32).char - smallest_float_dtype = float_32_char - - for data in (mixed_data, int_data, date_data): - with self.assertRaisesRegexp(ValueError, msg): - pd.to_numeric(data, downcast=invalid_downcast) - - expected = np.array([1, 2, 3], dtype=np.int64) - - res = pd.to_numeric(data) - tm.assert_numpy_array_equal(res, expected) - - res = pd.to_numeric(data, downcast=None) - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_int_dtype) - - for signed_downcast in ('integer', 'signed'): - res = pd.to_numeric(data, downcast=signed_downcast) - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_uint_dtype) - res = pd.to_numeric(data, downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_float_dtype) - res = pd.to_numeric(data, downcast='float') - tm.assert_numpy_array_equal(res, expected) - - # if we can't successfully cast the given - # data to a numeric dtype, do not bother - # with the downcast parameter - data = ['foo', 2, 3] - expected = np.array(data, dtype=object) - res = 
pd.to_numeric(data, errors='ignore', - downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - # cannot cast to an unsigned integer because - # we have a negative number - data = ['-1', 2, 3] - expected = np.array([-1, 2, 3], dtype=np.int64) - res = pd.to_numeric(data, downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - # cannot cast to an integer (signed or unsigned) - # because we have a float number - data = (['1.1', 2, 3], - [10000.0, 20000, 3000, 40000.36, 50000, 50000.00]) - expected = (np.array([1.1, 2, 3], dtype=np.float64), - np.array([10000.0, 20000, 3000, - 40000.36, 50000, 50000.00], dtype=np.float64)) - - for _data, _expected in zip(data, expected): - for downcast in ('integer', 'signed', 'unsigned'): - res = pd.to_numeric(_data, downcast=downcast) - tm.assert_numpy_array_equal(res, _expected) - - # the smallest integer dtype need not be np.(u)int8 - data = ['256', 257, 258] - - for downcast, expected_dtype in zip( - ['integer', 'signed', 'unsigned'], - [np.int16, np.int16, np.uint16]): - expected = np.array([256, 257, 258], dtype=expected_dtype) - res = pd.to_numeric(data, downcast=downcast) - tm.assert_numpy_array_equal(res, expected) - - def test_downcast_limits(self): - # Test the limits of each downcast. Bug: #14401. - # Check to make sure numpy is new enough to run this test. 
- if _np_version_under1p9: - pytest.skip("Numpy version is under 1.9") - - i = 'integer' - u = 'unsigned' - dtype_downcast_min_max = [ - ('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]), - ('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]), - ('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]), - ('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]), - ('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]), - ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]), - ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]), - ('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]), - ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]), - ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]), - ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]), - ('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]), - ('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]), - ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]), - ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), - ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), - ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]) - ] - - for dtype, downcast, min_max in dtype_downcast_min_max: - series = pd.to_numeric(pd.Series(min_max), downcast=downcast) - assert series.dtype == dtype diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py index 53208fbdd5529..cd58aa2c7f923 100644 --- a/pandas/tools/merge.py +++ b/pandas/tools/merge.py @@ -1,46 +1,4 @@ -""" -SQL-style merge routines -""" - -import copy import warnings -import string - -import numpy as np -from pandas.compat import range, lzip, zip, map, filter -import pandas.compat as compat - -import pandas as pd -from pandas import (Categorical, Series, DataFrame, - Index, MultiIndex, Timedelta) -from pandas.core.frame import _merge_doc -from pandas.core.dtypes.common import ( - is_datetime64tz_dtype, - is_datetime64_dtype, - needs_i8_conversion, - 
is_int64_dtype, - is_categorical_dtype, - is_integer_dtype, - is_float_dtype, - is_numeric_dtype, - is_integer, - is_int_or_datetime_dtype, - is_dtype_equal, - is_bool, - is_list_like, - _ensure_int64, - _ensure_float64, - _ensure_object, - _get_dtype) -from pandas.core.dtypes.missing import na_value_for_dtype -from pandas.core.internals import (items_overlap_with_suffix, - concatenate_block_managers) -from pandas.util.decorators import Appender, Substitution - -from pandas.core.sorting import is_int64_overflow_possible -import pandas.core.algorithms as algos -import pandas.core.common as com -from pandas._libs import hashtable as libhashtable, join as libjoin, lib # back-compat of pseudo-public API @@ -51,1447 +9,9 @@ def wrapper(*args, **kwargs): "import from the public API: " "pandas.concat instead", FutureWarning, stacklevel=3) + import pandas as pd return pd.concat(*args, **kwargs) return wrapper concat = concat_wrap() - - -@Substitution('\nleft : DataFrame') -@Appender(_merge_doc, indents=0) -def merge(left, right, how='inner', on=None, left_on=None, right_on=None, - left_index=False, right_index=False, sort=False, - suffixes=('_x', '_y'), copy=True, indicator=False): - op = _MergeOperation(left, right, how=how, on=on, left_on=left_on, - right_on=right_on, left_index=left_index, - right_index=right_index, sort=sort, suffixes=suffixes, - copy=copy, indicator=indicator) - return op.get_result() - - -if __debug__: - merge.__doc__ = _merge_doc % '\nleft : DataFrame' - - -class MergeError(ValueError): - pass - - -def _groupby_and_merge(by, on, left, right, _merge_pieces, - check_duplicates=True): - """ - groupby & merge; we are always performing a left-by type operation - - Parameters - ---------- - by: field to group - on: duplicates field - left: left frame - right: right frame - _merge_pieces: function for merging - check_duplicates: boolean, default True - should we check & clean duplicates - """ - - pieces = [] - if not isinstance(by, (list, tuple)): - by = 
[by] - - lby = left.groupby(by, sort=False) - - # if we can groupby the rhs - # then we can get vastly better perf - try: - - # we will check & remove duplicates if indicated - if check_duplicates: - if on is None: - on = [] - elif not isinstance(on, (list, tuple)): - on = [on] - - if right.duplicated(by + on).any(): - right = right.drop_duplicates(by + on, keep='last') - rby = right.groupby(by, sort=False) - except KeyError: - rby = None - - for key, lhs in lby: - - if rby is None: - rhs = right - else: - try: - rhs = right.take(rby.indices[key]) - except KeyError: - # key doesn't exist in left - lcols = lhs.columns.tolist() - cols = lcols + [r for r in right.columns - if r not in set(lcols)] - merged = lhs.reindex(columns=cols) - merged.index = range(len(merged)) - pieces.append(merged) - continue - - merged = _merge_pieces(lhs, rhs) - - # make sure join keys are in the merged - # TODO, should _merge_pieces do this? - for k in by: - try: - if k in merged: - merged[k] = key - except: - pass - - pieces.append(merged) - - # preserve the original order - # if we have a missing piece this can be reset - from pandas.tools.concat import concat - result = concat(pieces, ignore_index=True) - result = result.reindex(columns=pieces[0].columns, copy=False) - return result, lby - - -def ordered_merge(left, right, on=None, - left_on=None, right_on=None, - left_by=None, right_by=None, - fill_method=None, suffixes=('_x', '_y')): - - warnings.warn("ordered_merge is deprecated and replaced by merge_ordered", - FutureWarning, stacklevel=2) - return merge_ordered(left, right, on=on, - left_on=left_on, right_on=right_on, - left_by=left_by, right_by=right_by, - fill_method=fill_method, suffixes=suffixes) - - -def merge_ordered(left, right, on=None, - left_on=None, right_on=None, - left_by=None, right_by=None, - fill_method=None, suffixes=('_x', '_y'), - how='outer'): - """Perform merge with optional filling/interpolation designed for ordered - data like time series data. 
Optionally perform group-wise merge (see - examples) - - Parameters - ---------- - left : DataFrame - right : DataFrame - on : label or list - Field names to join on. Must be found in both DataFrames. - left_on : label or list, or array-like - Field names to join on in left DataFrame. Can be a vector or list of - vectors of the length of the DataFrame to use a particular vector as - the join key instead of columns - right_on : label or list, or array-like - Field names to join on in right DataFrame or vector/list of vectors per - left_on docs - left_by : column name or list of column names - Group left DataFrame by group columns and merge piece by piece with - right DataFrame - right_by : column name or list of column names - Group right DataFrame by group columns and merge piece by piece with - left DataFrame - fill_method : {'ffill', None}, default None - Interpolation method for data - suffixes : 2-length sequence (tuple, list, ...) - Suffix to apply to overlapping column names in the left and right - side, respectively - how : {'left', 'right', 'outer', 'inner'}, default 'outer' - * left: use only keys from left frame (SQL: left outer join) - * right: use only keys from right frame (SQL: right outer join) - * outer: use union of keys from both frames (SQL: full outer join) - * inner: use intersection of keys from both frames (SQL: inner join) - - .. versionadded:: 0.19.0 - - Examples - -------- - >>> A >>> B - key lvalue group key rvalue - 0 a 1 a 0 b 1 - 1 c 2 a 1 c 2 - 2 e 3 a 2 d 3 - 3 a 1 b - 4 c 2 b - 5 e 3 b - - >>> ordered_merge(A, B, fill_method='ffill', left_by='group') - key lvalue group rvalue - 0 a 1 a NaN - 1 b 1 a 1 - 2 c 2 a 2 - 3 d 2 a 3 - 4 e 3 a 3 - 5 f 3 a 4 - 6 a 1 b NaN - 7 b 1 b 1 - 8 c 2 b 2 - 9 d 2 b 3 - 10 e 3 b 3 - 11 f 3 b 4 - - Returns - ------- - merged : DataFrame - The output type will the be same as 'left', if it is a subclass - of DataFrame. 
- - See also - -------- - merge - merge_asof - - """ - def _merger(x, y): - # perform the ordered merge operation - op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on, - suffixes=suffixes, fill_method=fill_method, - how=how) - return op.get_result() - - if left_by is not None and right_by is not None: - raise ValueError('Can only group either left or right frames') - elif left_by is not None: - result, _ = _groupby_and_merge(left_by, on, left, right, - lambda x, y: _merger(x, y), - check_duplicates=False) - elif right_by is not None: - result, _ = _groupby_and_merge(right_by, on, right, left, - lambda x, y: _merger(y, x), - check_duplicates=False) - else: - result = _merger(left, right) - return result - - -ordered_merge.__doc__ = merge_ordered.__doc__ - - -def merge_asof(left, right, on=None, - left_on=None, right_on=None, - left_index=False, right_index=False, - by=None, left_by=None, right_by=None, - suffixes=('_x', '_y'), - tolerance=None, - allow_exact_matches=True, - direction='backward'): - """Perform an asof merge. This is similar to a left-join except that we - match on nearest key rather than equal keys. - - Both DataFrames must be sorted by the key. - - For each row in the left DataFrame: - - - A "backward" search selects the last row in the right DataFrame whose - 'on' key is less than or equal to the left's key. - - - A "forward" search selects the first row in the right DataFrame whose - 'on' key is greater than or equal to the left's key. - - - A "nearest" search selects the row in the right DataFrame whose 'on' - key is closest in absolute distance to the left's key. - - The default is "backward" and is compatible in versions below 0.20.0. - The direction parameter was added in version 0.20.0 and introduces - "forward" and "nearest". - - Optionally match on equivalent keys with 'by' before searching with 'on'. - - .. 
versionadded:: 0.19.0 - - Parameters - ---------- - left : DataFrame - right : DataFrame - on : label - Field name to join on. Must be found in both DataFrames. - The data MUST be ordered. Furthermore this must be a numeric column, - such as datetimelike, integer, or float. On or left_on/right_on - must be given. - left_on : label - Field name to join on in left DataFrame. - right_on : label - Field name to join on in right DataFrame. - left_index : boolean - Use the index of the left DataFrame as the join key. - - .. versionadded:: 0.19.2 - - right_index : boolean - Use the index of the right DataFrame as the join key. - - .. versionadded:: 0.19.2 - - by : column name or list of column names - Match on these columns before performing merge operation. - left_by : column name - Field names to match on in the left DataFrame. - - .. versionadded:: 0.19.2 - - right_by : column name - Field names to match on in the right DataFrame. - - .. versionadded:: 0.19.2 - - suffixes : 2-length sequence (tuple, list, ...) - Suffix to apply to overlapping column names in the left and right - side, respectively. - tolerance : integer or Timedelta, optional, default None - Select asof tolerance within this range; must be compatible - with the merge index. - allow_exact_matches : boolean, default True - - - If True, allow matching with the same 'on' value - (i.e. less-than-or-equal-to / greater-than-or-equal-to) - - If False, don't match the same 'on' value - (i.e., stricly less-than / strictly greater-than) - - direction : 'backward' (default), 'forward', or 'nearest' - Whether to search for prior, subsequent, or closest matches. - - .. 
versionadded:: 0.20.0 - - Returns - ------- - merged : DataFrame - - Examples - -------- - >>> left - a left_val - 0 1 a - 1 5 b - 2 10 c - - >>> right - a right_val - 0 1 1 - 1 2 2 - 2 3 3 - 3 6 6 - 4 7 7 - - >>> pd.merge_asof(left, right, on='a') - a left_val right_val - 0 1 a 1 - 1 5 b 3 - 2 10 c 7 - - >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False) - a left_val right_val - 0 1 a NaN - 1 5 b 3.0 - 2 10 c 7.0 - - >>> pd.merge_asof(left, right, on='a', direction='forward') - a left_val right_val - 0 1 a 1.0 - 1 5 b 6.0 - 2 10 c NaN - - >>> pd.merge_asof(left, right, on='a', direction='nearest') - a left_val right_val - 0 1 a 1 - 1 5 b 6 - 2 10 c 7 - - We can use indexed DataFrames as well. - - >>> left - left_val - 1 a - 5 b - 10 c - - >>> right - right_val - 1 1 - 2 2 - 3 3 - 6 6 - 7 7 - - >>> pd.merge_asof(left, right, left_index=True, right_index=True) - left_val right_val - 1 a 1 - 5 b 3 - 10 c 7 - - Here is a real-world times-series example - - >>> quotes - time ticker bid ask - 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 - 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 - 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 - 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 - 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 - 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 - 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 - 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 - - >>> trades - time ticker price quantity - 0 2016-05-25 13:30:00.023 MSFT 51.95 75 - 1 2016-05-25 13:30:00.038 MSFT 51.95 155 - 2 2016-05-25 13:30:00.048 GOOG 720.77 100 - 3 2016-05-25 13:30:00.048 GOOG 720.92 100 - 4 2016-05-25 13:30:00.048 AAPL 98.00 100 - - By default we are taking the asof of the quotes - - >>> pd.merge_asof(trades, quotes, - ... on='time', - ... 
by='ticker') - time ticker price quantity bid ask - 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 - 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 - 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 - 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 - 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN - - We only asof within 2ms betwen the quote time and the trade time - - >>> pd.merge_asof(trades, quotes, - ... on='time', - ... by='ticker', - ... tolerance=pd.Timedelta('2ms')) - time ticker price quantity bid ask - 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 - 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN - 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 - 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 - 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN - - We only asof within 10ms betwen the quote time and the trade time - and we exclude exact matches on time. However *prior* data will - propogate forward - - >>> pd.merge_asof(trades, quotes, - ... on='time', - ... by='ticker', - ... tolerance=pd.Timedelta('10ms'), - ... allow_exact_matches=False) - time ticker price quantity bid ask - 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN - 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 - 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 - 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 - 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN - - See also - -------- - merge - merge_ordered - - """ - op = _AsOfMerge(left, right, - on=on, left_on=left_on, right_on=right_on, - left_index=left_index, right_index=right_index, - by=by, left_by=left_by, right_by=right_by, - suffixes=suffixes, - how='asof', tolerance=tolerance, - allow_exact_matches=allow_exact_matches, - direction=direction) - return op.get_result() - - -# TODO: transformations?? 
-# TODO: only copy DataFrames when modification necessary -class _MergeOperation(object): - """ - Perform a database (SQL) merge operation between two DataFrame objects - using either columns as keys or their row indexes - """ - _merge_type = 'merge' - - def __init__(self, left, right, how='inner', on=None, - left_on=None, right_on=None, axis=1, - left_index=False, right_index=False, sort=True, - suffixes=('_x', '_y'), copy=True, indicator=False): - self.left = self.orig_left = left - self.right = self.orig_right = right - self.how = how - self.axis = axis - - self.on = com._maybe_make_list(on) - self.left_on = com._maybe_make_list(left_on) - self.right_on = com._maybe_make_list(right_on) - - self.copy = copy - self.suffixes = suffixes - self.sort = sort - - self.left_index = left_index - self.right_index = right_index - - self.indicator = indicator - - if isinstance(self.indicator, compat.string_types): - self.indicator_name = self.indicator - elif isinstance(self.indicator, bool): - self.indicator_name = '_merge' if self.indicator else None - else: - raise ValueError( - 'indicator option can only accept boolean or string arguments') - - if not isinstance(left, DataFrame): - raise ValueError( - 'can not merge DataFrame with instance of ' - 'type {0}'.format(type(left))) - if not isinstance(right, DataFrame): - raise ValueError( - 'can not merge DataFrame with instance of ' - 'type {0}'.format(type(right))) - - if not is_bool(left_index): - raise ValueError( - 'left_index parameter must be of type bool, not ' - '{0}'.format(type(left_index))) - if not is_bool(right_index): - raise ValueError( - 'right_index parameter must be of type bool, not ' - '{0}'.format(type(right_index))) - - # warn user when merging between different levels - if left.columns.nlevels != right.columns.nlevels: - msg = ('merging between different levels can give an unintended ' - 'result ({0} levels on the left, {1} on the right)') - msg = msg.format(left.columns.nlevels, 
right.columns.nlevels) - warnings.warn(msg, UserWarning) - - self._validate_specification() - - # note this function has side effects - (self.left_join_keys, - self.right_join_keys, - self.join_names) = self._get_merge_keys() - - # validate the merge keys dtypes. We may need to coerce - # to avoid incompat dtypes - self._maybe_coerce_merge_keys() - - def get_result(self): - if self.indicator: - self.left, self.right = self._indicator_pre_merge( - self.left, self.right) - - join_index, left_indexer, right_indexer = self._get_join_info() - - ldata, rdata = self.left._data, self.right._data - lsuf, rsuf = self.suffixes - - llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, - rdata.items, rsuf) - - lindexers = {1: left_indexer} if left_indexer is not None else {} - rindexers = {1: right_indexer} if right_indexer is not None else {} - - result_data = concatenate_block_managers( - [(ldata, lindexers), (rdata, rindexers)], - axes=[llabels.append(rlabels), join_index], - concat_axis=0, copy=self.copy) - - typ = self.left._constructor - result = typ(result_data).__finalize__(self, method=self._merge_type) - - if self.indicator: - result = self._indicator_post_merge(result) - - self._maybe_add_join_keys(result, left_indexer, right_indexer) - - return result - - def _indicator_pre_merge(self, left, right): - - columns = left.columns.union(right.columns) - - for i in ['_left_indicator', '_right_indicator']: - if i in columns: - raise ValueError("Cannot use `indicator=True` option when " - "data contains a column named {}".format(i)) - if self.indicator_name in columns: - raise ValueError( - "Cannot use name of an existing column for indicator column") - - left = left.copy() - right = right.copy() - - left['_left_indicator'] = 1 - left['_left_indicator'] = left['_left_indicator'].astype('int8') - - right['_right_indicator'] = 2 - right['_right_indicator'] = right['_right_indicator'].astype('int8') - - return left, right - - def _indicator_post_merge(self, result): 
- - result['_left_indicator'] = result['_left_indicator'].fillna(0) - result['_right_indicator'] = result['_right_indicator'].fillna(0) - - result[self.indicator_name] = Categorical((result['_left_indicator'] + - result['_right_indicator']), - categories=[1, 2, 3]) - result[self.indicator_name] = ( - result[self.indicator_name] - .cat.rename_categories(['left_only', 'right_only', 'both'])) - - result = result.drop(labels=['_left_indicator', '_right_indicator'], - axis=1) - return result - - def _maybe_add_join_keys(self, result, left_indexer, right_indexer): - - left_has_missing = None - right_has_missing = None - - keys = zip(self.join_names, self.left_on, self.right_on) - for i, (name, lname, rname) in enumerate(keys): - if not _should_fill(lname, rname): - continue - - take_left, take_right = None, None - - if name in result: - - if left_indexer is not None and right_indexer is not None: - if name in self.left: - - if left_has_missing is None: - left_has_missing = (left_indexer == -1).any() - - if left_has_missing: - take_right = self.right_join_keys[i] - - if not is_dtype_equal(result[name].dtype, - self.left[name].dtype): - take_left = self.left[name]._values - - elif name in self.right: - - if right_has_missing is None: - right_has_missing = (right_indexer == -1).any() - - if right_has_missing: - take_left = self.left_join_keys[i] - - if not is_dtype_equal(result[name].dtype, - self.right[name].dtype): - take_right = self.right[name]._values - - elif left_indexer is not None \ - and isinstance(self.left_join_keys[i], np.ndarray): - - take_left = self.left_join_keys[i] - take_right = self.right_join_keys[i] - - if take_left is not None or take_right is not None: - - if take_left is None: - lvals = result[name]._values - else: - lfill = na_value_for_dtype(take_left.dtype) - lvals = algos.take_1d(take_left, left_indexer, - fill_value=lfill) - - if take_right is None: - rvals = result[name]._values - else: - rfill = na_value_for_dtype(take_right.dtype) - rvals = 
algos.take_1d(take_right, right_indexer, - fill_value=rfill) - - # if we have an all missing left_indexer - # make sure to just use the right values - mask = left_indexer == -1 - if mask.all(): - key_col = rvals - else: - key_col = Index(lvals).where(~mask, rvals) - - if name in result: - result[name] = key_col - else: - result.insert(i, name or 'key_%d' % i, key_col) - - def _get_join_indexers(self): - """ return the join indexers """ - return _get_join_indexers(self.left_join_keys, - self.right_join_keys, - sort=self.sort, - how=self.how) - - def _get_join_info(self): - left_ax = self.left._data.axes[self.axis] - right_ax = self.right._data.axes[self.axis] - - if self.left_index and self.right_index and self.how != 'asof': - join_index, left_indexer, right_indexer = \ - left_ax.join(right_ax, how=self.how, return_indexers=True, - sort=self.sort) - elif self.right_index and self.how == 'left': - join_index, left_indexer, right_indexer = \ - _left_join_on_index(left_ax, right_ax, self.left_join_keys, - sort=self.sort) - - elif self.left_index and self.how == 'right': - join_index, right_indexer, left_indexer = \ - _left_join_on_index(right_ax, left_ax, self.right_join_keys, - sort=self.sort) - else: - (left_indexer, - right_indexer) = self._get_join_indexers() - - if self.right_index: - if len(self.left) > 0: - join_index = self.left.index.take(left_indexer) - else: - join_index = self.right.index.take(right_indexer) - left_indexer = np.array([-1] * len(join_index)) - elif self.left_index: - if len(self.right) > 0: - join_index = self.right.index.take(right_indexer) - else: - join_index = self.left.index.take(left_indexer) - right_indexer = np.array([-1] * len(join_index)) - else: - join_index = Index(np.arange(len(left_indexer))) - - if len(join_index) == 0: - join_index = join_index.astype(object) - return join_index, left_indexer, right_indexer - - def _get_merge_keys(self): - """ - Note: has side effects (copy/delete key columns) - - Parameters - ---------- - 
left - right - on - - Returns - ------- - left_keys, right_keys - """ - left_keys = [] - right_keys = [] - join_names = [] - right_drop = [] - left_drop = [] - left, right = self.left, self.right - - is_lkey = lambda x: isinstance( - x, (np.ndarray, Series)) and len(x) == len(left) - is_rkey = lambda x: isinstance( - x, (np.ndarray, Series)) and len(x) == len(right) - - # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A - # user could, for example, request 'left_index' and 'left_by'. In a - # regular pd.merge(), users cannot specify both 'left_index' and - # 'left_on'. (Instead, users have a MultiIndex). That means the - # self.left_on in this function is always empty in a pd.merge(), but - # a pd.merge_asof(left_index=True, left_by=...) will result in a - # self.left_on array with a None in the middle of it. This requires - # a work-around as designated in the code below. - # See _validate_specification() for where this happens. - - # ugh, spaghetti re #733 - if _any(self.left_on) and _any(self.right_on): - for lk, rk in zip(self.left_on, self.right_on): - if is_lkey(lk): - left_keys.append(lk) - if is_rkey(rk): - right_keys.append(rk) - join_names.append(None) # what to do? 
- else: - if rk is not None: - right_keys.append(right[rk]._values) - join_names.append(rk) - else: - # work-around for merge_asof(right_index=True) - right_keys.append(right.index) - join_names.append(right.index.name) - else: - if not is_rkey(rk): - if rk is not None: - right_keys.append(right[rk]._values) - else: - # work-around for merge_asof(right_index=True) - right_keys.append(right.index) - if lk is not None and lk == rk: - # avoid key upcast in corner case (length-0) - if len(left) > 0: - right_drop.append(rk) - else: - left_drop.append(lk) - else: - right_keys.append(rk) - if lk is not None: - left_keys.append(left[lk]._values) - join_names.append(lk) - else: - # work-around for merge_asof(left_index=True) - left_keys.append(left.index) - join_names.append(left.index.name) - elif _any(self.left_on): - for k in self.left_on: - if is_lkey(k): - left_keys.append(k) - join_names.append(None) - else: - left_keys.append(left[k]._values) - join_names.append(k) - if isinstance(self.right.index, MultiIndex): - right_keys = [lev._values.take(lab) - for lev, lab in zip(self.right.index.levels, - self.right.index.labels)] - else: - right_keys = [self.right.index.values] - elif _any(self.right_on): - for k in self.right_on: - if is_rkey(k): - right_keys.append(k) - join_names.append(None) - else: - right_keys.append(right[k]._values) - join_names.append(k) - if isinstance(self.left.index, MultiIndex): - left_keys = [lev._values.take(lab) - for lev, lab in zip(self.left.index.levels, - self.left.index.labels)] - else: - left_keys = [self.left.index.values] - - if left_drop: - self.left = self.left.drop(left_drop, axis=1) - - if right_drop: - self.right = self.right.drop(right_drop, axis=1) - - return left_keys, right_keys, join_names - - def _maybe_coerce_merge_keys(self): - # we have valid mergee's but we may have to further - # coerce these if they are originally incompatible types - # - # for example if these are categorical, but are not dtype_equal - # or if we 
have object and integer dtypes - - for lk, rk, name in zip(self.left_join_keys, - self.right_join_keys, - self.join_names): - if (len(lk) and not len(rk)) or (not len(lk) and len(rk)): - continue - - # if either left or right is a categorical - # then the must match exactly in categories & ordered - if is_categorical_dtype(lk) and is_categorical_dtype(rk): - if lk.is_dtype_equal(rk): - continue - elif is_categorical_dtype(lk) or is_categorical_dtype(rk): - pass - - elif is_dtype_equal(lk.dtype, rk.dtype): - continue - - # if we are numeric, then allow differing - # kinds to proceed, eg. int64 and int8 - # further if we are object, but we infer to - # the same, then proceed - if (is_numeric_dtype(lk) and is_numeric_dtype(rk)): - if lk.dtype.kind == rk.dtype.kind: - continue - - # let's infer and see if we are ok - if lib.infer_dtype(lk) == lib.infer_dtype(rk): - continue - - # Houston, we have a problem! - # let's coerce to object - if name in self.left.columns: - self.left = self.left.assign( - **{name: self.left[name].astype(object)}) - if name in self.right.columns: - self.right = self.right.assign( - **{name: self.right[name].astype(object)}) - - def _validate_specification(self): - # Hm, any way to make this logic less complicated?? 
- if self.on is None and self.left_on is None and self.right_on is None: - - if self.left_index and self.right_index: - self.left_on, self.right_on = (), () - elif self.left_index: - if self.right_on is None: - raise MergeError('Must pass right_on or right_index=True') - elif self.right_index: - if self.left_on is None: - raise MergeError('Must pass left_on or left_index=True') - else: - # use the common columns - common_cols = self.left.columns.intersection( - self.right.columns) - if len(common_cols) == 0: - raise MergeError('No common columns to perform merge on') - if not common_cols.is_unique: - raise MergeError("Data columns not unique: %s" - % repr(common_cols)) - self.left_on = self.right_on = common_cols - elif self.on is not None: - if self.left_on is not None or self.right_on is not None: - raise MergeError('Can only pass argument "on" OR "left_on" ' - 'and "right_on", not a combination of both.') - self.left_on = self.right_on = self.on - elif self.left_on is not None: - n = len(self.left_on) - if self.right_index: - if len(self.left_on) != self.right.index.nlevels: - raise ValueError('len(left_on) must equal the number ' - 'of levels in the index of "right"') - self.right_on = [None] * n - elif self.right_on is not None: - n = len(self.right_on) - if self.left_index: - if len(self.right_on) != self.left.index.nlevels: - raise ValueError('len(right_on) must equal the number ' - 'of levels in the index of "left"') - self.left_on = [None] * n - if len(self.right_on) != len(self.left_on): - raise ValueError("len(right_on) must equal len(left_on)") - - -def _get_join_indexers(left_keys, right_keys, sort=False, how='inner', - **kwargs): - """ - - Parameters - ---------- - left_keys: ndarray, Index, Series - right_keys: ndarray, Index, Series - sort: boolean, default False - how: string {'inner', 'outer', 'left', 'right'}, default 'inner' - - Returns - ------- - tuple of (left_indexer, right_indexer) - indexers into the left_keys, right_keys - - """ - from 
functools import partial - - assert len(left_keys) == len(right_keys), \ - 'left_key and right_keys must be the same length' - - # bind `sort` arg. of _factorize_keys - fkeys = partial(_factorize_keys, sort=sort) - - # get left & right join labels and num. of levels at each location - llab, rlab, shape = map(list, zip(* map(fkeys, left_keys, right_keys))) - - # get flat i8 keys from label lists - lkey, rkey = _get_join_keys(llab, rlab, shape, sort) - - # factorize keys to a dense i8 space - # `count` is the num. of unique keys - # set(lkey) | set(rkey) == range(count) - lkey, rkey, count = fkeys(lkey, rkey) - - # preserve left frame order if how == 'left' and sort == False - kwargs = copy.copy(kwargs) - if how == 'left': - kwargs['sort'] = sort - join_func = _join_functions[how] - - return join_func(lkey, rkey, count, **kwargs) - - -class _OrderedMerge(_MergeOperation): - _merge_type = 'ordered_merge' - - def __init__(self, left, right, on=None, left_on=None, right_on=None, - left_index=False, right_index=False, axis=1, - suffixes=('_x', '_y'), copy=True, - fill_method=None, how='outer'): - - self.fill_method = fill_method - _MergeOperation.__init__(self, left, right, on=on, left_on=left_on, - left_index=left_index, - right_index=right_index, - right_on=right_on, axis=axis, - how=how, suffixes=suffixes, - sort=True # factorize sorts - ) - - def get_result(self): - join_index, left_indexer, right_indexer = self._get_join_info() - - # this is a bit kludgy - ldata, rdata = self.left._data, self.right._data - lsuf, rsuf = self.suffixes - - llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf, - rdata.items, rsuf) - - if self.fill_method == 'ffill': - left_join_indexer = libjoin.ffill_indexer(left_indexer) - right_join_indexer = libjoin.ffill_indexer(right_indexer) - else: - left_join_indexer = left_indexer - right_join_indexer = right_indexer - - lindexers = { - 1: left_join_indexer} if left_join_indexer is not None else {} - rindexers = { - 1: 
right_join_indexer} if right_join_indexer is not None else {} - - result_data = concatenate_block_managers( - [(ldata, lindexers), (rdata, rindexers)], - axes=[llabels.append(rlabels), join_index], - concat_axis=0, copy=self.copy) - - typ = self.left._constructor - result = typ(result_data).__finalize__(self, method=self._merge_type) - - self._maybe_add_join_keys(result, left_indexer, right_indexer) - - return result - - -def _asof_function(direction, on_type): - return getattr(libjoin, 'asof_join_%s_%s' % (direction, on_type), None) - - -def _asof_by_function(direction, on_type, by_type): - return getattr(libjoin, 'asof_join_%s_%s_by_%s' % - (direction, on_type, by_type), None) - - -_type_casters = { - 'int64_t': _ensure_int64, - 'double': _ensure_float64, - 'object': _ensure_object, -} - -_cython_types = { - 'uint8': 'uint8_t', - 'uint32': 'uint32_t', - 'uint16': 'uint16_t', - 'uint64': 'uint64_t', - 'int8': 'int8_t', - 'int32': 'int32_t', - 'int16': 'int16_t', - 'int64': 'int64_t', - 'float16': 'error', - 'float32': 'float', - 'float64': 'double', -} - - -def _get_cython_type(dtype): - """ Given a dtype, return a C name like 'int64_t' or 'double' """ - type_name = _get_dtype(dtype).name - ctype = _cython_types.get(type_name, 'object') - if ctype == 'error': - raise MergeError('unsupported type: ' + type_name) - return ctype - - -def _get_cython_type_upcast(dtype): - """ Upcast a dtype to 'int64_t', 'double', or 'object' """ - if is_integer_dtype(dtype): - return 'int64_t' - elif is_float_dtype(dtype): - return 'double' - else: - return 'object' - - -class _AsOfMerge(_OrderedMerge): - _merge_type = 'asof_merge' - - def __init__(self, left, right, on=None, left_on=None, right_on=None, - left_index=False, right_index=False, - by=None, left_by=None, right_by=None, - axis=1, suffixes=('_x', '_y'), copy=True, - fill_method=None, - how='asof', tolerance=None, - allow_exact_matches=True, - direction='backward'): - - self.by = by - self.left_by = left_by - self.right_by 
= right_by - self.tolerance = tolerance - self.allow_exact_matches = allow_exact_matches - self.direction = direction - - _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on, - right_on=right_on, left_index=left_index, - right_index=right_index, axis=axis, - how=how, suffixes=suffixes, - fill_method=fill_method) - - def _validate_specification(self): - super(_AsOfMerge, self)._validate_specification() - - # we only allow on to be a single item for on - if len(self.left_on) != 1 and not self.left_index: - raise MergeError("can only asof on a key for left") - - if len(self.right_on) != 1 and not self.right_index: - raise MergeError("can only asof on a key for right") - - if self.left_index and isinstance(self.left.index, MultiIndex): - raise MergeError("left can only have one index") - - if self.right_index and isinstance(self.right.index, MultiIndex): - raise MergeError("right can only have one index") - - # set 'by' columns - if self.by is not None: - if self.left_by is not None or self.right_by is not None: - raise MergeError('Can only pass by OR left_by ' - 'and right_by') - self.left_by = self.right_by = self.by - if self.left_by is None and self.right_by is not None: - raise MergeError('missing left_by') - if self.left_by is not None and self.right_by is None: - raise MergeError('missing right_by') - - # add 'by' to our key-list so we can have it in the - # output as a key - if self.left_by is not None: - if not is_list_like(self.left_by): - self.left_by = [self.left_by] - if not is_list_like(self.right_by): - self.right_by = [self.right_by] - - if len(self.left_by) != len(self.right_by): - raise MergeError('left_by and right_by must be same length') - - self.left_on = self.left_by + list(self.left_on) - self.right_on = self.right_by + list(self.right_on) - - # check 'direction' is valid - if self.direction not in ['backward', 'forward', 'nearest']: - raise MergeError('direction invalid: ' + self.direction) - - @property - def _asof_key(self): - 
""" This is our asof key, the 'on' """ - return self.left_on[-1] - - def _get_merge_keys(self): - - # note this function has side effects - (left_join_keys, - right_join_keys, - join_names) = super(_AsOfMerge, self)._get_merge_keys() - - # validate index types are the same - for lk, rk in zip(left_join_keys, right_join_keys): - if not is_dtype_equal(lk.dtype, rk.dtype): - raise MergeError("incompatible merge keys, " - "must be the same type") - - # validate tolerance; must be a Timedelta if we have a DTI - if self.tolerance is not None: - - if self.left_index: - lt = self.left.index - else: - lt = left_join_keys[-1] - - msg = "incompatible tolerance, must be compat " \ - "with type {0}".format(type(lt)) - - if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): - if not isinstance(self.tolerance, Timedelta): - raise MergeError(msg) - if self.tolerance < Timedelta(0): - raise MergeError("tolerance must be positive") - - elif is_int64_dtype(lt): - if not is_integer(self.tolerance): - raise MergeError(msg) - if self.tolerance < 0: - raise MergeError("tolerance must be positive") - - else: - raise MergeError("key must be integer or timestamp") - - # validate allow_exact_matches - if not is_bool(self.allow_exact_matches): - raise MergeError("allow_exact_matches must be boolean, " - "passed {0}".format(self.allow_exact_matches)) - - return left_join_keys, right_join_keys, join_names - - def _get_join_indexers(self): - """ return the join indexers """ - - def flip(xs): - """ unlike np.transpose, this returns an array of tuples """ - labels = list(string.ascii_lowercase[:len(xs)]) - dtypes = [x.dtype for x in xs] - labeled_dtypes = list(zip(labels, dtypes)) - return np.array(lzip(*xs), labeled_dtypes) - - # values to compare - left_values = (self.left.index.values if self.left_index else - self.left_join_keys[-1]) - right_values = (self.right.index.values if self.right_index else - self.right_join_keys[-1]) - tolerance = self.tolerance - - # we required sortedness in the 
join keys - msg = " keys must be sorted" - if not Index(left_values).is_monotonic: - raise ValueError('left' + msg) - if not Index(right_values).is_monotonic: - raise ValueError('right' + msg) - - # initial type conversion as needed - if needs_i8_conversion(left_values): - left_values = left_values.view('i8') - right_values = right_values.view('i8') - if tolerance is not None: - tolerance = tolerance.value - - # a "by" parameter requires special handling - if self.left_by is not None: - # remove 'on' parameter from values if one existed - if self.left_index and self.right_index: - left_by_values = self.left_join_keys - right_by_values = self.right_join_keys - else: - left_by_values = self.left_join_keys[0:-1] - right_by_values = self.right_join_keys[0:-1] - - # get tuple representation of values if more than one - if len(left_by_values) == 1: - left_by_values = left_by_values[0] - right_by_values = right_by_values[0] - else: - left_by_values = flip(left_by_values) - right_by_values = flip(right_by_values) - - # upcast 'by' parameter because HashTable is limited - by_type = _get_cython_type_upcast(left_by_values.dtype) - by_type_caster = _type_casters[by_type] - left_by_values = by_type_caster(left_by_values) - right_by_values = by_type_caster(right_by_values) - - # choose appropriate function by type - on_type = _get_cython_type(left_values.dtype) - func = _asof_by_function(self.direction, on_type, by_type) - return func(left_values, - right_values, - left_by_values, - right_by_values, - self.allow_exact_matches, - tolerance) - else: - # choose appropriate function by type - on_type = _get_cython_type(left_values.dtype) - func = _asof_function(self.direction, on_type) - return func(left_values, - right_values, - self.allow_exact_matches, - tolerance) - - -def _get_multiindex_indexer(join_keys, index, sort): - from functools import partial - - # bind `sort` argument - fkeys = partial(_factorize_keys, sort=sort) - - # left & right join labels and num. 
of levels at each location - rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys))) - if sort: - rlab = list(map(np.take, rlab, index.labels)) - else: - i8copy = lambda a: a.astype('i8', subok=False, copy=True) - rlab = list(map(i8copy, index.labels)) - - # fix right labels if there were any nulls - for i in range(len(join_keys)): - mask = index.labels[i] == -1 - if mask.any(): - # check if there already was any nulls at this location - # if there was, it is factorized to `shape[i] - 1` - a = join_keys[i][llab[i] == shape[i] - 1] - if a.size == 0 or not a[0] != a[0]: - shape[i] += 1 - - rlab[i][mask] = shape[i] - 1 - - # get flat i8 join keys - lkey, rkey = _get_join_keys(llab, rlab, shape, sort) - - # factorize keys to a dense i8 space - lkey, rkey, count = fkeys(lkey, rkey) - - return libjoin.left_outer_join(lkey, rkey, count, sort=sort) - - -def _get_single_indexer(join_key, index, sort=False): - left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) - - left_indexer, right_indexer = libjoin.left_outer_join( - _ensure_int64(left_key), - _ensure_int64(right_key), - count, sort=sort) - - return left_indexer, right_indexer - - -def _left_join_on_index(left_ax, right_ax, join_keys, sort=False): - if len(join_keys) > 1: - if not ((isinstance(right_ax, MultiIndex) and - len(join_keys) == right_ax.nlevels)): - raise AssertionError("If more than one join key is given then " - "'right_ax' must be a MultiIndex and the " - "number of join keys must be the number of " - "levels in right_ax") - - left_indexer, right_indexer = \ - _get_multiindex_indexer(join_keys, right_ax, sort=sort) - else: - jkey = join_keys[0] - - left_indexer, right_indexer = \ - _get_single_indexer(jkey, right_ax, sort=sort) - - if sort or len(left_ax) != len(left_indexer): - # if asked to sort or there are 1-to-many matches - join_index = left_ax.take(left_indexer) - return join_index, left_indexer, right_indexer - - # left frame preserves order & length of its 
index - return left_ax, None, right_indexer - - -def _right_outer_join(x, y, max_groups): - right_indexer, left_indexer = libjoin.left_outer_join(y, x, max_groups) - return left_indexer, right_indexer - - -_join_functions = { - 'inner': libjoin.inner_join, - 'left': libjoin.left_outer_join, - 'right': _right_outer_join, - 'outer': libjoin.full_outer_join, -} - - -def _factorize_keys(lk, rk, sort=True): - if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk): - lk = lk.values - rk = rk.values - - # if we exactly match in categories, allow us to use codes - if (is_categorical_dtype(lk) and - is_categorical_dtype(rk) and - lk.is_dtype_equal(rk)): - return lk.codes, rk.codes, len(lk.categories) - - if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): - klass = libhashtable.Int64Factorizer - lk = _ensure_int64(com._values_from_object(lk)) - rk = _ensure_int64(com._values_from_object(rk)) - else: - klass = libhashtable.Factorizer - lk = _ensure_object(lk) - rk = _ensure_object(rk) - - rizer = klass(max(len(lk), len(rk))) - - llab = rizer.factorize(lk) - rlab = rizer.factorize(rk) - - count = rizer.get_count() - - if sort: - uniques = rizer.uniques.to_array() - llab, rlab = _sort_labels(uniques, llab, rlab) - - # NA group - lmask = llab == -1 - lany = lmask.any() - rmask = rlab == -1 - rany = rmask.any() - - if lany or rany: - if lany: - np.putmask(llab, lmask, count) - if rany: - np.putmask(rlab, rmask, count) - count += 1 - - return llab, rlab, count - - -def _sort_labels(uniques, left, right): - if not isinstance(uniques, np.ndarray): - # tuplesafe - uniques = Index(uniques).values - - l = len(left) - labels = np.concatenate([left, right]) - - _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1) - new_labels = _ensure_int64(new_labels) - new_left, new_right = new_labels[:l], new_labels[l:] - - return new_left, new_right - - -def _get_join_keys(llab, rlab, shape, sort): - - # how many levels can be done without overflow - pred = lambda i: 
not is_int64_overflow_possible(shape[:i]) - nlev = next(filter(pred, range(len(shape), 0, -1))) - - # get keys for the first `nlev` levels - stride = np.prod(shape[1:nlev], dtype='i8') - lkey = stride * llab[0].astype('i8', subok=False, copy=False) - rkey = stride * rlab[0].astype('i8', subok=False, copy=False) - - for i in range(1, nlev): - stride //= shape[i] - lkey += llab[i] * stride - rkey += rlab[i] * stride - - if nlev == len(shape): # all done! - return lkey, rkey - - # densify current keys to avoid overflow - lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) - - llab = [lkey] + llab[nlev:] - rlab = [rkey] + rlab[nlev:] - shape = [count] + shape[nlev:] - - return _get_join_keys(llab, rlab, shape, sort) - - -def _should_fill(lname, rname): - if (not isinstance(lname, compat.string_types) or - not isinstance(rname, compat.string_types)): - return True - return lname == rname - - -def _any(x): - return x is not None and len(x) > 0 and any([y is not None for y in x]) diff --git a/pandas/tools/util.py b/pandas/tools/util.py deleted file mode 100644 index baf968440858d..0000000000000 --- a/pandas/tools/util.py +++ /dev/null @@ -1,245 +0,0 @@ -import numpy as np -import pandas._libs.lib as lib - -from pandas.core.dtypes.common import ( - is_number, - is_numeric_dtype, - is_datetime_or_timedelta_dtype, - is_list_like, - _ensure_object, - is_decimal, - is_scalar as isscalar) - -from pandas.core.dtypes.cast import maybe_downcast_to_dtype - -import pandas as pd -from pandas.compat import reduce -from pandas.core.index import Index -from pandas.core import common as com - - -def match(needles, haystack): - haystack = Index(haystack) - needles = Index(needles) - return haystack.get_indexer(needles) - - -def cartesian_product(X): - """ - Numpy version of itertools.product or pandas.compat.product. - Sometimes faster (for large inputs)... 
- - Parameters - ---------- - X : list-like of list-likes - - Returns - ------- - product : list of ndarrays - - Examples - -------- - >>> cartesian_product([list('ABC'), [1, 2]]) - [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'), - array([1, 2, 1, 2, 1, 2])] - - See also - -------- - itertools.product : Cartesian product of input iterables. Equivalent to - nested for-loops. - pandas.compat.product : An alias for itertools.product. - """ - msg = "Input must be a list-like of list-likes" - if not is_list_like(X): - raise TypeError(msg) - for x in X: - if not is_list_like(x): - raise TypeError(msg) - - if len(X) == 0: - return [] - - lenX = np.fromiter((len(x) for x in X), dtype=np.intp) - cumprodX = np.cumproduct(lenX) - - a = np.roll(cumprodX, 1) - a[0] = 1 - - if cumprodX[-1] != 0: - b = cumprodX[-1] / cumprodX - else: - # if any factor is empty, the cartesian product is empty - b = np.zeros_like(cumprodX) - - return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), - np.product(a[i])) - for i, x in enumerate(X)] - - -def _compose2(f, g): - """Compose 2 callables""" - return lambda *args, **kwargs: f(g(*args, **kwargs)) - - -def compose(*funcs): - """Compose 2 or more callables""" - assert len(funcs) > 1, 'At least 2 callables must be passed to compose' - return reduce(_compose2, funcs) - - -def to_numeric(arg, errors='raise', downcast=None): - """ - Convert argument to a numeric type. 
- - Parameters - ---------- - arg : list, tuple, 1-d array, or Series - errors : {'ignore', 'raise', 'coerce'}, default 'raise' - - If 'raise', then invalid parsing will raise an exception - - If 'coerce', then invalid parsing will be set as NaN - - If 'ignore', then invalid parsing will return the input - downcast : {'integer', 'signed', 'unsigned', 'float'} , default None - If not None, and if the data has been successfully cast to a - numerical dtype (or if the data was numeric to begin with), - downcast that resulting data to the smallest numerical dtype - possible according to the following rules: - - - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - - 'float': smallest float dtype (min.: np.float32) - - As this behaviour is separate from the core conversion to - numeric values, any errors raised during the downcasting - will be surfaced regardless of the value of the 'errors' input. - - In addition, downcasting will only occur if the size - of the resulting data's dtype is strictly larger than - the dtype it is to be cast to, so if none of the dtypes - checked satisfy that specification, no downcasting will be - performed on the data. - - .. versionadded:: 0.19.0 - - Returns - ------- - ret : numeric if parsing succeeded. - Return type depends on input. 
Series if Series, otherwise ndarray - - Examples - -------- - Take separate series and convert to numeric, coercing when told to - - >>> import pandas as pd - >>> s = pd.Series(['1.0', '2', -3]) - >>> pd.to_numeric(s) - 0 1.0 - 1 2.0 - 2 -3.0 - dtype: float64 - >>> pd.to_numeric(s, downcast='float') - 0 1.0 - 1 2.0 - 2 -3.0 - dtype: float32 - >>> pd.to_numeric(s, downcast='signed') - 0 1 - 1 2 - 2 -3 - dtype: int8 - >>> s = pd.Series(['apple', '1.0', '2', -3]) - >>> pd.to_numeric(s, errors='ignore') - 0 apple - 1 1.0 - 2 2 - 3 -3 - dtype: object - >>> pd.to_numeric(s, errors='coerce') - 0 NaN - 1 1.0 - 2 2.0 - 3 -3.0 - dtype: float64 - """ - if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): - raise ValueError('invalid downcasting method provided') - - is_series = False - is_index = False - is_scalar = False - - if isinstance(arg, pd.Series): - is_series = True - values = arg.values - elif isinstance(arg, pd.Index): - is_index = True - values = arg.asi8 - if values is None: - values = arg.values - elif isinstance(arg, (list, tuple)): - values = np.array(arg, dtype='O') - elif isscalar(arg): - if is_decimal(arg): - return float(arg) - if is_number(arg): - return arg - is_scalar = True - values = np.array([arg], dtype='O') - elif getattr(arg, 'ndim', 1) > 1: - raise TypeError('arg must be a list, tuple, 1-d array, or Series') - else: - values = arg - - try: - if is_numeric_dtype(values): - pass - elif is_datetime_or_timedelta_dtype(values): - values = values.astype(np.int64) - else: - values = _ensure_object(values) - coerce_numeric = False if errors in ('ignore', 'raise') else True - values = lib.maybe_convert_numeric(values, set(), - coerce_numeric=coerce_numeric) - - except Exception: - if errors == 'raise': - raise - - # attempt downcast only if the data has been successfully converted - # to a numerical dtype and if a downcast method has been specified - if downcast is not None and is_numeric_dtype(values): - typecodes = None - - if downcast in 
('integer', 'signed'): - typecodes = np.typecodes['Integer'] - elif downcast == 'unsigned' and np.min(values) >= 0: - typecodes = np.typecodes['UnsignedInteger'] - elif downcast == 'float': - typecodes = np.typecodes['Float'] - - # pandas support goes only to np.float32, - # as float dtypes smaller than that are - # extremely rare and not well supported - float_32_char = np.dtype(np.float32).char - float_32_ind = typecodes.index(float_32_char) - typecodes = typecodes[float_32_ind:] - - if typecodes is not None: - # from smallest to largest - for dtype in typecodes: - if np.dtype(dtype).itemsize <= values.dtype.itemsize: - values = maybe_downcast_to_dtype(values, dtype) - - # successful conversion - if values.dtype == dtype: - break - - if is_series: - return pd.Series(values, index=arg.index, name=arg.name) - elif is_index: - # because we want to coerce to numeric if possible, - # do not use _shallow_copy_with_infer - return Index(values, name=arg.name) - elif is_scalar: - return values[0] - else: - return values diff --git a/setup.py b/setup.py index 6fc66e2355c0f..69b9a974b9935 100755 --- a/setup.py +++ b/setup.py @@ -642,6 +642,7 @@ def pxd(name): 'pandas.core.dtypes', 'pandas.core.indexes', 'pandas.core.computation', + 'pandas.core.reshape', 'pandas.core.sparse', 'pandas.errors', 'pandas.io', @@ -673,7 +674,6 @@ def pxd(name): 'pandas.tests.series', 'pandas.tests.scalar', 'pandas.tests.tseries', - 'pandas.tests.tools', 'pandas.tests.plotting', 'pandas.tools', 'pandas.tseries', @@ -703,7 +703,7 @@ def pxd(name): 'data/html_encoding/*.html', 'json/data/*.json'], 'pandas.tests.io.formats': ['data/*.csv'], - 'pandas.tests.tools': ['data/*.csv'], + 'pandas.tests.reshape': ['data/*.csv'], 'pandas.tests.tseries': ['data/*.pickle'], 'pandas.io.formats': ['templates/*.tpl'] }, From c8dafb5a7ae9fe42b9d15c47082a6fb139e78b5d Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Tue, 18 Apr 2017 12:17:07 +0000 Subject: [PATCH 53/56] CLN: reorg pandas.tseries (#16040) * CLN: move 
pandas/tseries/resample.py -> pandas/core/resample.py closes #13634 * CLN: move pandas.tseries.period -> pandas.core.indexes.period * CLN: move pandas.tseries.tdi -> pandas.core.indexes.timedeltas * CLN: move pandas.tseries.base -> pandas.core.indexes.datetimelike * CLN: pandas.tseries.common -> pandas.core.indexes.accessors * CLN: move pandas.tseries.index -> pandas.core.indexes.datetimes * CLN: move pandas.tseries.timedeltas, pandas.tseries.tools -> pandas.core.tools * move to_numeric to pandas.core.tools.numeric --- doc/source/api.rst | 2 +- pandas/_libs/period.pyx | 2 +- pandas/_libs/tslib.pyx | 2 +- pandas/compat/pickle_compat.py | 8 +- pandas/core/api.py | 17 +- pandas/core/computation/pytables.py | 2 +- pandas/core/datetools.py | 2 +- pandas/core/dtypes/cast.py | 171 +------- pandas/core/frame.py | 6 +- pandas/core/generic.py | 12 +- pandas/core/groupby.py | 6 +- .../common.py => core/indexes/accessors.py} | 6 +- pandas/core/indexes/api.py | 11 +- pandas/core/indexes/base.py | 20 +- .../base.py => core/indexes/datetimelike.py} | 9 +- .../index.py => core/indexes/datetimes.py} | 16 +- pandas/{tseries => core/indexes}/period.py | 16 +- .../tdi.py => core/indexes/timedeltas.py} | 9 +- pandas/core/internals.py | 2 +- pandas/core/ops.py | 4 +- pandas/{tseries => core}/resample.py | 6 +- pandas/core/series.py | 10 +- pandas/core/tools/__init__.py | 0 .../tools.py => core/tools/datetimes.py} | 2 +- pandas/core/tools/numeric.py | 170 ++++++++ pandas/{tseries => core/tools}/timedeltas.py | 0 pandas/io/excel.py | 2 +- pandas/io/formats/format.py | 6 +- pandas/io/parsers.py | 2 +- pandas/io/sql.py | 2 +- pandas/plotting/_converter.py | 6 +- pandas/plotting/_core.py | 2 +- pandas/plotting/_timeseries.py | 8 +- pandas/tests/dtypes/test_cast.py | 369 +---------------- pandas/tests/frame/test_alter_axes.py | 7 +- pandas/tests/frame/test_analytics.py | 2 +- pandas/tests/frame/test_timeseries.py | 2 +- pandas/tests/groupby/test_groupby.py | 5 +- 
pandas/tests/groupby/test_timegrouper.py | 4 +- pandas/tests/indexes/common.py | 9 +- .../indexes/datetimes/test_date_range.py | 2 +- pandas/tests/indexes/datetimes/test_ops.py | 2 +- pandas/tests/indexes/datetimes/test_setops.py | 2 +- pandas/tests/indexes/datetimes/test_tools.py | 4 +- .../tests/indexes/period/test_construction.py | 2 +- pandas/tests/indexes/period/test_ops.py | 2 +- pandas/tests/indexes/period/test_setops.py | 2 +- pandas/tests/indexes/period/test_tools.py | 2 +- pandas/tests/indexes/test_base.py | 2 +- pandas/tests/io/json/test_ujson.py | 2 +- pandas/tests/io/parser/parse_dates.py | 4 +- pandas/tests/io/test_sql.py | 2 +- pandas/tests/plotting/test_datetimelike.py | 8 +- pandas/tests/reshape/test_concat.py | 2 +- pandas/tests/scalar/test_period.py | 2 +- pandas/tests/scalar/test_timedelta.py | 2 +- pandas/tests/series/test_analytics.py | 4 +- pandas/tests/series/test_api.py | 2 +- pandas/tests/series/test_combine_concat.py | 2 +- pandas/tests/series/test_constructors.py | 2 +- pandas/tests/series/test_datetime_values.py | 6 +- pandas/tests/series/test_internals.py | 2 +- pandas/tests/series/test_operators.py | 4 +- pandas/tests/series/test_period.py | 2 +- pandas/tests/series/test_quantile.py | 2 +- pandas/tests/series/test_timeseries.py | 6 +- pandas/tests/sparse/test_frame.py | 2 +- pandas/tests/test_base.py | 2 +- pandas/tests/test_categorical.py | 2 +- pandas/tests/{tseries => }/test_resample.py | 10 +- pandas/tests/tools/__init__.py | 0 pandas/tests/tools/test_numeric.py | 371 ++++++++++++++++++ pandas/tests/tseries/test_frequencies.py | 4 +- pandas/tests/tseries/test_offsets.py | 8 +- pandas/tests/tseries/test_timezones.py | 4 +- pandas/tseries/api.py | 6 - pandas/tseries/offsets.py | 2 +- setup.py | 2 + 78 files changed, 727 insertions(+), 697 deletions(-) rename pandas/{tseries/common.py => core/indexes/accessors.py} (97%) rename pandas/{tseries/base.py => core/indexes/datetimelike.py} (99%) rename pandas/{tseries/index.py => 
core/indexes/datetimes.py} (99%) rename pandas/{tseries => core/indexes}/period.py (98%) rename pandas/{tseries/tdi.py => core/indexes/timedeltas.py} (99%) rename pandas/{tseries => core}/resample.py (99%) mode change 100755 => 100644 create mode 100644 pandas/core/tools/__init__.py rename pandas/{tseries/tools.py => core/tools/datetimes.py} (99%) create mode 100644 pandas/core/tools/numeric.py rename pandas/{tseries => core/tools}/timedeltas.py (100%) rename pandas/tests/{tseries => }/test_resample.py (99%) mode change 100755 => 100644 create mode 100644 pandas/tests/tools/__init__.py create mode 100644 pandas/tests/tools/test_numeric.py diff --git a/doc/source/api.rst b/doc/source/api.rst index 868f0d7f9c962..caa5498db1ebf 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1761,7 +1761,7 @@ The following methods are available only for ``DataFrameGroupBy`` objects. Resampling ---------- -.. currentmodule:: pandas.tseries.resample +.. currentmodule:: pandas.core.resample Resampler objects are returned by resample calls: :func:`pandas.DataFrame.resample`, :func:`pandas.Series.resample`. diff --git a/pandas/_libs/period.pyx b/pandas/_libs/period.pyx index f30035910a62f..1db31387de5a7 100644 --- a/pandas/_libs/period.pyx +++ b/pandas/_libs/period.pyx @@ -34,7 +34,7 @@ from tslib cimport ( ) from pandas.tseries import offsets -from pandas.tseries.tools import parse_time_string +from pandas.core.tools.datetimes import parse_time_string from pandas.tseries import frequencies cdef int64_t NPY_NAT = util.get_nat() diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 47679966e3d5c..c471d46262484 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -502,7 +502,7 @@ class Timestamp(_Timestamp): """ Return an period of which this timestamp is an observation. 
""" - from pandas.tseries.period import Period + from pandas import Period if freq is None: freq = self.freq diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index f7d451ce7c92f..6df365a1cd898 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -94,7 +94,13 @@ def load_reduce(self): ('pandas.indexes.range', 'RangeIndex'): ('pandas.core.indexes.range', 'RangeIndex'), ('pandas.indexes.multi', 'MultiIndex'): - ('pandas.core.indexes.multi', 'MultiIndex') + ('pandas.core.indexes.multi', 'MultiIndex'), + ('pandas.tseries.index', '_new_DatetimeIndex'): + ('pandas.core.indexes.datetimes', '_new_DatetimeIndex'), + ('pandas.tseries.index', 'DatetimeIndex'): + ('pandas.core.indexes.datetimes', 'DatetimeIndex'), + ('pandas.tseries.period', 'PeriodIndex'): + ('pandas.core.indexes.period', 'PeriodIndex') } diff --git a/pandas/core/api.py b/pandas/core/api.py index f3191283b85eb..3e84720c32a1c 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -11,7 +11,12 @@ from pandas.io.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, UInt64Index, RangeIndex, Float64Index, - MultiIndex, IntervalIndex) + MultiIndex, IntervalIndex, + TimedeltaIndex, DatetimeIndex, + PeriodIndex, NaT) +from pandas.core.indexes.period import Period, period_range, pnow +from pandas.core.indexes.timedeltas import Timedelta, timedelta_range +from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range from pandas.core.indexes.interval import Interval, interval_range from pandas.core.series import Series @@ -23,13 +28,11 @@ lreshape, wide_to_long) from pandas.core.indexing import IndexSlice -from pandas.core.dtypes.cast import to_numeric +from pandas.core.tools.numeric import to_numeric from pandas.tseries.offsets import DateOffset -from pandas.tseries.tools import to_datetime -from pandas.tseries.index import (DatetimeIndex, Timestamp, - date_range, bdate_range) -from 
pandas.tseries.tdi import TimedeltaIndex, Timedelta -from pandas.tseries.period import Period, PeriodIndex +from pandas.core.tools.datetimes import to_datetime +from pandas.core.tools.timedeltas import to_timedelta +from pandas.core.resample import TimeGrouper # see gh-14094. from pandas.util.depr_module import _DeprecatedModule diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 285ff346158a0..5870090856ff9 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -14,7 +14,7 @@ from pandas.core.computation.ops import is_term, UndefinedVariableError from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.common import _ensure_decoded -from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type +from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type class Scope(expr.Scope): diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py index bfc3f3d4e4743..3444d09c6ed1b 100644 --- a/pandas/core/datetools.py +++ b/pandas/core/datetools.py @@ -4,7 +4,7 @@ import warnings -from pandas.tseries.tools import * +from pandas.core.tools.datetimes import * from pandas.tseries.offsets import * from pandas.tseries.frequencies import * diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 3c1f480787d3a..a5e12e8262579 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -5,7 +5,6 @@ import numpy as np import warnings -import pandas as pd from pandas._libs import tslib, lib from pandas._libs.tslib import iNaT from pandas.compat import string_types, text_type, PY3 @@ -19,8 +18,6 @@ is_integer_dtype, is_datetime_or_timedelta_dtype, is_bool_dtype, is_scalar, - is_numeric_dtype, is_decimal, - is_number, _string_dtypes, _coerce_to_dtype, _ensure_int8, _ensure_int16, @@ -29,7 +26,7 @@ _POSSIBLY_CAST_DTYPES) from .dtypes import ExtensionDtype, DatetimeTZDtype, PeriodDtype from .generic import 
(ABCDatetimeIndex, ABCPeriodIndex, - ABCSeries, ABCIndexClass) + ABCSeries) from .missing import isnull, notnull from .inference import is_list_like @@ -548,7 +545,7 @@ def coerce_to_dtypes(result, dtypes): if len(result) != len(dtypes): raise AssertionError("_coerce_to_dtypes requires equal len arrays") - from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type + from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type def conv(r, dtype): try: @@ -670,7 +667,7 @@ def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, if convert_timedeltas and values.dtype == np.object_: if convert_timedeltas == 'coerce': - from pandas.tseries.timedeltas import to_timedelta + from pandas.core.tools.timedeltas import to_timedelta new_values = to_timedelta(values, coerce=True) # if we are all nans then leave me alone @@ -872,8 +869,8 @@ def maybe_cast_to_datetime(value, dtype, errors='raise'): """ try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ - from pandas.tseries.timedeltas import to_timedelta - from pandas.tseries.tools import to_datetime + from pandas.core.tools.timedeltas import to_timedelta + from pandas.core.tools.datetimes import to_datetime if dtype is not None: if isinstance(dtype, string_types): @@ -1029,161 +1026,3 @@ def find_common_type(types): return np.object return np.find_common_type(types, []) - - -def to_numeric(arg, errors='raise', downcast=None): - """ - Convert argument to a numeric type. 
- - Parameters - ---------- - arg : list, tuple, 1-d array, or Series - errors : {'ignore', 'raise', 'coerce'}, default 'raise' - - If 'raise', then invalid parsing will raise an exception - - If 'coerce', then invalid parsing will be set as NaN - - If 'ignore', then invalid parsing will return the input - downcast : {'integer', 'signed', 'unsigned', 'float'} , default None - If not None, and if the data has been successfully cast to a - numerical dtype (or if the data was numeric to begin with), - downcast that resulting data to the smallest numerical dtype - possible according to the following rules: - - - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - - 'float': smallest float dtype (min.: np.float32) - - As this behaviour is separate from the core conversion to - numeric values, any errors raised during the downcasting - will be surfaced regardless of the value of the 'errors' input. - - In addition, downcasting will only occur if the size - of the resulting data's dtype is strictly larger than - the dtype it is to be cast to, so if none of the dtypes - checked satisfy that specification, no downcasting will be - performed on the data. - - .. versionadded:: 0.19.0 - - Returns - ------- - ret : numeric if parsing succeeded. - Return type depends on input. 
Series if Series, otherwise ndarray - - Examples - -------- - Take separate series and convert to numeric, coercing when told to - - >>> import pandas as pd - >>> s = pd.Series(['1.0', '2', -3]) - >>> pd.to_numeric(s) - 0 1.0 - 1 2.0 - 2 -3.0 - dtype: float64 - >>> pd.to_numeric(s, downcast='float') - 0 1.0 - 1 2.0 - 2 -3.0 - dtype: float32 - >>> pd.to_numeric(s, downcast='signed') - 0 1 - 1 2 - 2 -3 - dtype: int8 - >>> s = pd.Series(['apple', '1.0', '2', -3]) - >>> pd.to_numeric(s, errors='ignore') - 0 apple - 1 1.0 - 2 2 - 3 -3 - dtype: object - >>> pd.to_numeric(s, errors='coerce') - 0 NaN - 1 1.0 - 2 2.0 - 3 -3.0 - dtype: float64 - """ - if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): - raise ValueError('invalid downcasting method provided') - - is_series = False - is_index = False - is_scalars = False - - if isinstance(arg, ABCSeries): - is_series = True - values = arg.values - elif isinstance(arg, ABCIndexClass): - is_index = True - values = arg.asi8 - if values is None: - values = arg.values - elif isinstance(arg, (list, tuple)): - values = np.array(arg, dtype='O') - elif is_scalar(arg): - if is_decimal(arg): - return float(arg) - if is_number(arg): - return arg - is_scalars = True - values = np.array([arg], dtype='O') - elif getattr(arg, 'ndim', 1) > 1: - raise TypeError('arg must be a list, tuple, 1-d array, or Series') - else: - values = arg - - try: - if is_numeric_dtype(values): - pass - elif is_datetime_or_timedelta_dtype(values): - values = values.astype(np.int64) - else: - values = _ensure_object(values) - coerce_numeric = False if errors in ('ignore', 'raise') else True - values = lib.maybe_convert_numeric(values, set(), - coerce_numeric=coerce_numeric) - - except Exception: - if errors == 'raise': - raise - - # attempt downcast only if the data has been successfully converted - # to a numerical dtype and if a downcast method has been specified - if downcast is not None and is_numeric_dtype(values): - typecodes = None - - if 
downcast in ('integer', 'signed'): - typecodes = np.typecodes['Integer'] - elif downcast == 'unsigned' and np.min(values) >= 0: - typecodes = np.typecodes['UnsignedInteger'] - elif downcast == 'float': - typecodes = np.typecodes['Float'] - - # pandas support goes only to np.float32, - # as float dtypes smaller than that are - # extremely rare and not well supported - float_32_char = np.dtype(np.float32).char - float_32_ind = typecodes.index(float_32_char) - typecodes = typecodes[float_32_ind:] - - if typecodes is not None: - # from smallest to largest - for dtype in typecodes: - if np.dtype(dtype).itemsize <= values.dtype.itemsize: - values = maybe_downcast_to_dtype(values, dtype) - - # successful conversion - if values.dtype == dtype: - break - - if is_series: - return pd.Series(values, index=arg.index, name=arg.name) - elif is_index: - # because we want to coerce to numeric if possible, - # do not use _shallow_copy_with_infer - return pd.Index(values, name=arg.name) - elif is_scalars: - return values[0] - else: - return values diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9b9039455b948..153042d4a09c9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -82,9 +82,9 @@ from pandas.util.decorators import Appender, Substitution from pandas.util.validators import validate_bool_kwarg -from pandas.tseries.period import PeriodIndex -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.tdi import TimedeltaIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex import pandas.core.base as base import pandas.core.common as com diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 841df3727e5a6..1555157610609 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -37,8 +37,8 @@ from pandas.core.index import (Index, MultiIndex, _ensure_index, InvalidIndexError) import pandas.core.indexing as 
indexing -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.period import PeriodIndex, Period +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.period import PeriodIndex, Period from pandas.core.internals import BlockManager import pandas.core.algorithms as algos import pandas.core.common as com @@ -4363,7 +4363,7 @@ def asfreq(self, freq, method=None, how=None, normalize=False, To learn more about the frequency strings, please see `this link `__. """ - from pandas.tseries.resample import asfreq + from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value) @@ -4573,8 +4573,8 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, 2000-01-01 00:00:00 0 6 12 18 2000-01-01 00:03:00 0 4 8 12 """ - from pandas.tseries.resample import (resample, - _maybe_process_deprecations) + from pandas.core.resample import (resample, + _maybe_process_deprecations) axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, @@ -5361,7 +5361,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True): # if we have a date index, convert to dates, otherwise # treat like a slice if ax.is_all_dates: - from pandas.tseries.tools import to_datetime + from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 8f788aed3950d..1f715c685c27e 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -232,7 +232,7 @@ class Grouper(object): def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: - from pandas.tseries.resample import TimeGrouper + from pandas.core.resample import TimeGrouper cls = TimeGrouper return super(Grouper, cls).__new__(cls) @@ -1227,7 +1227,7 @@ def resample(self, rule, *args, **kwargs): Provide resampling when 
using a TimeGrouper Return a new grouper with our resampler appended """ - from pandas.tseries.resample import get_resampler_for_grouping + from pandas.core.resample import get_resampler_for_grouping return get_resampler_for_grouping(self, rule, *args, **kwargs) @Substitution(name='groupby') @@ -3509,7 +3509,7 @@ def _decide_output_index(self, output, labels): def _wrap_applied_output(self, keys, values, not_indexed_same=False): from pandas.core.index import _all_indexes_same - from pandas.core.dtypes.cast import to_numeric + from pandas.core.tools.numeric import to_numeric if len(keys) == 0: return DataFrame(index=keys) diff --git a/pandas/tseries/common.py b/pandas/core/indexes/accessors.py similarity index 97% rename from pandas/tseries/common.py rename to pandas/core/indexes/accessors.py index 2154cfd4b2857..f1fb9a8ad93a7 100644 --- a/pandas/tseries/common.py +++ b/pandas/core/indexes/accessors.py @@ -12,10 +12,10 @@ is_list_like) from pandas.core.base import PandasDelegate, NoNewAttributesMixin -from pandas.tseries.index import DatetimeIndex +from pandas.core.indexes.datetimes import DatetimeIndex from pandas._libs.period import IncompatibleFrequency # noqa -from pandas.tseries.period import PeriodIndex -from pandas.tseries.tdi import TimedeltaIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.algorithms import take_1d diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index d40f6da4c4ee5..d90c681abc03f 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -7,16 +7,21 @@ from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa Int64Index, UInt64Index) from pandas.core.indexes.range import RangeIndex # noqa +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.datetimes import DatetimeIndex import pandas.core.common as com -import 
pandas._libs.lib as lib +from pandas._libs import lib +from pandas._libs.tslib import NaT # TODO: there are many places that rely on these private methods existing in # pandas.core.index __all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index', 'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index', - 'InvalidIndexError', - '_new_Index', + 'InvalidIndexError', 'TimedeltaIndex', + 'PeriodIndex', 'DatetimeIndex', + '_new_Index', 'NaT', '_ensure_index', '_get_na_value', '_get_combined_index', '_get_distinct_indexes', '_union_indexes', '_get_consensus_names', diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 705b7a186dced..dcb9f9a144f39 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -91,7 +91,7 @@ def _new_Index(cls, d): # required for backward compat, because PI can't be instantiated with # ordinals through __new__ GH #13277 if issubclass(cls, ABCPeriodIndex): - from pandas.tseries.period import _new_PeriodIndex + from pandas.core.indexes.period import _new_PeriodIndex return _new_PeriodIndex(cls, **d) return cls.__new__(cls, **d) @@ -184,7 +184,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, if (is_datetime64_any_dtype(data) or (dtype is not None and is_datetime64_any_dtype(dtype)) or 'tz' in kwargs): - from pandas.tseries.index import DatetimeIndex + from pandas.core.indexes.datetimes import DatetimeIndex result = DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs) if dtype is not None and is_dtype_equal(_o_dtype, dtype): @@ -194,7 +194,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif (is_timedelta64_dtype(data) or (dtype is not None and is_timedelta64_dtype(dtype))): - from pandas.tseries.tdi import TimedeltaIndex + from pandas.core.indexes.timedeltas import TimedeltaIndex result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) if dtype is not None and _o_dtype == dtype: return Index(result.to_pytimedelta(), 
dtype=_o_dtype) @@ -250,8 +250,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, raise # maybe coerce to a sub-class - from pandas.tseries.period import (PeriodIndex, - IncompatibleFrequency) + from pandas.core.indexes.period import ( + PeriodIndex, IncompatibleFrequency) if isinstance(data, PeriodIndex): return PeriodIndex(data, copy=copy, name=name, **kwargs) if is_signed_integer_dtype(data.dtype): @@ -299,7 +299,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, if (lib.is_datetime_with_singletz_array(subarr) or 'tz' in kwargs): # only when subarr has the same tz - from pandas.tseries.index import DatetimeIndex + from pandas.core.indexes.datetimes import ( + DatetimeIndex) try: return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) @@ -307,7 +308,8 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, pass elif inferred.startswith('timedelta'): - from pandas.tseries.tdi import TimedeltaIndex + from pandas.core.indexes.timedeltas import ( + TimedeltaIndex) return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs) elif inferred == 'period': @@ -1009,7 +1011,7 @@ def to_datetime(self, dayfirst=False): warnings.warn("to_datetime is deprecated. 
Use pd.to_datetime(...)", FutureWarning, stacklevel=2) - from pandas.tseries.index import DatetimeIndex + from pandas.core.indexes.datetimes import DatetimeIndex if self.inferred_type == 'string': from dateutil.parser import parse parser = lambda x: parse(x, dayfirst=dayfirst) @@ -2664,7 +2666,7 @@ def get_indexer_for(self, target, **kwargs): def _maybe_promote(self, other): # A hack, but it works - from pandas.tseries.index import DatetimeIndex + from pandas.core.indexes.datetimes import DatetimeIndex if self.inferred_type == 'date' and isinstance(other, DatetimeIndex): return DatetimeIndex(self), other elif self.inferred_type == 'boolean': diff --git a/pandas/tseries/base.py b/pandas/core/indexes/datetimelike.py similarity index 99% rename from pandas/tseries/base.py rename to pandas/core/indexes/datetimelike.py index 3daa88fe396f6..387209ceb038f 100644 --- a/pandas/tseries/base.py +++ b/pandas/core/indexes/datetimelike.py @@ -27,8 +27,7 @@ Timedelta, Timestamp, iNaT, NaT) from pandas._libs.period import Period -from pandas.core.index import Index -from pandas.core.indexes.base import _index_shared_docs +from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util.decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat import pandas.tseries.frequencies as frequencies @@ -639,7 +638,7 @@ def _add_datetimelike_methods(cls): def __add__(self, other): from pandas.core.index import Index - from pandas.tseries.tdi import TimedeltaIndex + from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset if isinstance(other, TimedeltaIndex): return self._add_delta(other) @@ -666,8 +665,8 @@ def __add__(self, other): def __sub__(self, other): from pandas.core.index import Index - from pandas.tseries.index import DatetimeIndex - from pandas.tseries.tdi import TimedeltaIndex + from pandas.core.indexes.datetimes import DatetimeIndex + from pandas.core.indexes.timedeltas import 
TimedeltaIndex from pandas.tseries.offsets import DateOffset if isinstance(other, TimedeltaIndex): return self._add_delta(-other) diff --git a/pandas/tseries/index.py b/pandas/core/indexes/datetimes.py similarity index 99% rename from pandas/tseries/index.py rename to pandas/core/indexes/datetimes.py index d9aa72fe065ab..b92368ec1be7b 100644 --- a/pandas/tseries/index.py +++ b/pandas/core/indexes/datetimes.py @@ -29,21 +29,23 @@ from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_box -from pandas.core.index import Index, Int64Index, Float64Index -from pandas.core.indexes.base import _index_shared_docs +from pandas.core.indexes.base import Index, _index_shared_docs +from pandas.core.indexes.numeric import Int64Index, Float64Index import pandas.compat as compat from pandas.tseries.frequencies import ( to_offset, get_period_alias, Resolution) -from pandas.tseries.base import DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin +from pandas.core.indexes.datetimelike import ( + DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay -from pandas.tseries.tools import parse_time_string, normalize_date, to_time -from pandas.tseries.timedeltas import to_timedelta +from pandas.core.tools.datetimes import ( + parse_time_string, normalize_date, to_time) +from pandas.core.tools.timedeltas import to_timedelta from pandas.util.decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) import pandas.core.common as com import pandas.tseries.offsets as offsets -import pandas.tseries.tools as tools +import pandas.core.tools.datetimes as tools from pandas._libs import (lib, index as libindex, tslib as libts, algos as libalgos, join as libjoin, @@ -927,7 +929,7 @@ def to_period(self, freq=None): """ Cast to PeriodIndex at a particular frequency """ - from pandas.tseries.period import PeriodIndex + from pandas.core.indexes.period import PeriodIndex if freq 
is None: freq = self.freqstr or self.inferred_freq diff --git a/pandas/tseries/period.py b/pandas/core/indexes/period.py similarity index 98% rename from pandas/tseries/period.py rename to pandas/core/indexes/period.py index b19e086b818f0..378661a49e20d 100644 --- a/pandas/tseries/period.py +++ b/pandas/core/indexes/period.py @@ -24,10 +24,10 @@ import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc -from pandas.tseries.index import DatetimeIndex, Int64Index, Index -from pandas.tseries.tdi import TimedeltaIndex -from pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin -from pandas.tseries.tools import parse_time_string +from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexes.datetimelike import DatelikeOps, DatetimeIndexOpsMixin +from pandas.core.tools.datetimes import parse_time_string import pandas.tseries.offsets as offsets from pandas._libs.lib import infer_dtype @@ -528,17 +528,17 @@ def asfreq(self, freq=None, how='E'): -------- >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') >>> pidx - + [2010, ..., 2015] Length: 6, Freq: A-DEC >>> pidx.asfreq('M') - + [2010-12, ..., 2015-12] Length: 6, Freq: M >>> pidx.asfreq('M', how='S') - + [2010-01, ..., 2015-01] Length: 6, Freq: M """ @@ -1154,7 +1154,7 @@ def pnow(freq=None): # deprecation, xref #13790 import warnings - warnings.warn("pd.pnow() and pandas.tseries.period.pnow() " + warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " "are deprecated. 
Please use Period.now()", FutureWarning, stacklevel=2) return Period.now(freq=freq) diff --git a/pandas/tseries/tdi.py b/pandas/core/indexes/timedeltas.py similarity index 99% rename from pandas/tseries/tdi.py rename to pandas/core/indexes/timedeltas.py index 7768b4a340775..1081787b2c0b0 100644 --- a/pandas/tseries/tdi.py +++ b/pandas/core/indexes/timedeltas.py @@ -17,7 +17,8 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.common import _maybe_box, _values_from_object, is_bool_indexer -from pandas.core.index import Index, Int64Index +from pandas.core.indexes.base import Index +from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat from pandas.compat import u from pandas.tseries.frequencies import to_offset @@ -27,9 +28,9 @@ import pandas.core.common as com import pandas.core.dtypes.concat as _concat from pandas.util.decorators import Appender, Substitution, deprecate_kwarg -from pandas.tseries.base import TimelikeOps, DatetimeIndexOpsMixin -from pandas.tseries.timedeltas import (to_timedelta, - _coerce_scalar_to_timedelta_type) +from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin +from pandas.core.tools.timedeltas import ( + to_timedelta, _coerce_scalar_to_timedelta_type) from pandas.tseries.offsets import Tick, DateOffset from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timedelta, NaT, iNaT) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a87574455a63..f265f5f438280 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -54,7 +54,7 @@ from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import maybe_convert_indices, length_of_indexer from pandas.core.categorical import Categorical, maybe_to_categorical -from pandas.tseries.index import DatetimeIndex +from pandas.core.indexes.datetimes import DatetimeIndex from pandas.io.formats.printing import pprint_thing import 
pandas.core.missing as missing diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 50815498f40df..41a17a0957cbf 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -442,7 +442,7 @@ def _validate(self, lvalues, rvalues, name): def _convert_to_array(self, values, name=None, other=None): """converts values to ndarray""" - from pandas.tseries.timedeltas import to_timedelta + from pandas.core.tools.timedeltas import to_timedelta ovalues = values supplied_dtype = None @@ -508,7 +508,7 @@ def _convert_to_array(self, values, name=None, other=None): return values def _convert_for_datetime(self, lvalues, rvalues): - from pandas.tseries.timedeltas import to_timedelta + from pandas.core.tools.timedeltas import to_timedelta mask = isnull(lvalues) | isnull(rvalues) diff --git a/pandas/tseries/resample.py b/pandas/core/resample.py old mode 100755 new mode 100644 similarity index 99% rename from pandas/tseries/resample.py rename to pandas/core/resample.py index 2856b54ad9a8c..203ae0cb17e02 --- a/pandas/tseries/resample.py +++ b/pandas/core/resample.py @@ -10,10 +10,10 @@ SeriesGroupBy, groupby, PanelGroupBy) from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod -from pandas.tseries.index import DatetimeIndex, date_range -from pandas.tseries.tdi import TimedeltaIndex +from pandas.core.indexes.datetimes import DatetimeIndex, date_range +from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset, Tick, Day, _delta_to_nanoseconds -from pandas.tseries.period import PeriodIndex, period_range +from pandas.core.indexes.period import PeriodIndex, period_range import pandas.core.common as com import pandas.core.algorithms as algos diff --git a/pandas/core/series.py b/pandas/core/series.py index 69a2b35d88460..8a2351527856d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -52,11 +52,11 @@ from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, 
CategoricalAccessor import pandas.core.strings as strings -from pandas.tseries.common import (maybe_to_datetimelike, - CombinedDatetimelikeProperties) -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.tdi import TimedeltaIndex -from pandas.tseries.period import PeriodIndex +from pandas.core.indexes.accessors import ( + maybe_to_datetimelike, CombinedDatetimelikeProperties) +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexes.period import PeriodIndex from pandas import compat from pandas.util.terminal import get_terminal_size from pandas.compat import zip, u, OrderedDict, StringIO diff --git a/pandas/core/tools/__init__.py b/pandas/core/tools/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tseries/tools.py b/pandas/core/tools/datetimes.py similarity index 99% rename from pandas/tseries/tools.py rename to pandas/core/tools/datetimes.py index db7aa5974e562..9c02a6212c412 100644 --- a/pandas/tseries/tools.py +++ b/pandas/core/tools/datetimes.py @@ -336,7 +336,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, 2 1960-01-04 """ - from pandas.tseries.index import DatetimeIndex + from pandas.core.indexes.datetimes import DatetimeIndex tz = 'utc' if utc else None diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py new file mode 100644 index 0000000000000..eda88a2f7e474 --- /dev/null +++ b/pandas/core/tools/numeric.py @@ -0,0 +1,170 @@ +import numpy as np +import pandas as pd +from pandas.core.dtypes.common import ( + is_scalar, + is_numeric_dtype, + is_decimal, + is_datetime_or_timedelta_dtype, + is_number, + _ensure_object) +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass +from pandas.core.dtypes.cast import maybe_downcast_to_dtype +from pandas._libs import lib + + +def to_numeric(arg, errors='raise', downcast=None): + """ + Convert argument to a numeric type. 
+ + Parameters + ---------- + arg : list, tuple, 1-d array, or Series + errors : {'ignore', 'raise', 'coerce'}, default 'raise' + - If 'raise', then invalid parsing will raise an exception + - If 'coerce', then invalid parsing will be set as NaN + - If 'ignore', then invalid parsing will return the input + downcast : {'integer', 'signed', 'unsigned', 'float'} , default None + If not None, and if the data has been successfully cast to a + numerical dtype (or if the data was numeric to begin with), + downcast that resulting data to the smallest numerical dtype + possible according to the following rules: + + - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) + - 'unsigned': smallest unsigned int dtype (min.: np.uint8) + - 'float': smallest float dtype (min.: np.float32) + + As this behaviour is separate from the core conversion to + numeric values, any errors raised during the downcasting + will be surfaced regardless of the value of the 'errors' input. + + In addition, downcasting will only occur if the size + of the resulting data's dtype is strictly larger than + the dtype it is to be cast to, so if none of the dtypes + checked satisfy that specification, no downcasting will be + performed on the data. + + .. versionadded:: 0.19.0 + + Returns + ------- + ret : numeric if parsing succeeded. + Return type depends on input. 
Series if Series, otherwise ndarray + + Examples + -------- + Take separate series and convert to numeric, coercing when told to + + >>> import pandas as pd + >>> s = pd.Series(['1.0', '2', -3]) + >>> pd.to_numeric(s) + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float64 + >>> pd.to_numeric(s, downcast='float') + 0 1.0 + 1 2.0 + 2 -3.0 + dtype: float32 + >>> pd.to_numeric(s, downcast='signed') + 0 1 + 1 2 + 2 -3 + dtype: int8 + >>> s = pd.Series(['apple', '1.0', '2', -3]) + >>> pd.to_numeric(s, errors='ignore') + 0 apple + 1 1.0 + 2 2 + 3 -3 + dtype: object + >>> pd.to_numeric(s, errors='coerce') + 0 NaN + 1 1.0 + 2 2.0 + 3 -3.0 + dtype: float64 + """ + if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): + raise ValueError('invalid downcasting method provided') + + is_series = False + is_index = False + is_scalars = False + + if isinstance(arg, ABCSeries): + is_series = True + values = arg.values + elif isinstance(arg, ABCIndexClass): + is_index = True + values = arg.asi8 + if values is None: + values = arg.values + elif isinstance(arg, (list, tuple)): + values = np.array(arg, dtype='O') + elif is_scalar(arg): + if is_decimal(arg): + return float(arg) + if is_number(arg): + return arg + is_scalars = True + values = np.array([arg], dtype='O') + elif getattr(arg, 'ndim', 1) > 1: + raise TypeError('arg must be a list, tuple, 1-d array, or Series') + else: + values = arg + + try: + if is_numeric_dtype(values): + pass + elif is_datetime_or_timedelta_dtype(values): + values = values.astype(np.int64) + else: + values = _ensure_object(values) + coerce_numeric = False if errors in ('ignore', 'raise') else True + values = lib.maybe_convert_numeric(values, set(), + coerce_numeric=coerce_numeric) + + except Exception: + if errors == 'raise': + raise + + # attempt downcast only if the data has been successfully converted + # to a numerical dtype and if a downcast method has been specified + if downcast is not None and is_numeric_dtype(values): + typecodes = None + + if 
downcast in ('integer', 'signed'): + typecodes = np.typecodes['Integer'] + elif downcast == 'unsigned' and np.min(values) >= 0: + typecodes = np.typecodes['UnsignedInteger'] + elif downcast == 'float': + typecodes = np.typecodes['Float'] + + # pandas support goes only to np.float32, + # as float dtypes smaller than that are + # extremely rare and not well supported + float_32_char = np.dtype(np.float32).char + float_32_ind = typecodes.index(float_32_char) + typecodes = typecodes[float_32_ind:] + + if typecodes is not None: + # from smallest to largest + for dtype in typecodes: + if np.dtype(dtype).itemsize <= values.dtype.itemsize: + values = maybe_downcast_to_dtype(values, dtype) + + # successful conversion + if values.dtype == dtype: + break + + if is_series: + return pd.Series(values, index=arg.index, name=arg.name) + elif is_index: + # because we want to coerce to numeric if possible, + # do not use _shallow_copy_with_infer + return pd.Index(values, name=arg.name) + elif is_scalars: + return values[0] + else: + return values diff --git a/pandas/tseries/timedeltas.py b/pandas/core/tools/timedeltas.py similarity index 100% rename from pandas/tseries/timedeltas.py rename to pandas/core/tools/timedeltas.py diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 637635a64d4d0..fbb10ebdfc56d 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -19,7 +19,7 @@ from pandas.errors import EmptyDataError from pandas.io.common import (_is_url, _urlopen, _validate_header_arg, get_filepath_or_buffer, _NA_VALUES) -from pandas.tseries.period import Period +from pandas.core.indexes.period import Period from pandas.io.json import libjson from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, string_types, OrderedDict) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 6fbcbe7d645e1..d618fab08309f 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -38,8 +38,8 @@ import pandas._libs.lib as lib 
from pandas._libs.tslib import (iNaT, Timestamp, Timedelta, format_array_from_datetime) -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.period import PeriodIndex +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.period import PeriodIndex import pandas as pd import numpy as np @@ -2314,7 +2314,7 @@ def _format_strings(self): class PeriodArrayFormatter(IntArrayFormatter): def _format_strings(self): - from pandas.tseries.period import IncompatibleFrequency + from pandas.core.indexes.period import IncompatibleFrequency try: values = PeriodIndex(self.values).to_native_types() except IncompatibleFrequency: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index f2449e3064867..79595818b7387 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -35,7 +35,7 @@ _get_handle, UnicodeReader, UTF8Recoder, BaseIterator, _NA_VALUES, _infer_compression) -from pandas.tseries import tools +from pandas.core.tools import datetimes as tools from pandas.util.decorators import Appender diff --git a/pandas/io/sql.py b/pandas/io/sql.py index de47a8ad5401f..ee992c6dd3439 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -22,7 +22,7 @@ string_types, text_type) from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject -from pandas.tseries.tools import to_datetime +from pandas.core.tools.datetimes import to_datetime from contextlib import contextmanager diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 9621ee3d0cad4..97295dfa7baf1 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -26,11 +26,11 @@ from pandas.core.index import Index from pandas.core.series import Series -from pandas.tseries.index import date_range -import pandas.tseries.tools as tools +from pandas.core.indexes.datetimes import date_range +import pandas.core.tools.datetimes as tools import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies 
import FreqGroup -from pandas.tseries.period import Period, PeriodIndex +from pandas.core.indexes.period import Period, PeriodIndex from pandas.plotting._compat import _mpl_le_2_0_0 diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 934c05ba5f130..c3476d1443fc3 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -21,7 +21,7 @@ from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex from pandas.core.series import Series, remove_na -from pandas.tseries.period import PeriodIndex +from pandas.core.indexes.period import PeriodIndex from pandas.compat import range, lrange, map, zip, string_types import pandas.compat as compat from pandas.io.formats.printing import pprint_thing diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index f8c7c1ee9ee10..3d04973ed0009 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -3,12 +3,12 @@ import numpy as np from matplotlib import pylab -from pandas.tseries.period import Period +from pandas.core.indexes.period import Period from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies -from pandas.tseries.index import DatetimeIndex -from pandas.tseries.period import PeriodIndex -from pandas.tseries.tdi import TimedeltaIndex +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.period import PeriodIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.io.formats.printing import pprint_thing import pandas.compat as compat diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index e59784d233367..f3fdc54d4a3cc 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -6,13 +6,10 @@ """ import pytest -import decimal from datetime import datetime, timedelta, date import numpy as np -import pandas as pd -from pandas import (Timedelta, Timestamp, 
DatetimeIndex, - to_numeric, _np_version_under1p9) +from pandas import Timedelta, Timestamp, DatetimeIndex from pandas.core.dtypes.cast import ( maybe_downcast_to_dtype, @@ -28,8 +25,6 @@ PeriodDtype) from pandas.util import testing as tm -from numpy import iinfo - class TestMaybeDowncast(tm.TestCase): @@ -327,365 +322,3 @@ def test_period_dtype(self): np.dtype('datetime64[ns]'), np.object, np.int64]: self.assertEqual(find_common_type([dtype, dtype2]), np.object) self.assertEqual(find_common_type([dtype2, dtype]), np.object) - - -class TestToNumeric(tm.TestCase): - - def test_series(self): - s = pd.Series(['1', '-3.14', '7']) - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['1', '-3.14', 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - def test_series_numeric(self): - s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - # bool is regarded as numeric - s = pd.Series([True, False, True, True], - index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - def test_error(self): - s = pd.Series([1, -3.14, 'apple']) - msg = 'Unable to parse string "apple" at position 2' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([1, -3.14, 'apple']) - tm.assert_series_equal(res, expected) - - res = to_numeric(s, errors='coerce') - expected = pd.Series([1, -3.14, np.nan]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['orange', 1, -3.14, 'apple']) - msg = 'Unable to parse string "orange" at position 0' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - def test_error_seen_bool(self): - s = pd.Series([True, False, 'apple']) - msg = 'Unable to 
parse string "apple" at position 2' - with tm.assertRaisesRegexp(ValueError, msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([True, False, 'apple']) - tm.assert_series_equal(res, expected) - - # coerces to float - res = to_numeric(s, errors='coerce') - expected = pd.Series([1., 0., np.nan]) - tm.assert_series_equal(res, expected) - - def test_list(self): - s = ['1', '-3.14', '7'] - res = to_numeric(s) - expected = np.array([1, -3.14, 7]) - tm.assert_numpy_array_equal(res, expected) - - def test_list_numeric(self): - s = [1, 3, 4, 5] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64)) - - s = [1., 3., 4., 5.] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - # bool is regarded as numeric - s = [True, False, True, True] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - def test_numeric(self): - s = pd.Series([1, -3.14, 7], dtype='O') - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series([1, -3.14, 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - # GH 14827 - df = pd.DataFrame(dict( - a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'], - b=[1.0, 2.0, 3.0, 4.0], - )) - expected = pd.DataFrame(dict( - a=[1.2, 3.14, np.inf, 0.1], - b=[1.0, 2.0, 3.0, 4.0], - )) - - # Test to_numeric over one column - df_copy = df.copy() - df_copy['a'] = df_copy['a'].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - # Test to_numeric over multiple columns - df_copy = df.copy() - df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - def test_numeric_lists_and_arrays(self): - # Test to_numeric with embedded lists and arrays - df = pd.DataFrame(dict( - a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - 
a=[[3.14, 1.0], 1.6, 0.1], - )) - tm.assert_frame_equal(df, expected) - - df = pd.DataFrame(dict( - a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - a=[[3.14, 1.0], 0.1], - )) - tm.assert_frame_equal(df, expected) - - def test_all_nan(self): - s = pd.Series(['a', 'b', 'c']) - res = to_numeric(s, errors='coerce') - expected = pd.Series([np.nan, np.nan, np.nan]) - tm.assert_series_equal(res, expected) - - def test_type_check(self): - # GH 11776 - df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']}) - with tm.assertRaisesRegexp(TypeError, "1-d array"): - to_numeric(df) - for errors in ['ignore', 'raise', 'coerce']: - with tm.assertRaisesRegexp(TypeError, "1-d array"): - to_numeric(df, errors=errors) - - def test_scalar(self): - self.assertEqual(pd.to_numeric(1), 1) - self.assertEqual(pd.to_numeric(1.1), 1.1) - - self.assertEqual(pd.to_numeric('1'), 1) - self.assertEqual(pd.to_numeric('1.1'), 1.1) - - with tm.assertRaises(ValueError): - to_numeric('XX', errors='raise') - - self.assertEqual(to_numeric('XX', errors='ignore'), 'XX') - self.assertTrue(np.isnan(to_numeric('XX', errors='coerce'))) - - def test_numeric_dtypes(self): - idx = pd.Index([1, 2, 3], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.values) - - idx = pd.Index([1., np.nan, 3., np.nan], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.values) - - def test_str(self): - idx = pd.Index(['1', '2', '3'], name='xxx') - exp = np.array([1, 2, 3], dtype='int64') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, 
pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx') - exp = np.array([1.5, 2.7, 3.4]) - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - def test_datetimelike(self): - for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_timedelta(self): - idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_period(self): - idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - # ToDo: enable when we can support native PeriodDtype - # res = pd.to_numeric(pd.Series(idx, name='xxx')) - # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - def test_non_hashable(self): - # Test for Bug #13324 - s = pd.Series([[10.0, 2], 1.0, 'apple']) - res = pd.to_numeric(s, errors='coerce') - tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan])) - - res = pd.to_numeric(s, errors='ignore') - 
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple'])) - - with self.assertRaisesRegexp(TypeError, "Invalid object type"): - pd.to_numeric(s) - - def test_downcast(self): - # see gh-13352 - mixed_data = ['1', 2, 3] - int_data = [1, 2, 3] - date_data = np.array(['1970-01-02', '1970-01-03', - '1970-01-04'], dtype='datetime64[D]') - - invalid_downcast = 'unsigned-integer' - msg = 'invalid downcasting method provided' - - smallest_int_dtype = np.dtype(np.typecodes['Integer'][0]) - smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0]) - - # support below np.float32 is rare and far between - float_32_char = np.dtype(np.float32).char - smallest_float_dtype = float_32_char - - for data in (mixed_data, int_data, date_data): - with self.assertRaisesRegexp(ValueError, msg): - pd.to_numeric(data, downcast=invalid_downcast) - - expected = np.array([1, 2, 3], dtype=np.int64) - - res = pd.to_numeric(data) - tm.assert_numpy_array_equal(res, expected) - - res = pd.to_numeric(data, downcast=None) - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_int_dtype) - - for signed_downcast in ('integer', 'signed'): - res = pd.to_numeric(data, downcast=signed_downcast) - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_uint_dtype) - res = pd.to_numeric(data, downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_float_dtype) - res = pd.to_numeric(data, downcast='float') - tm.assert_numpy_array_equal(res, expected) - - # if we can't successfully cast the given - # data to a numeric dtype, do not bother - # with the downcast parameter - data = ['foo', 2, 3] - expected = np.array(data, dtype=object) - res = pd.to_numeric(data, errors='ignore', - downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - # cannot cast to an unsigned integer because - # we have a negative number - data = ['-1', 2, 3] - expected = 
np.array([-1, 2, 3], dtype=np.int64) - res = pd.to_numeric(data, downcast='unsigned') - tm.assert_numpy_array_equal(res, expected) - - # cannot cast to an integer (signed or unsigned) - # because we have a float number - data = (['1.1', 2, 3], - [10000.0, 20000, 3000, 40000.36, 50000, 50000.00]) - expected = (np.array([1.1, 2, 3], dtype=np.float64), - np.array([10000.0, 20000, 3000, - 40000.36, 50000, 50000.00], dtype=np.float64)) - - for _data, _expected in zip(data, expected): - for downcast in ('integer', 'signed', 'unsigned'): - res = pd.to_numeric(_data, downcast=downcast) - tm.assert_numpy_array_equal(res, _expected) - - # the smallest integer dtype need not be np.(u)int8 - data = ['256', 257, 258] - - for downcast, expected_dtype in zip( - ['integer', 'signed', 'unsigned'], - [np.int16, np.int16, np.uint16]): - expected = np.array([256, 257, 258], dtype=expected_dtype) - res = pd.to_numeric(data, downcast=downcast) - tm.assert_numpy_array_equal(res, expected) - - def test_downcast_limits(self): - # Test the limits of each downcast. Bug: #14401. - # Check to make sure numpy is new enough to run this test. 
- if _np_version_under1p9: - pytest.skip("Numpy version is under 1.9") - - i = 'integer' - u = 'unsigned' - dtype_downcast_min_max = [ - ('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]), - ('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]), - ('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]), - ('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]), - ('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]), - ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]), - ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]), - ('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]), - ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]), - ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]), - ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]), - ('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]), - ('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]), - ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]), - ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), - ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), - ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]) - ] - - for dtype, downcast, min_max in dtype_downcast_min_max: - series = pd.to_numeric(pd.Series(min_max), downcast=downcast) - assert series.dtype == dtype diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index ce4dd6d38eeeb..3133a6883eb6f 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -8,7 +8,8 @@ from pandas.compat import lrange from pandas import (DataFrame, Series, Index, MultiIndex, - RangeIndex, date_range, IntervalIndex) + RangeIndex, date_range, IntervalIndex, + to_datetime) from pandas.core.dtypes.common import ( is_object_dtype, is_categorical_dtype, @@ -202,8 +203,8 @@ def test_set_index_cast_datetimeindex(self): # don't cast a DatetimeIndex WITH a tz, leave as object # GH 6032 i = 
(pd.DatetimeIndex( - pd.tseries.tools.to_datetime(['2013-1-1 13:00', - '2013-1-2 14:00'], errors="raise")) + to_datetime(['2013-1-1 13:00', + '2013-1-2 14:00'], errors="raise")) .tz_localize('US/Pacific')) df = DataFrame(np.random.randn(2, 1), columns=['A']) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index e165e30c59f0f..979493b95a253 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -873,7 +873,7 @@ def test_operators_timedelta64(self): mixed['F'] = Timestamp('20130101') # results in an object array - from pandas.tseries.timedeltas import ( + from pandas.core.tools.timedeltas import ( _coerce_scalar_to_timedelta_type as _coerce) result = mixed.min() diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 37b6f0c261789..7765bac55fb1f 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -547,7 +547,7 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): def test_frame_to_period(self): K = 5 - from pandas.tseries.period import period_range + from pandas.core.indexes.period import period_range dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index f486c70d86f9d..880737392d037 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -8,7 +8,7 @@ from pandas import (date_range, bdate_range, Timestamp, isnull, Index, MultiIndex, DataFrame, Series, - concat, Panel) + concat, Panel, DatetimeIndex) from pandas.errors import UnsupportedFunctionCall, PerformanceWarning from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, @@ -3305,7 +3305,6 @@ def test_groupby_sort_multiindex_series(self): assert_series_equal(result, mseries_result.sort_index()) def 
test_groupby_reindex_inside_function(self): - from pandas.tseries.api import DatetimeIndex periods = 1000 ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods) @@ -3559,7 +3558,7 @@ def test_groupby_with_empty(self): index = pd.DatetimeIndex(()) data = () series = pd.Series(data, index) - grouper = pd.tseries.resample.TimeGrouper('D') + grouper = pd.core.resample.TimeGrouper('D') grouped = series.groupby(grouper) assert next(iter(grouped), None) is None diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 3142b74b56778..f97f59cd92262 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -5,7 +5,8 @@ from numpy import nan import pandas as pd -from pandas import DataFrame, date_range, Index, Series, MultiIndex, Timestamp +from pandas import (DataFrame, date_range, Index, + Series, MultiIndex, Timestamp, DatetimeIndex) from pandas.compat import StringIO from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal @@ -361,7 +362,6 @@ def sumfunc_value(x): def test_groupby_groups_datetimeindex(self): # #1430 - from pandas.tseries.api import DatetimeIndex periods = 1000 ind = DatetimeIndex(start='2012/1/1', freq='5min', periods=periods) df = DataFrame({'high': np.arange(periods), diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 9003a3707e417..d53f131820dea 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -9,6 +9,7 @@ RangeIndex, MultiIndex, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, IntervalIndex, notnull, isnull) +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas.core.dtypes.common import needs_i8_conversion from pandas.util.testing import assertRaisesRegexp from pandas._libs.tslib import iNaT @@ -789,7 +790,7 @@ def test_numpy_ufuncs(self): np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, 
np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, np.rad2deg]: - if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin): + if isinstance(idx, DatetimeIndexOpsMixin): # raise TypeError or ValueError (PeriodIndex) # PeriodIndex behavior should be changed in future version with tm.assertRaises(Exception): @@ -812,7 +813,7 @@ def test_numpy_ufuncs(self): func(idx) for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: - if isinstance(idx, pd.tseries.base.DatetimeIndexOpsMixin): + if isinstance(idx, DatetimeIndexOpsMixin): # raise TypeError or ValueError (PeriodIndex) with tm.assertRaises(Exception): func(idx) @@ -847,7 +848,7 @@ def test_hasnans_isnans(self): if len(index) == 0: continue - elif isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin): + elif isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue @@ -887,7 +888,7 @@ def test_fillna(self): idx = index.copy() values = idx.values - if isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin): + if isinstance(index, DatetimeIndexOpsMixin): values[1] = iNaT elif isinstance(index, (Int64Index, UInt64Index)): continue diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 67e82e5c71d75..3eaeda965b217 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -9,7 +9,7 @@ import pandas as pd import pandas.util.testing as tm from pandas import compat -from pandas.tseries.index import bdate_range, cdate_range +from pandas.core.indexes.datetimes import bdate_range, cdate_range from pandas import date_range, offsets, DatetimeIndex, Timestamp from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset, MonthEnd) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 8ab29c0c0b6f2..5dcc49cf776db 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ 
b/pandas/tests/indexes/datetimes/test_ops.py @@ -8,7 +8,7 @@ import pandas._libs.tslib as tslib import pandas.util.testing as tm from pandas.errors import PerformanceWarning -from pandas.tseries.index import cdate_range +from pandas.core.indexes.datetimes import cdate_range from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta, date_range, TimedeltaIndex, _np_version_under1p10, Index, datetime, Float64Index, offsets, bdate_range) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index a1ad147f84aff..2da37f9394407 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -4,7 +4,7 @@ import pandas as pd import pandas.util.testing as tm -from pandas.tseries.index import cdate_range +from pandas.core.indexes.datetimes import cdate_range from pandas import (DatetimeIndex, date_range, Series, bdate_range, DataFrame, Int64Index, Index, to_datetime) from pandas.tseries.offsets import Minute, BMonthEnd, MonthEnd diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index f8eb923d51f75..a250a936b7ca8 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -10,8 +10,8 @@ import pandas as pd from pandas._libs import tslib, lib -from pandas.tseries import tools -from pandas.tseries.tools import normalize_date +from pandas.core.tools import datetimes as tools +from pandas.core.tools.datetimes import normalize_date from pandas.compat import lmap from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import is_datetime64_ns_dtype diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index ab70ad59846e8..1340c9cad211b 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -2,7 +2,7 @@ 
import pandas as pd import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas.compat import lrange, PY3, text_type, lmap from pandas import (Period, PeriodIndex, period_range, offsets, date_range, Series, Index) diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 3b94992f2fe9f..50b2da380fd30 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -4,7 +4,7 @@ import pandas as pd import pandas._libs.tslib as tslib import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period, _np_version_under1p10, Index, Timedelta, offsets) diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index d4f06bae8bc32..357eccccf9fe8 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -2,7 +2,7 @@ import pandas as pd import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas import period_range, PeriodIndex, Index, date_range diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index f9a1df3d824f1..32fbf44bd572c 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -3,7 +3,7 @@ import pandas as pd import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas.compat import lrange from pandas.tseries.frequencies import get_freq, MONTHS from pandas._libs.period import period_ordinal, period_asfreq diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index de15abe89712a..c1b61bcd2971c 100644 --- a/pandas/tests/indexes/test_base.py +++ 
b/pandas/tests/indexes/test_base.py @@ -21,7 +21,7 @@ import pandas.core.config as cf -from pandas.tseries.index import _to_m8 +from pandas.core.indexes.datetimes import _to_m8 import pandas as pd from pandas._libs.lib import Timestamp diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index dcfa939f84d7e..545165be37178 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -1452,7 +1452,7 @@ def testIndex(self): tm.assert_index_equal(i, outp) def test_datetimeindex(self): - from pandas.tseries.index import date_range + from pandas.core.indexes.datetimes import date_range rng = date_range('1/1/2000', periods=20) diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index de4e3fbc0d943..8bb1d5ee3972a 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -15,7 +15,7 @@ import pandas as pd import pandas.io.parsers as parsers -import pandas.tseries.tools as tools +import pandas.core.tools.datetimes as tools import pandas.util.testing as tm import pandas.io.date_converters as conv @@ -23,7 +23,7 @@ from pandas import compat from pandas.compat import parse_date, StringIO, lrange from pandas.compat.numpy import np_array_datetime64_compat -from pandas.tseries.index import date_range +from pandas.core.indexes.datetimes import date_range class ParseDatesTests(object): diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index ce411bb4d5c4e..b4c7f2ba8719e 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -38,7 +38,7 @@ from pandas import date_range, to_datetime, to_timedelta, Timestamp import pandas.compat as compat from pandas.compat import range, lrange, string_types, PY36 -from pandas.tseries.tools import format as date_format +from pandas.core.tools.datetimes import format as date_format import pandas.io.sql as sql from pandas.io.sql import read_sql_table, read_sql_query diff 
--git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 4beb804acacc5..9946c3475b7a1 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -8,11 +8,11 @@ import numpy as np from pandas import Index, Series, DataFrame from pandas.compat import is_platform_mac -from pandas.tseries.index import date_range, bdate_range -from pandas.tseries.tdi import timedelta_range +from pandas.core.indexes.datetimes import date_range, bdate_range +from pandas.core.indexes.timedeltas import timedelta_range from pandas.tseries.offsets import DateOffset -from pandas.tseries.period import period_range, Period, PeriodIndex -from pandas.tseries.resample import DatetimeIndex +from pandas.core.indexes.period import period_range, Period, PeriodIndex +from pandas.core.resample import DatetimeIndex from pandas.util.testing import assert_series_equal, ensure_clean, slow import pandas.util.testing as tm diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index e6514a1e2e81e..ed194cacb1628 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1503,7 +1503,7 @@ def test_concat_exclude_none(self): self.assertRaises(ValueError, concat, [None, None]) def test_concat_datetime64_block(self): - from pandas.tseries.index import date_range + from pandas.core.indexes.datetimes import date_range rng = date_range('1/1/2000', periods=10) diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/test_period.py index 98af0028469bf..4c6784fb1732b 100644 --- a/pandas/tests/scalar/test_period.py +++ b/pandas/tests/scalar/test_period.py @@ -3,7 +3,7 @@ import pandas as pd import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas.compat import text_type, iteritems from pandas.compat.numpy import np_datetime64_compat diff --git 
a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index abdbf29008b7e..227297709098f 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -4,7 +4,7 @@ import pandas as pd import pandas.util.testing as tm -from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct +from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series, to_timedelta, compat) from pandas._libs.tslib import iNaT, NaTType diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index a682e8643d251..671c04880bf5c 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -13,8 +13,8 @@ from pandas import (Series, Categorical, DataFrame, isnull, notnull, bdate_range, date_range, _np_version_under1p10) from pandas.core.index import MultiIndex -from pandas.tseries.index import Timestamp -from pandas.tseries.tdi import Timedelta +from pandas.core.indexes.datetimes import Timestamp +from pandas.core.indexes.timedeltas import Timedelta import pandas.core.config as cf import pandas.core.nanops as nanops diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index faf987c9b3820..25acd304e0a23 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -5,7 +5,7 @@ import pandas as pd from pandas import Index, Series, DataFrame, date_range -from pandas.tseries.index import Timestamp +from pandas.core.indexes.datetimes import Timestamp from pandas.compat import range from pandas import compat diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index d4e5d36c15c68..15e7d97c7ce32 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -204,7 +204,7 @@ def test_concat_empty_series_dtypes(self): 
self.assertEqual(result.ftype, 'object:dense') def test_combine_first_dt64(self): - from pandas.tseries.tools import to_datetime + from pandas.core.tools.datetimes import to_datetime s0 = to_datetime(Series(["2010", np.NaN])) s1 = to_datetime(Series([np.NaN, "2011"])) rs = s0.combine_first(s1) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 24b2a12d70709..6b16c607e5ee1 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -13,7 +13,7 @@ is_datetime64tz_dtype) from pandas import (Index, Series, isnull, date_range, NaT, period_range, MultiIndex, IntervalIndex) -from pandas.tseries.index import Timestamp, DatetimeIndex +from pandas.core.indexes.datetimes import Timestamp, DatetimeIndex from pandas._libs import lib from pandas._libs.tslib import iNaT diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 8825ba5607a20..a984f578b0520 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -364,8 +364,8 @@ def test_valid_dt_with_missing_values(self): def test_dt_accessor_api(self): # GH 9322 - from pandas.tseries.common import (CombinedDatetimelikeProperties, - DatetimeProperties) + from pandas.core.indexes.accessors import ( + CombinedDatetimelikeProperties, DatetimeProperties) self.assertIs(Series.dt, CombinedDatetimelikeProperties) s = Series(date_range('2000-01-01', periods=3)) @@ -379,7 +379,7 @@ def test_dt_accessor_api(self): self.assertFalse(hasattr(s, 'dt')) def test_sub_of_datetime_from_TimeSeries(self): - from pandas.tseries.timedeltas import to_timedelta + from pandas.core.tools.timedeltas import to_timedelta from datetime import datetime a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00)) b = datetime(1993, 6, 22, 13, 30) diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 4b1c303200739..9ca7645e6f974 
100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -7,7 +7,7 @@ import numpy as np from pandas import Series -from pandas.tseries.index import Timestamp +from pandas.core.indexes.datetimes import Timestamp import pandas._libs.lib as lib from pandas.util.testing import assert_series_equal diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 3d609dec7958a..2e1ae7b81ea20 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -13,8 +13,8 @@ from pandas import (Index, Series, DataFrame, isnull, bdate_range, NaT, date_range, timedelta_range, _np_version_under1p8) -from pandas.tseries.index import Timestamp -from pandas.tseries.tdi import Timedelta +from pandas.core.indexes.datetimes import Timestamp +from pandas.core.indexes.timedeltas import Timedelta import pandas.core.nanops as nanops from pandas.compat import range, zip diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index f1ae7765648ca..354010a5d89ea 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -2,7 +2,7 @@ import pandas as pd import pandas.util.testing as tm -import pandas.tseries.period as period +import pandas.core.indexes.period as period from pandas import Series, period_range, DataFrame, Period diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index 339d871b63049..e61297bdcce3e 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import (Index, Series, _np_version_under1p9) -from pandas.tseries.index import Timestamp +from pandas.core.indexes.datetimes import Timestamp from pandas.core.dtypes.common import is_integer import pandas.util.testing as tm diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 
0f960a890e72b..0322933e96631 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -8,8 +8,8 @@ import pandas.util.testing as tm from pandas._libs.tslib import iNaT from pandas.compat import lrange, StringIO, product -from pandas.tseries.tdi import TimedeltaIndex -from pandas.tseries.index import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import BDay, BMonthEnd from pandas import (Index, Series, date_range, NaT, concat, DataFrame, Timestamp, to_datetime, offsets, @@ -739,7 +739,7 @@ def test_between_time_formats(self): "%s - %s" % time_string) def test_to_period(self): - from pandas.tseries.period import period_range + from pandas.core.indexes.period import period_range ts = _simple_ts('1/1/2000', '1/1/2001') diff --git a/pandas/tests/sparse/test_frame.py b/pandas/tests/sparse/test_frame.py index 0a58713125a30..6ee8dacf17c62 100644 --- a/pandas/tests/sparse/test_frame.py +++ b/pandas/tests/sparse/test_frame.py @@ -14,7 +14,7 @@ is_float_dtype, is_object_dtype, is_float) -from pandas.tseries.index import DatetimeIndex +from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import BDay from pandas.util import testing as tm from pandas.compat import lrange diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 148f2ae425629..6321dcce7153b 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -18,7 +18,7 @@ from pandas.compat import StringIO from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.base import PandasDelegate, NoNewAttributesMixin -from pandas.tseries.base import DatetimeIndexOpsMixin +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas._libs.tslib import iNaT diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 17f55b41970b1..0594cc9878056 100644 --- 
a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -4312,7 +4312,7 @@ def test_str_accessor_api_for_categorical(self): def test_dt_accessor_api_for_categorical(self): # https://github.com/pandas-dev/pandas/issues/10661 - from pandas.tseries.common import Properties + from pandas.core.indexes.accessors import Properties s_dr = Series(date_range('1/1/2015', periods=5, tz="MET")) c_dr = s_dr.astype("category") diff --git a/pandas/tests/tseries/test_resample.py b/pandas/tests/test_resample.py old mode 100755 new mode 100644 similarity index 99% rename from pandas/tests/tseries/test_resample.py rename to pandas/tests/test_resample.py index e81dfd8649e8e..a39242c9dd882 --- a/pandas/tests/tseries/test_resample.py +++ b/pandas/tests/test_resample.py @@ -19,12 +19,12 @@ from pandas.core.groupby import DataError from pandas.tseries.frequencies import MONTHS, DAYS from pandas.tseries.frequencies import to_offset -from pandas.tseries.index import date_range +from pandas.core.indexes.datetimes import date_range from pandas.tseries.offsets import Minute, BDay -from pandas.tseries.period import period_range, PeriodIndex, Period -from pandas.tseries.resample import (DatetimeIndex, TimeGrouper, - DatetimeIndexResampler) -from pandas.tseries.tdi import timedelta_range, TimedeltaIndex +from pandas.core.indexes.period import period_range, PeriodIndex, Period +from pandas.core.resample import (DatetimeIndex, TimeGrouper, + DatetimeIndexResampler) +from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, assert_index_equal) from pandas._libs.period import IncompatibleFrequency diff --git a/pandas/tests/tools/__init__.py b/pandas/tests/tools/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py new file mode 100644 index 0000000000000..96b49c5fb97a6 --- 
/dev/null +++ b/pandas/tests/tools/test_numeric.py @@ -0,0 +1,371 @@ +import pytest +import decimal + +import numpy as np +import pandas as pd +from pandas import to_numeric, _np_version_under1p9 + +from pandas.util import testing as tm +from numpy import iinfo + + +class TestToNumeric(tm.TestCase): + + def test_series(self): + s = pd.Series(['1', '-3.14', '7']) + res = to_numeric(s) + expected = pd.Series([1, -3.14, 7]) + tm.assert_series_equal(res, expected) + + s = pd.Series(['1', '-3.14', 7]) + res = to_numeric(s) + tm.assert_series_equal(res, expected) + + def test_series_numeric(self): + s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + # bool is regarded as numeric + s = pd.Series([True, False, True, True], + index=list('ABCD'), name='XXX') + res = to_numeric(s) + tm.assert_series_equal(res, s) + + def test_error(self): + s = pd.Series([1, -3.14, 'apple']) + msg = 'Unable to parse string "apple" at position 2' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + res = to_numeric(s, errors='ignore') + expected = pd.Series([1, -3.14, 'apple']) + tm.assert_series_equal(res, expected) + + res = to_numeric(s, errors='coerce') + expected = pd.Series([1, -3.14, np.nan]) + tm.assert_series_equal(res, expected) + + s = pd.Series(['orange', 1, -3.14, 'apple']) + msg = 'Unable to parse string "orange" at position 0' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + def test_error_seen_bool(self): + s = pd.Series([True, False, 'apple']) + msg = 'Unable to parse string "apple" at position 2' + with tm.assertRaisesRegexp(ValueError, msg): + to_numeric(s, errors='raise') + + res = to_numeric(s, errors='ignore') + expected = pd.Series([True, False, 'apple']) + tm.assert_series_equal(res, expected) + + # coerces to float + 
res = to_numeric(s, errors='coerce') + expected = pd.Series([1., 0., np.nan]) + tm.assert_series_equal(res, expected) + + def test_list(self): + s = ['1', '-3.14', '7'] + res = to_numeric(s) + expected = np.array([1, -3.14, 7]) + tm.assert_numpy_array_equal(res, expected) + + def test_list_numeric(self): + s = [1, 3, 4, 5] + res = to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64)) + + s = [1., 3., 4., 5.] + res = to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s)) + + # bool is regarded as numeric + s = [True, False, True, True] + res = to_numeric(s) + tm.assert_numpy_array_equal(res, np.array(s)) + + def test_numeric(self): + s = pd.Series([1, -3.14, 7], dtype='O') + res = to_numeric(s) + expected = pd.Series([1, -3.14, 7]) + tm.assert_series_equal(res, expected) + + s = pd.Series([1, -3.14, 7]) + res = to_numeric(s) + tm.assert_series_equal(res, expected) + + # GH 14827 + df = pd.DataFrame(dict( + a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'], + b=[1.0, 2.0, 3.0, 4.0], + )) + expected = pd.DataFrame(dict( + a=[1.2, 3.14, np.inf, 0.1], + b=[1.0, 2.0, 3.0, 4.0], + )) + + # Test to_numeric over one column + df_copy = df.copy() + df_copy['a'] = df_copy['a'].apply(to_numeric) + tm.assert_frame_equal(df_copy, expected) + + # Test to_numeric over multiple columns + df_copy = df.copy() + df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric) + tm.assert_frame_equal(df_copy, expected) + + def test_numeric_lists_and_arrays(self): + # Test to_numeric with embedded lists and arrays + df = pd.DataFrame(dict( + a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1] + )) + df['a'] = df['a'].apply(to_numeric) + expected = pd.DataFrame(dict( + a=[[3.14, 1.0], 1.6, 0.1], + )) + tm.assert_frame_equal(df, expected) + + df = pd.DataFrame(dict( + a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1] + )) + df['a'] = df['a'].apply(to_numeric) + expected = pd.DataFrame(dict( + a=[[3.14, 1.0], 0.1], + )) + 
tm.assert_frame_equal(df, expected) + + def test_all_nan(self): + s = pd.Series(['a', 'b', 'c']) + res = to_numeric(s, errors='coerce') + expected = pd.Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(res, expected) + + def test_type_check(self): + # GH 11776 + df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']}) + with tm.assertRaisesRegexp(TypeError, "1-d array"): + to_numeric(df) + for errors in ['ignore', 'raise', 'coerce']: + with tm.assertRaisesRegexp(TypeError, "1-d array"): + to_numeric(df, errors=errors) + + def test_scalar(self): + self.assertEqual(pd.to_numeric(1), 1) + self.assertEqual(pd.to_numeric(1.1), 1.1) + + self.assertEqual(pd.to_numeric('1'), 1) + self.assertEqual(pd.to_numeric('1.1'), 1.1) + + with tm.assertRaises(ValueError): + to_numeric('XX', errors='raise') + + self.assertEqual(to_numeric('XX', errors='ignore'), 'XX') + self.assertTrue(np.isnan(to_numeric('XX', errors='coerce'))) + + def test_numeric_dtypes(self): + idx = pd.Index([1, 2, 3], name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, idx) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.values) + + idx = pd.Index([1., np.nan, 3., np.nan], name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, idx) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.values) + + def test_str(self): + idx = pd.Index(['1', '2', '3'], name='xxx') + exp = np.array([1, 2, 3], dtype='int64') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(exp, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(exp, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, exp) + + idx = pd.Index(['1.5', '2.7', '3.4'], 
name='xxx') + exp = np.array([1.5, 2.7, 3.4]) + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(exp, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(exp, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, exp) + + def test_datetimelike(self): + for tz in [None, 'US/Eastern', 'Asia/Tokyo']: + idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.asi8) + + def test_timedelta(self): + idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + res = pd.to_numeric(pd.Series(idx, name='xxx')) + tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + res = pd.to_numeric(idx.values) + tm.assert_numpy_array_equal(res, idx.asi8) + + def test_period(self): + idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx') + res = pd.to_numeric(idx) + tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) + + # ToDo: enable when we can support native PeriodDtype + # res = pd.to_numeric(pd.Series(idx, name='xxx')) + # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) + + def test_non_hashable(self): + # Test for Bug #13324 + s = pd.Series([[10.0, 2], 1.0, 'apple']) + res = pd.to_numeric(s, errors='coerce') + tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan])) + + res = pd.to_numeric(s, errors='ignore') + tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple'])) + + with self.assertRaisesRegexp(TypeError, "Invalid object type"): + pd.to_numeric(s) + + def test_downcast(self): + # see gh-13352 + mixed_data = ['1', 2, 3] + int_data = [1, 2, 3] + date_data 
= np.array(['1970-01-02', '1970-01-03', + '1970-01-04'], dtype='datetime64[D]') + + invalid_downcast = 'unsigned-integer' + msg = 'invalid downcasting method provided' + + smallest_int_dtype = np.dtype(np.typecodes['Integer'][0]) + smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0]) + + # support below np.float32 is rare and far between + float_32_char = np.dtype(np.float32).char + smallest_float_dtype = float_32_char + + for data in (mixed_data, int_data, date_data): + with self.assertRaisesRegexp(ValueError, msg): + pd.to_numeric(data, downcast=invalid_downcast) + + expected = np.array([1, 2, 3], dtype=np.int64) + + res = pd.to_numeric(data) + tm.assert_numpy_array_equal(res, expected) + + res = pd.to_numeric(data, downcast=None) + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_int_dtype) + + for signed_downcast in ('integer', 'signed'): + res = pd.to_numeric(data, downcast=signed_downcast) + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_uint_dtype) + res = pd.to_numeric(data, downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + expected = np.array([1, 2, 3], dtype=smallest_float_dtype) + res = pd.to_numeric(data, downcast='float') + tm.assert_numpy_array_equal(res, expected) + + # if we can't successfully cast the given + # data to a numeric dtype, do not bother + # with the downcast parameter + data = ['foo', 2, 3] + expected = np.array(data, dtype=object) + res = pd.to_numeric(data, errors='ignore', + downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + # cannot cast to an unsigned integer because + # we have a negative number + data = ['-1', 2, 3] + expected = np.array([-1, 2, 3], dtype=np.int64) + res = pd.to_numeric(data, downcast='unsigned') + tm.assert_numpy_array_equal(res, expected) + + # cannot cast to an integer (signed or unsigned) + # because we have a float number + data = (['1.1', 2, 3], + [10000.0, 20000, 
3000, 40000.36, 50000, 50000.00]) + expected = (np.array([1.1, 2, 3], dtype=np.float64), + np.array([10000.0, 20000, 3000, + 40000.36, 50000, 50000.00], dtype=np.float64)) + + for _data, _expected in zip(data, expected): + for downcast in ('integer', 'signed', 'unsigned'): + res = pd.to_numeric(_data, downcast=downcast) + tm.assert_numpy_array_equal(res, _expected) + + # the smallest integer dtype need not be np.(u)int8 + data = ['256', 257, 258] + + for downcast, expected_dtype in zip( + ['integer', 'signed', 'unsigned'], + [np.int16, np.int16, np.uint16]): + expected = np.array([256, 257, 258], dtype=expected_dtype) + res = pd.to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + def test_downcast_limits(self): + # Test the limits of each downcast. Bug: #14401. + # Check to make sure numpy is new enough to run this test. + if _np_version_under1p9: + pytest.skip("Numpy version is under 1.9") + + i = 'integer' + u = 'unsigned' + dtype_downcast_min_max = [ + ('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]), + ('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]), + ('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]), + ('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]), + ('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]), + ('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]), + ('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]), + ('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]), + ('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]), + ('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]), + ('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]), + ('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]), + ('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]), + ('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]), + ('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), + ('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), 
+ ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]) + ] + + for dtype, downcast, min_max in dtype_downcast_min_max: + series = pd.to_numeric(pd.Series(min_max), downcast=downcast) + assert series.dtype == dtype diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py index 5fbef465ca8fc..327dad6d47634 100644 --- a/pandas/tests/tseries/test_frequencies.py +++ b/pandas/tests/tseries/test_frequencies.py @@ -7,10 +7,10 @@ date_range, period_range) import pandas.tseries.frequencies as frequencies -from pandas.tseries.tools import to_datetime +from pandas.core.tools.datetimes import to_datetime import pandas.tseries.offsets as offsets -from pandas.tseries.period import PeriodIndex +from pandas.core.indexes.period import PeriodIndex import pandas.compat as compat from pandas.compat import is_platform_windows diff --git a/pandas/tests/tseries/test_offsets.py b/pandas/tests/tseries/test_offsets.py index 2dc2485550bc5..b0c84cf555ede 100644 --- a/pandas/tests/tseries/test_offsets.py +++ b/pandas/tests/tseries/test_offsets.py @@ -15,7 +15,8 @@ from pandas.tseries.frequencies import (_offset_map, get_freq_code, _get_freq_str, _INVALID_FREQ_ERROR, get_offset, get_standard_freq) -from pandas.tseries.index import _to_m8, DatetimeIndex, _daterange_cache +from pandas.core.indexes.datetimes import ( + _to_m8, DatetimeIndex, _daterange_cache) from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd, BusinessHour, WeekOfMonth, CBMonthEnd, CustomBusinessHour, WeekDay, @@ -27,8 +28,9 @@ QuarterEnd, BusinessMonthEnd, FY5253, Milli, Nano, Easter, FY5253Quarter, LastWeekOfMonth, CacheableOffset) -from pandas.tseries.tools import (format, ole2datetime, parse_time_string, - to_datetime, DateParseError) +from pandas.core.tools.datetimes import ( + format, ole2datetime, parse_time_string, + to_datetime, DateParseError) import pandas.tseries.offsets as offsets from pandas.io.pickle import read_pickle from pandas._libs.tslib 
import normalize_date, NaT, Timestamp, Timedelta diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index 125e031b5e3a2..06b6bbbcbc559 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -7,10 +7,10 @@ from pytz import NonExistentTimeError import pandas.util.testing as tm -import pandas.tseries.tools as tools +import pandas.core.tools.datetimes as tools import pandas.tseries.offsets as offsets from pandas.compat import lrange, zip -from pandas.tseries.index import bdate_range, date_range +from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib from pandas import (Index, Series, DataFrame, isnull, Timestamp, NaT, diff --git a/pandas/tseries/api.py b/pandas/tseries/api.py index a00ccf99e1b96..71386c02547ba 100644 --- a/pandas/tseries/api.py +++ b/pandas/tseries/api.py @@ -4,11 +4,5 @@ # flake8: noqa -from pandas.tseries.index import DatetimeIndex, date_range, bdate_range from pandas.tseries.frequencies import infer_freq -from pandas.tseries.tdi import Timedelta, TimedeltaIndex, timedelta_range -from pandas.tseries.period import Period, PeriodIndex, period_range, pnow -from pandas.tseries.resample import TimeGrouper -from pandas.tseries.timedeltas import to_timedelta -from pandas._libs.lib import NaT import pandas.tseries.offsets as offsets diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index a097c56a0ffd3..f9f4adc1b2c81 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -4,7 +4,7 @@ import numpy as np from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod -from pandas.tseries.tools import to_datetime, normalize_date +from pandas.core.tools.datetimes import to_datetime, normalize_date from pandas.core.common import AbstractMethodError # import after tools, dateutil check diff --git a/setup.py b/setup.py index 
69b9a974b9935..830968768ceb2 100755 --- a/setup.py +++ b/setup.py @@ -644,6 +644,7 @@ def pxd(name): 'pandas.core.computation', 'pandas.core.reshape', 'pandas.core.sparse', + 'pandas.core.tools', 'pandas.errors', 'pandas.io', 'pandas.io.json', @@ -675,6 +676,7 @@ def pxd(name): 'pandas.tests.scalar', 'pandas.tests.tseries', 'pandas.tests.plotting', + 'pandas.tests.tools', 'pandas.tools', 'pandas.tseries', 'pandas.util.clipboard' From 816f94575c9ec1af2169a28536217c4d16dd6b4b Mon Sep 17 00:00:00 2001 From: Jeff Reback Date: Tue, 18 Apr 2017 12:45:36 +0000 Subject: [PATCH 54/56] PERF: better perf on _ensure_data in core/algorithms, helping perf of unique, duplicated, factorize (#16046) --- pandas/core/algorithms.py | 71 +++++++++++++++++------------------- pandas/core/dtypes/common.py | 50 +++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 37 deletions(-) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 63df4b3d94bc8..8437861bea19e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -14,6 +14,7 @@ from pandas.core.dtypes.common import ( is_unsigned_integer_dtype, is_signed_integer_dtype, is_integer_dtype, is_complex_dtype, + is_object_dtype, is_categorical_dtype, is_sparse, is_period_dtype, is_numeric_dtype, is_float_dtype, @@ -63,6 +64,35 @@ def _ensure_data(values, dtype=None): """ + # we check some simple dtypes first + try: + if is_bool_dtype(values) or is_bool_dtype(dtype): + # we are actually coercing to uint64 + # until our algos suppport uint8 directly (see TODO) + return np.asarray(values).astype('uint64'), 'bool', 'uint64' + elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): + return _ensure_int64(values), 'int64', 'int64' + elif (is_unsigned_integer_dtype(values) or + is_unsigned_integer_dtype(dtype)): + return _ensure_uint64(values), 'uint64', 'uint64' + elif is_float_dtype(values) or is_float_dtype(dtype): + return _ensure_float64(values), 'float64', 'float64' + elif 
is_object_dtype(values) and dtype is None: + return _ensure_object(np.asarray(values)), 'object', 'object' + elif is_complex_dtype(values) or is_complex_dtype(dtype): + + # ignore the fact that we are casting to float + # which discards complex parts + with catch_warnings(record=True): + values = _ensure_float64(values) + return values, 'float64', 'float64' + + except (TypeError, ValueError): + # if we are trying to coerce to a dtype + # and it is incompat this will fall thru to here + return _ensure_object(values), 'object', 'object' + + # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or @@ -94,43 +124,9 @@ def _ensure_data(values, dtype=None): return values, dtype, 'int64' + # we have failed, return object values = np.asarray(values) - - try: - if is_bool_dtype(values) or is_bool_dtype(dtype): - # we are actually coercing to uint64 - # until our algos suppport uint8 directly (see TODO) - values = values.astype('uint64') - dtype = 'bool' - ndtype = 'uint64' - elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): - values = _ensure_int64(values) - ndtype = dtype = 'int64' - elif (is_unsigned_integer_dtype(values) or - is_unsigned_integer_dtype(dtype)): - values = _ensure_uint64(values) - ndtype = dtype = 'uint64' - elif is_complex_dtype(values) or is_complex_dtype(dtype): - - # ignore the fact that we are casting to float - # which discards complex parts - with catch_warnings(record=True): - values = _ensure_float64(values) - ndtype = dtype = 'float64' - elif is_float_dtype(values) or is_float_dtype(dtype): - values = _ensure_float64(values) - ndtype = dtype = 'float64' - else: - values = _ensure_object(values) - ndtype = dtype = 'object' - - except (TypeError, ValueError): - # if we are trying to coerce to a dtype - # and it is incompat this will fall thru to here - values = _ensure_object(values) - ndtype = dtype = 'object' - - return values, dtype, ndtype + return 
_ensure_object(values), 'object', 'object' def _reconstruct_data(values, dtype, original): @@ -465,7 +461,7 @@ def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False): if not is_list_like(values): raise TypeError("Only list-like objects are allowed to be passed to" "safe_sort as values") - values = np.array(values, copy=False) + values = np.asarray(values) def sort_mixed(values): # order ints before strings, safe in py3 @@ -547,6 +543,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): PeriodIndex """ + values = _ensure_arraylike(values) original = values values, dtype, _ = _ensure_data(values) (hash_klass, vec_klass), values = _get_data_algo(values, _hashtables) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 0b14e484d40a7..156e43fc4e5fb 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -82,6 +82,8 @@ def _ensure_categorical(arr): def is_object_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.object_) @@ -120,6 +122,8 @@ def is_period(array): def is_datetime64_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False try: tipo = _get_dtype_type(arr_or_dtype) except TypeError: @@ -128,23 +132,33 @@ def is_datetime64_dtype(arr_or_dtype): def is_datetime64tz_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_timedelta64_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.timedelta64) def is_period_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False return PeriodDtype.is_dtype(arr_or_dtype) def is_interval_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False return IntervalDtype.is_dtype(arr_or_dtype) def is_categorical_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False return 
CategoricalDtype.is_dtype(arr_or_dtype) @@ -178,6 +192,8 @@ def is_string_dtype(arr_or_dtype): # TODO: gh-15585: consider making the checks stricter. + if arr_or_dtype is None: + return False try: dtype = _get_dtype(arr_or_dtype) return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype) @@ -224,45 +240,61 @@ def is_dtype_equal(source, target): def is_any_int_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.integer) def is_integer_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))) def is_signed_integer_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, np.signedinteger) and not issubclass(tipo, (np.datetime64, np.timedelta64))) def is_unsigned_integer_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, np.unsignedinteger) and not issubclass(tipo, (np.datetime64, np.timedelta64))) def is_int64_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.int64) def is_int_or_datetime_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, np.integer) or issubclass(tipo, (np.datetime64, np.timedelta64))) def is_datetime64_any_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False return (is_datetime64_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype)) def is_datetime64_ns_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False try: tipo = _get_dtype(arr_or_dtype) except TypeError: @@ -303,6 +335,8 @@ def is_timedelta64_ns_dtype(arr_or_dtype): False """ + if arr_or_dtype is None: + return False try: tipo = _get_dtype(arr_or_dtype) return 
tipo == _TD_DTYPE @@ -311,6 +345,8 @@ def is_timedelta64_ns_dtype(arr_or_dtype): def is_datetime_or_timedelta_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, (np.datetime64, np.timedelta64)) @@ -398,12 +434,16 @@ def is_object(x): def needs_i8_conversion(arr_or_dtype): + if arr_or_dtype is None: + return False return (is_datetime_or_timedelta_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype) or is_period_dtype(arr_or_dtype)) def is_numeric_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return (issubclass(tipo, (np.number, np.bool_)) and not issubclass(tipo, (np.datetime64, np.timedelta64))) @@ -438,6 +478,8 @@ def is_string_like_dtype(arr_or_dtype): False """ + if arr_or_dtype is None: + return False try: dtype = _get_dtype(arr_or_dtype) return dtype.kind in ('S', 'U') @@ -446,16 +488,22 @@ def is_string_like_dtype(arr_or_dtype): def is_float_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.floating) def is_floating_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return isinstance(tipo, np.floating) def is_bool_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False try: tipo = _get_dtype_type(arr_or_dtype) except ValueError: @@ -479,6 +527,8 @@ def is_extension_type(value): def is_complex_dtype(arr_or_dtype): + if arr_or_dtype is None: + return False tipo = _get_dtype_type(arr_or_dtype) return issubclass(tipo, np.complexfloating) From 2522efa9e687e777d966f49af70b325922699bea Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 18 Apr 2017 09:55:30 -0500 Subject: [PATCH 55/56] DOC: Various doc fixes (#16035) - Fixed spacing - Fixed method reference - Fixed list line wrapping - Fixed unbalanced ticks - Fixed section-heading without colon - Changed Interval Properties -> Attributes - Changed 
Styler properties --- doc/source/whatsnew/v0.20.0.txt | 12 ++++++------ pandas/_libs/interval.pyx | 2 +- pandas/core/generic.py | 16 ++++++++-------- pandas/io/formats/style.py | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 9fe0b66028ac5..7951a4dd43534 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -176,7 +176,7 @@ The following are now part of this API: ``pandas.testing`` ^^^^^^^^^^^^^^^^^^ -We are adding a standard module that exposes the public testing functions in ``pandas.testing``(:issue:`9895`). Those functions can be used when writing tests for functionality using pandas objects. +We are adding a standard module that exposes the public testing functions in ``pandas.testing`` (:issue:`9895`). Those functions can be used when writing tests for functionality using pandas objects. The following testing functions are now part of this API: @@ -517,7 +517,7 @@ Other Enhancements - ``pandas.io.json.json_normalize()`` gained the option ``errors='ignore'|'raise'``; the default is ``errors='raise'`` which is backward compatible. (:issue:`14583`) - ``pandas.io.json.json_normalize()`` with an empty ``list`` will return an empty ``DataFrame`` (:issue:`15534`) - ``pandas.io.json.json_normalize()`` has gained a ``sep`` option that accepts ``str`` to separate joined fields; the default is ".", which is backward compatible. (:issue:`14883`) -- :method:`~MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. (:issue:`15694`) +- :meth:`~MultiIndex.remove_unused_levels` has been added to facilitate :ref:`removing unused levels `. 
(:issue:`15694`) - ``pd.read_csv()`` will now raise a ``ParserError`` error whenever any parsing error occurs (:issue:`15913`, :issue:`15925`) - ``pd.read_csv()`` now supports the ``error_bad_lines`` and ``warn_bad_lines`` arguments for the Python parser (:issue:`15925`) - The ``display.show_dimensions`` option can now also be used to specify @@ -620,8 +620,8 @@ However, ``.agg(..)`` can *also* accept a dict that allows 'renaming' of the res between ``Series`` and ``DataFrame``. We are deprecating this 'renaming' functionaility. - We are deprecating passing a dict to a grouped/rolled/resampled ``Series``. This allowed -one to ``rename`` the resulting aggregation, but this had a completely different -meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations. + one to ``rename`` the resulting aggregation, but this had a completely different + meaning than passing a dictionary to a grouped ``DataFrame``, which accepts column-to-aggregations. - We are deprecating passing a dict-of-dicts to a grouped/rolled/resampled ``DataFrame`` in a similar manner. This is an illustrative example: @@ -1363,9 +1363,9 @@ If indicated, a deprecation warning will be issued if you reference theses modul "pandas._testing", "pandas.util.libtesting", "" "pandas._window", "pandas.core.libwindow", "" -- The function :func:`~pandas.api.type.union_categoricals` is now importable from ``pandas.api.types``, formerly from ``pandas.types.concat`` (:issue:`15998`) +- The function :func:`~pandas.api.types.union_categoricals` is now importable from ``pandas.api.types``, formerly from ``pandas.types.concat`` (:issue:`15998`) -.. _whatsnew_0200.privacy.deprecate_plotting +.. 
_whatsnew_0200.privacy.deprecate_plotting: Deprecate .plotting ^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 60a34aff16e9d..e287e1fc8bdaf 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -52,7 +52,7 @@ cdef class Interval(IntervalMixin): .. versionadded:: 0.20.0 - Properties + Attributes ---------- left, right : values Left and right bounds for each interval. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1555157610609..74d3053821e39 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4217,14 +4217,14 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, Parameters ---------- - by : mapping function / list of functions, dict, Series, ndarray, - or tuple / list of column names or index level names or - Series or ndarrays - Called on each element of the object index to determine the groups. - If a dict or Series is passed, the Series or dict VALUES will be - used to determine the groups (the Series' values are first - aligned; see ``.align()`` method). If ndarray is passed, the - values as-is determine the groups. + by : mapping, function, str, or iterable + Used to determine the groups for the groupby. + If ``by`` is a function, it's called on each value of the object's + index. If a dict or Series is passed, the Series or dict VALUES + will be used to determine the groups (the Series' values are first + aligned; see ``.align()`` method). If an ndarray is passed, the + values are used as-is determine the groups. 
A str or list of strs + may be passed to group by the columns in ``self`` axis : int, default 0 level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 9321c29c99790..10f18fc35e43f 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -71,7 +71,7 @@ class Styler(object): Attributes ---------- env : Jinja2 Environment - template: Jinja2 Template + template : Jinja2 Template loader : Jinja2 Loader Notes From e082eb2c95a22a16d67e533cbf581a304cf5e70e Mon Sep 17 00:00:00 2001 From: yui-knk Date: Mon, 4 Jul 2016 00:06:27 +0900 Subject: [PATCH 56/56] BUG: `pivot_table` always returns a `DataFrame` Before this commit, if * `values` is not list like * `columns` is `None` * `aggfunc` is not instance of `list` `pivot_table` returns a `Series`. This commit adds checking for `columns.nlevels` is greater than 1 to prevent from casting `table` to a `Series`. This will fix #4386. DOC: add docs for #13554 --- doc/source/whatsnew/v0.20.0.txt | 32 +++++++++++++++++++++++++ pandas/core/reshape/pivot.py | 3 ++- pandas/tests/reshape/test_pivot.py | 38 ++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt index 7951a4dd43534..6b6f532ed2323 100644 --- a/doc/source/whatsnew/v0.20.0.txt +++ b/doc/source/whatsnew/v0.20.0.txt @@ -1287,6 +1287,38 @@ joins, :meth:`DataFrame.join` and :func:`merge`, and the ``.align`` methods. left.join(right, how='inner') +.. _whatsnew_0200.api_breaking.pivot_table: + +Pivot Table always returns a DataFrame +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The documentation for :meth:`pivot_table` states that a ``DataFrame`` is *always* returned. Here a bug +is fixed that allowed this to return a ``Series`` under a narrow circumstance. (:issue:`4386`) + +.. 
ipython:: python + + df = DataFrame({'col1': [3, 4, 5], + 'col2': ['C', 'D', 'E'], + 'col3': [1, 3, 9]}) + df + +Previous Behavior: + +.. code-block:: ipython + + In [2]: df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum) + Out[2]: + col3 col2 + 1 C 3 + 3 D 4 + 9 E 5 + Name: col1, dtype: int64 + +New Behavior: + +.. ipython:: python + + df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum) .. _whatsnew_0200.api: diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 1c5250615d410..74dbbfc00cb11 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -170,7 +170,8 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', margins_name=margins_name) # discard the top level - if values_passed and not values_multi and not table.empty: + if values_passed and not values_multi and not table.empty and \ + (table.columns.nlevels > 1): table = table[values[0]] if len(index) == 0 and len(columns) > 0: diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 88d25b9d053c3..7d122baa8ae64 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -940,6 +940,44 @@ def test_categorical_pivot_index_ordering(self): columns=expected_columns) tm.assert_frame_equal(result, expected) + def test_pivot_table_not_series(self): + # GH 4386 + # pivot_table always returns a DataFrame + # when values is not list like and columns is None + # and aggfunc is not instance of list + df = DataFrame({'col1': [3, 4, 5], + 'col2': ['C', 'D', 'E'], + 'col3': [1, 3, 9]}) + + result = df.pivot_table('col1', index=['col3', 'col2'], aggfunc=np.sum) + m = MultiIndex.from_arrays([[1, 3, 9], + ['C', 'D', 'E']], + names=['col3', 'col2']) + expected = DataFrame([3, 4, 5], + index=m, columns=['col1']) + + tm.assert_frame_equal(result, expected) + + result = df.pivot_table( + 'col1', index='col3', columns='col2', aggfunc=np.sum + ) + expected = DataFrame([[3, 
np.NaN, np.NaN], + [np.NaN, 4, np.NaN], + [np.NaN, np.NaN, 5]], + index=Index([1, 3, 9], name='col3'), + columns=Index(['C', 'D', 'E'], name='col2')) + + tm.assert_frame_equal(result, expected) + + result = df.pivot_table('col1', index='col3', aggfunc=[np.sum]) + m = MultiIndex.from_arrays([['sum'], + ['col1']]) + expected = DataFrame([3, 4, 5], + index=Index([1, 3, 9], name='col3'), + columns=m) + + tm.assert_frame_equal(result, expected) + class TestCrosstab(tm.TestCase):
-
- {%- if logo %} - - {%- endif %} - {%- block headertitle %} -

{{ shorttitle|e }}

- {%- endblock %} -
- {%- for rellink in rellinks|reverse %} - {{ rellink[3] }} - {%- if not loop.last %}{{ reldelim2 }}{% endif %} - {%- endfor %} -
-
-