diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py
index 9100673f99579..6cfe4bea01045 100644
--- a/pandas/io/tests/test_cparser.py
+++ b/pandas/io/tests/test_cparser.py
@@ -82,8 +82,8 @@ def test_skipinitialspace(self):
                             header=None)
         result = reader.read()

-        self.assert_(np.array_equal(result[0], ['a', 'a', 'a', 'a']))
-        self.assert_(np.array_equal(result[1], ['b', 'b', 'b', 'b']))
+        self.assert_numpy_array_equal(result[0], ['a', 'a', 'a', 'a'])
+        self.assert_numpy_array_equal(result[1], ['b', 'b', 'b', 'b'])

     def test_parse_booleans(self):
         data = 'True\nFalse\nTrue\nTrue'
@@ -100,8 +100,8 @@ def test_delimit_whitespace(self):
                             header=None)
         result = reader.read()

-        self.assert_(np.array_equal(result[0], ['a', 'a', 'a']))
-        self.assert_(np.array_equal(result[1], ['b', 'b', 'b']))
+        self.assert_numpy_array_equal(result[0], ['a', 'a', 'a'])
+        self.assert_numpy_array_equal(result[1], ['b', 'b', 'b'])

     def test_embedded_newline(self):
         data = 'a\n"hello\nthere"\nthis'
@@ -110,7 +110,7 @@ def test_embedded_newline(self):
         result = reader.read()

         expected = ['a', 'hello\nthere', 'this']
-        self.assert_(np.array_equal(result[0], expected))
+        self.assert_numpy_array_equal(result[0], expected)

     def test_euro_decimal(self):
         data = '12345,67\n345,678'
diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py
index 904853a3cdce8..03823157a90c0 100644
--- a/pandas/io/tests/test_parsers.py
+++ b/pandas/io/tests/test_parsers.py
@@ -810,9 +810,9 @@ def test_unnamed_columns(self):
                     [11, 12, 13, 14, 15]]
         df = self.read_table(StringIO(data), sep=',')
         tm.assert_almost_equal(df.values, expected)
-        self.assert_(np.array_equal(df.columns,
-                                    ['A', 'B', 'C', 'Unnamed: 3',
-                                     'Unnamed: 4']))
+        self.assert_numpy_array_equal(df.columns,
+                                      ['A', 'B', 'C', 'Unnamed: 3',
+                                       'Unnamed: 4'])

     def test_string_nas(self):
         data = """A,B,C
@@ -963,11 +963,11 @@ def test_no_header(self):
         tm.assert_almost_equal(df.values, expected)
         tm.assert_almost_equal(df.values, df2.values)

-        self.assert_(np.array_equal(df_pref.columns,
-                                    ['X0', 'X1', 'X2', 'X3', 'X4']))
-        self.assert_(np.array_equal(df.columns, lrange(5)))
+        self.assert_numpy_array_equal(df_pref.columns,
+                                      ['X0', 'X1', 'X2', 'X3', 'X4'])
+        self.assert_numpy_array_equal(df.columns, lrange(5))

-        self.assert_(np.array_equal(df2.columns, names))
+        self.assert_numpy_array_equal(df2.columns, names)

     def test_no_header_prefix(self):
         data = """1,2,3,4,5
@@ -982,8 +982,8 @@ def test_no_header_prefix(self):
                     [11, 12, 13, 14, 15]]
         tm.assert_almost_equal(df_pref.values, expected)

-        self.assert_(np.array_equal(df_pref.columns,
-                     ['Field0', 'Field1', 'Field2', 'Field3', 'Field4']))
+        self.assert_numpy_array_equal(df_pref.columns,
+                       ['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])

     def test_header_with_index_col(self):
         data = """foo,1,2,3
@@ -1004,7 +1004,7 @@ def test_read_csv_dataframe(self):
         df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
         df2 = self.read_table(self.csv1, sep=',', index_col=0,
                               parse_dates=True)
-        self.assert_(np.array_equal(df.columns, ['A', 'B', 'C', 'D']))
+        self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
         self.assertEqual(df.index.name, 'index')
         self.assert_(isinstance(df.index[0],
                                 (datetime, np.datetime64, Timestamp)))
@@ -1015,7 +1015,7 @@ def test_read_csv_no_index_name(self):
         df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
         df2 = self.read_table(self.csv2, sep=',', index_col=0,
                               parse_dates=True)
-        self.assert_(np.array_equal(df.columns, ['A', 'B', 'C', 'D', 'E']))
+        self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
         self.assert_(isinstance(df.index[0],
                                 (datetime, np.datetime64, Timestamp)))
         self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']].values.dtype, np.float64)
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index 2d1d695ebd14f..ece50094b3a03 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -1099,8 +1099,8 @@ def test_set_value(self):

         res2 = res.set_value('foobar', 'qux', 1.5)
         self.assert_(res2 is not res)
-        self.assert_(np.array_equal(res2.columns,
-                                    list(self.frame.columns) + ['qux']))
+        self.assert_numpy_array_equal(res2.columns,
+                                      list(self.frame.columns) + ['qux'])
         self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)

     def test_fancy_index_misc(self):
@@ -1126,7 +1126,7 @@ def test_getitem_overload(self):
         subindex = self.frame.index[indexer]
         subframe = self.frame[indexer]

-        self.assert_(np.array_equal(subindex, subframe.index))
+        self.assert_numpy_array_equal(subindex, subframe.index)
         self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])

     def test_setitem(self):
@@ -1413,8 +1413,8 @@ def _check(frame):

             from_sparse_lp = spf.stack_sparse_frame(frame)

-            self.assert_(np.array_equal(from_dense_lp.values,
-                                        from_sparse_lp.values))
+            self.assert_numpy_array_equal(from_dense_lp.values,
+                                          from_sparse_lp.values)

         _check(self.frame)
         _check(self.iframe)
@@ -1624,7 +1624,7 @@ def _compare_with_dense(panel):
             slp = panel.to_frame()
             dlp = panel.to_dense().to_frame()

-            self.assert_(np.array_equal(slp.values, dlp.values))
+            self.assert_numpy_array_equal(slp.values, dlp.values)
             self.assert_(slp.index.equals(dlp.index))

         _compare_with_dense(self.panel)
diff --git a/pandas/stats/tests/test_ols.py b/pandas/stats/tests/test_ols.py
index 752d2f8ce16f2..82f96bd444429 100644
--- a/pandas/stats/tests/test_ols.py
+++ b/pandas/stats/tests/test_ols.py
@@ -881,7 +881,7 @@ def testFilterWithDictRHS(self):
         self.tsAssertEqual(exp_rhs2, rhs['x2'])

     def tsAssertEqual(self, ts1, ts2):
-        self.assert_(np.array_equal(ts1, ts2))
+        self.assert_numpy_array_equal(ts1, ts2)


 if __name__ == '__main__':
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 6645391aeda64..2117d7179ce0c 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -90,8 +90,8 @@ def test_cython_left_outer_join(self):
         exp_rs = exp_rs.take(exp_ri)
         exp_rs[exp_ri == -1] = -1

-        self.assert_(np.array_equal(ls, exp_ls))
-        self.assert_(np.array_equal(rs, exp_rs))
+        self.assert_numpy_array_equal(ls, exp_ls)
+        self.assert_numpy_array_equal(rs, exp_rs)

     def test_cython_right_outer_join(self):
         left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
@@ -116,8 +116,8 @@ def test_cython_right_outer_join(self):
         exp_rs = exp_rs.take(exp_ri)
         exp_rs[exp_ri == -1] = -1

-        self.assert_(np.array_equal(ls, exp_ls))
-        self.assert_(np.array_equal(rs, exp_rs))
+        self.assert_numpy_array_equal(ls, exp_ls)
+        self.assert_numpy_array_equal(rs, exp_rs)

     def test_cython_inner_join(self):
         left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
@@ -140,8 +140,8 @@ def test_cython_inner_join(self):
         exp_rs = exp_rs.take(exp_ri)
         exp_rs[exp_ri == -1] = -1

-        self.assert_(np.array_equal(ls, exp_ls))
-        self.assert_(np.array_equal(rs, exp_rs))
+        self.assert_numpy_array_equal(ls, exp_ls)
+        self.assert_numpy_array_equal(rs, exp_rs)

     def test_left_outer_join(self):
         joined_key2 = merge(self.df, self.df2, on='key2')
@@ -199,8 +199,8 @@ def test_join_on(self):
         source = self.source

         merged = target.join(source, on='C')
-        self.assert_(np.array_equal(merged['MergedA'], target['A']))
-        self.assert_(np.array_equal(merged['MergedD'], target['D']))
+        self.assert_numpy_array_equal(merged['MergedA'], target['A'])
+        self.assert_numpy_array_equal(merged['MergedD'], target['D'])

         # join with duplicates (fix regression from DataFrame/Matrix merge)
         df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
@@ -285,8 +285,8 @@ def test_join_on_inner(self):
         expected = df.join(df2, on='key')
         expected = expected[expected['value'].notnull()]

-        self.assert_(np.array_equal(joined['key'], expected['key']))
-        self.assert_(np.array_equal(joined['value'], expected['value']))
+        self.assert_numpy_array_equal(joined['key'], expected['key'])
+        self.assert_numpy_array_equal(joined['value'], expected['value'])
         self.assert_(joined.index.equals(expected.index))

     def test_join_on_singlekey_list(self):
@@ -612,7 +612,7 @@ def test_join_sort(self):

         # smoke test
         joined = left.join(right, on='key', sort=False)
-        self.assert_(np.array_equal(joined.index, lrange(4)))
+        self.assert_numpy_array_equal(joined.index, lrange(4))

     def test_intelligently_handle_join_key(self):
         # #733, be a bit more 1337 about not returning unconsolidated DataFrame
@@ -651,15 +651,15 @@ def test_handle_join_key_pass_array(self):
         rkey = np.array([1, 1, 2, 3, 4, 5])

         merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
-        self.assert_(np.array_equal(merged['key_0'],
-                                    np.array([1, 1, 1, 1, 2, 2, 3, 4, 5])))
+        self.assert_numpy_array_equal(merged['key_0'],
+                                      np.array([1, 1, 1, 1, 2, 2, 3, 4, 5]))

         left = DataFrame({'value': lrange(3)})
         right = DataFrame({'rvalue': lrange(6)})

         key = np.array([0, 1, 1, 2, 2, 3])
         merged = merge(left, right, left_index=True, right_on=key, how='outer')
-        self.assert_(np.array_equal(merged['key_0'], key))
+        self.assert_numpy_array_equal(merged['key_0'], key)

     def test_mixed_type_join_with_suffix(self):
         # GH #916
@@ -1414,7 +1414,7 @@ def test_concat_keys_specific_levels(self):
                        levels=[level],
                        names=['group_key'])

-        self.assert_(np.array_equal(result.columns.levels[0], level))
+        self.assert_numpy_array_equal(result.columns.levels[0], level)
         self.assertEqual(result.columns.names[0], 'group_key')

     def test_concat_dataframe_keys_bug(self):
@@ -1518,7 +1518,7 @@ def test_concat_keys_and_levels(self):
                                       ('baz', 'one'), ('baz', 'two')],
                                      names=['first', 'second'])
         self.assertEqual(result.index.names, ('first', 'second') + (None,))
-        self.assert_(np.array_equal(result.index.levels[0], ['baz', 'foo']))
+        self.assert_numpy_array_equal(result.index.levels[0], ['baz', 'foo'])

     def test_concat_keys_levels_no_overlap(self):
         # GH #1406
diff --git a/pandas/tools/tests/test_tile.py b/pandas/tools/tests/test_tile.py
index efb28ccb4c9e2..e3cd561920b74 100644
--- a/pandas/tools/tests/test_tile.py
+++ b/pandas/tools/tests/test_tile.py
@@ -76,12 +76,12 @@ def test_labels(self):
         result, bins = cut(arr, 4, retbins=True)
         ex_levels = ['(-0.001, 0.25]', '(0.25, 0.5]', '(0.5, 0.75]',
                      '(0.75, 1]']
-        self.assert_(np.array_equal(result.levels, ex_levels))
+        self.assert_numpy_array_equal(result.levels, ex_levels)

         result, bins = cut(arr, 4, retbins=True, right=False)
         ex_levels = ['[0, 0.25)', '[0.25, 0.5)', '[0.5, 0.75)',
                      '[0.75, 1.001)']
-        self.assert_(np.array_equal(result.levels, ex_levels))
+        self.assert_numpy_array_equal(result.levels, ex_levels)

     def test_cut_pass_series_name_to_factor(self):
         s = Series(np.random.randn(100), name='foo')
@@ -95,7 +95,7 @@ def test_label_precision(self):
         result = cut(arr, 4, precision=2)
         ex_levels = ['(-0.00072, 0.18]', '(0.18, 0.36]', '(0.36, 0.54]',
                      '(0.54, 0.72]']
-        self.assert_(np.array_equal(result.levels, ex_levels))
+        self.assert_numpy_array_equal(result.levels, ex_levels)

     def test_na_handling(self):
         arr = np.arange(0, 0.75, 0.01)
@@ -137,7 +137,7 @@ def test_qcut(self):
         assert_almost_equal(bins, ex_bins)

         ex_levels = cut(arr, ex_bins, include_lowest=True)
-        self.assert_(np.array_equal(labels, ex_levels))
+        self.assert_numpy_array_equal(labels, ex_levels)

     def test_qcut_bounds(self):
         arr = np.random.randn(1000)
@@ -162,7 +162,7 @@ def test_cut_out_of_bounds(self):

         mask = result.labels == -1
         ex_mask = (arr < -1) | (arr > 1)
-        self.assert_(np.array_equal(mask, ex_mask))
+        self.assert_numpy_array_equal(mask, ex_mask)

     def test_cut_pass_labels(self):
         arr = [50, 5, 10, 15, 20, 30, 70]
diff --git a/pandas/tseries/tests/test_daterange.py b/pandas/tseries/tests/test_daterange.py
index 0062ca107141c..626d47b51a30e 100644
--- a/pandas/tseries/tests/test_daterange.py
+++ b/pandas/tseries/tests/test_daterange.py
@@ -42,13 +42,13 @@ class TestGenRangeGeneration(tm.TestCase):
     def test_generate(self):
         rng1 = list(generate_range(START, END, offset=datetools.bday))
         rng2 = list(generate_range(START, END, time_rule='B'))
-        self.assert_(np.array_equal(rng1, rng2))
+        self.assert_numpy_array_equal(rng1, rng2)

     def test_generate_cday(self):
         _skip_if_no_cday()
         rng1 = list(generate_range(START, END, offset=datetools.cday))
         rng2 = list(generate_range(START, END, time_rule='C'))
-        self.assert_(np.array_equal(rng1, rng2))
+        self.assert_numpy_array_equal(rng1, rng2)

     def test_1(self):
         eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2),
@@ -139,7 +139,7 @@ def test_repr(self):

     def test_getitem(self):
         smaller = self.rng[:5]
-        self.assert_(np.array_equal(smaller, self.rng.view(np.ndarray)[:5]))
+        self.assert_numpy_array_equal(smaller, self.rng.view(np.ndarray)[:5])
         self.assertEquals(smaller.offset, self.rng.offset)

         sliced = self.rng[::5]
@@ -156,7 +156,7 @@ def test_getitem(self):
     def test_getitem_matplotlib_hackaround(self):
         values = self.rng[:, None]
         expected = self.rng.values[:, None]
-        self.assert_(np.array_equal(values, expected))
+        self.assert_numpy_array_equal(values, expected)

     def test_shift(self):
         shifted = self.rng.shift(5)
@@ -204,7 +204,7 @@ def test_union(self):
         tm.assert_isinstance(the_union, DatetimeIndex)

         # order does not matter
-        self.assert_(np.array_equal(right.union(left), the_union))
+        self.assert_numpy_array_equal(right.union(left), the_union)

         # overlapping, but different offset
         rng = date_range(START, END, freq=datetools.bmonthEnd)
@@ -352,7 +352,7 @@ def test_range_bug(self):

         start = datetime(2011, 1, 1)
         exp_values = [start + i * offset for i in range(5)]
-        self.assert_(np.array_equal(result, DatetimeIndex(exp_values)))
+        self.assert_numpy_array_equal(result, DatetimeIndex(exp_values))

     def test_range_tz(self):
         # GH 2906
@@ -459,7 +459,7 @@ def test_repr(self):

     def test_getitem(self):
         smaller = self.rng[:5]
-        self.assert_(np.array_equal(smaller, self.rng.view(np.ndarray)[:5]))
+        self.assert_numpy_array_equal(smaller, self.rng.view(np.ndarray)[:5])
         self.assertEquals(smaller.offset, self.rng.offset)

         sliced = self.rng[::5]
@@ -476,7 +476,7 @@ def test_getitem(self):
     def test_getitem_matplotlib_hackaround(self):
         values = self.rng[:, None]
         expected = self.rng.values[:, None]
-        self.assert_(np.array_equal(values, expected))
+        self.assert_numpy_array_equal(values, expected)

     def test_shift(self):
         shifted = self.rng.shift(5)
@@ -524,7 +524,7 @@ def test_union(self):
         tm.assert_isinstance(the_union, DatetimeIndex)

         # order does not matter
-        self.assert_(np.array_equal(right.union(left), the_union))
+        self.assert_numpy_array_equal(right.union(left), the_union)

         # overlapping, but different offset
         rng = date_range(START, END, freq=datetools.bmonthEnd)
diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py
index 074a8a04a96a5..ba2d1843eb7fd 100644
--- a/pandas/tseries/tests/test_period.py
+++ b/pandas/tseries/tests/test_period.py
@@ -1088,7 +1088,7 @@ def test_astype(self):
         idx = period_range('1990', '2009', freq='A')

         result = idx.astype('i8')
-        self.assert_(np.array_equal(result, idx.values))
+        self.assert_numpy_array_equal(result, idx.values)

     def test_constructor_use_start_freq(self):
         # GH #1118
@@ -1140,8 +1140,8 @@ def test_constructor_arrays_negative_year(self):

         pindex = PeriodIndex(year=years, quarter=quarters)

-        self.assert_(np.array_equal(pindex.year, years))
-        self.assert_(np.array_equal(pindex.quarter, quarters))
+        self.assert_numpy_array_equal(pindex.year, years)
+        self.assert_numpy_array_equal(pindex.quarter, quarters)

     def test_constructor_invalid_quarters(self):
         self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
@@ -1210,7 +1210,7 @@ def test_comp_period(self):

         result = idx < idx[10]
         exp = idx.values < idx.values[10]
-        self.assert_(np.array_equal(result, exp))
+        self.assert_numpy_array_equal(result, exp)

     def test_getitem_ndim2(self):
         idx = period_range('2007-01', periods=3, freq='M')
@@ -2215,7 +2215,7 @@ def test_nanosecondly(self):
     def _check_freq(self, freq, base_date):
         rng = PeriodIndex(start=base_date, periods=10, freq=freq)
         exp = np.arange(10, dtype=np.int64)
-        self.assert_(np.array_equal(rng.values, exp))
+        self.assert_numpy_array_equal(rng.values, exp)

     def test_negone_ordinals(self):
         freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index c66df3c1c9a49..c3c40aa542947 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -308,7 +308,7 @@ def test_pass_datetimeindex_to_index(self):

         expected = Index(rng.to_pydatetime(), dtype=object)

-        self.assert_(np.array_equal(idx.values, expected.values))
+        self.assert_numpy_array_equal(idx.values, expected.values)

     def test_contiguous_boolean_preserve_freq(self):
         rng = date_range('1/1/2000', '3/1/2000', freq='B')
@@ -850,7 +850,7 @@ def test_nat_vector_field_access(self):
             result = getattr(idx, field)
            expected = [getattr(x, field) if x is not NaT else -1
                         for x in idx]
-            self.assert_(np.array_equal(result, expected))
+            self.assert_numpy_array_equal(result, expected)

     def test_nat_scalar_field_access(self):
         fields = ['year', 'quarter', 'month', 'day', 'hour',
@@ -1068,7 +1068,7 @@ def test_promote_datetime_date(self):

         result = rng.get_indexer(ts2.index)
         expected = rng.get_indexer(ts_slice.index)
-        self.assert_(np.array_equal(result, expected))
+        self.assert_numpy_array_equal(result, expected)

     def test_asfreq_normalize(self):
         rng = date_range('1/1/2000 09:30', periods=20)
@@ -1554,7 +1554,7 @@ def test_astype_object(self):

         casted = rng.astype('O')
         exp_values = list(rng)
-        self.assert_(np.array_equal(casted, exp_values))
+        self.assert_numpy_array_equal(casted, exp_values)

     def test_catch_infinite_loop(self):
         offset = datetools.DateOffset(minute=5)
@@ -1683,7 +1683,7 @@ def test_series_interpolate_intraday(self):
         new_index = index.append(index + pd.DateOffset(hours=1)).order()
         result = ts.reindex(new_index).interpolate(method='time')

-        self.assert_(np.array_equal(result.values, exp.values))
+        self.assert_numpy_array_equal(result.values, exp.values)

     def test_frame_dict_constructor_datetime64_1680(self):
         dr = date_range('1/1/2012', periods=10)
@@ -1848,7 +1848,7 @@ def test_astype(self):
         rng = date_range('1/1/2000', periods=10)

         result = rng.astype('i8')
-        self.assert_(np.array_equal(result, rng.asi8))
+        self.assert_numpy_array_equal(result, rng.asi8)

     def test_to_period_nofreq(self):
         idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
@@ -1918,7 +1918,7 @@ def test_comparisons_coverage(self):

         result = rng == list(rng)
         exp = rng == rng
-        self.assert_(np.array_equal(result, exp))
+        self.assert_numpy_array_equal(result, exp)

     def test_map(self):
         rng = date_range('1/1/2000', periods=10)
@@ -1926,7 +1926,7 @@ def test_map(self):
         f = lambda x: x.strftime('%Y%m%d')
         result = rng.map(f)
         exp = [f(x) for x in rng]
-        self.assert_(np.array_equal(result, exp))
+        self.assert_numpy_array_equal(result, exp)

     def test_add_union(self):
         rng = date_range('1/1/2000', periods=5)
@@ -2024,11 +2024,11 @@ def test_order(self):

         ordered, dexer = idx.order(return_indexer=True)
         self.assert_(ordered.is_monotonic)
-        self.assert_(np.array_equal(dexer, [1, 2, 0]))
+        self.assert_numpy_array_equal(dexer, [1, 2, 0])

         ordered, dexer = idx.order(return_indexer=True, ascending=False)
         self.assert_(ordered[::-1].is_monotonic)
-        self.assert_(np.array_equal(dexer, [0, 2, 1]))
+        self.assert_numpy_array_equal(dexer, [0, 2, 1])

     def test_insert(self):
         idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
@@ -2055,7 +2055,7 @@ def test_map_bug_1677(self):

         result = index.map(f)
         expected = np.array([f(index[0])])
-        self.assert_(np.array_equal(result, expected))
+        self.assert_numpy_array_equal(result, expected)

     def test_groupby_function_tuple_1677(self):
         df = DataFrame(np.random.rand(100),
@@ -2090,7 +2090,7 @@ def test_union(self):
         i2 = Int64Index(np.arange(10, 30, 2))
         result = i1.union(i2)
         expected = Int64Index(np.arange(0, 30, 2))
-        self.assert_(np.array_equal(result, expected))
+        self.assert_numpy_array_equal(result, expected)

     def test_union_with_DatetimeIndex(self):
         i1 = Int64Index(np.arange(0, 20, 2))
@@ -2214,7 +2214,7 @@ def test_datetimeindex_accessors(self):

     def test_nanosecond_field(self):
         dti = DatetimeIndex(np.arange(10))
-        self.assert_(np.array_equal(dti.nanosecond, np.arange(10)))
+        self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))

     def test_datetimeindex_diff(self):
         dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
@@ -2398,12 +2398,12 @@ def test_series_comparison_scalars(self):
         val = datetime(2000, 1, 4)
         result = self.series > val
         expected = np.array([x > val for x in self.series])
-        self.assert_(np.array_equal(result, expected))
+        self.assert_numpy_array_equal(result, expected)

         val = self.series[5]
         result = self.series > val
         expected = np.array([x > val for x in self.series])
-        self.assert_(np.array_equal(result, expected))
+        self.assert_numpy_array_equal(result, expected)

     def test_between(self):
         left, right = self.series[[2, 7]]
@@ -2893,7 +2893,7 @@ def test_date_range_normalize(self):
         values = np.array([snap + i * offset for i in range(n)],
                           dtype='M8[ns]')

-        self.assert_(np.array_equal(rng, values))
+        self.assert_numpy_array_equal(rng, values)

         rng = date_range(
             '1/1/2000 08:15', periods=n, normalize=False, freq='B')
@@ -3143,8 +3143,8 @@ def test_to_datetime_infer_datetime_format_consistent_format(self):

         # Whether the format is explicitly passed, it is inferred, or
         # it is not inferred, the results should all be the same
-        self.assert_(np.array_equal(with_format, no_infer))
-        self.assert_(np.array_equal(no_infer, yes_infer))
+        self.assert_numpy_array_equal(with_format, no_infer)
+        self.assert_numpy_array_equal(no_infer, yes_infer)

     def test_to_datetime_infer_datetime_format_inconsistent_format(self):
         test_series = pd.Series(
@@ -3156,10 +3156,10 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self):

         # When the format is inconsistent, infer_datetime_format should just
         # fallback to the default parsing
-        self.assert_(np.array_equal(
+        self.assert_numpy_array_equal(
             pd.to_datetime(test_series, infer_datetime_format=False),
             pd.to_datetime(test_series, infer_datetime_format=True)
-        ))
+        )

         test_series = pd.Series(
             np.array([
@@ -3168,10 +3168,10 @@ def test_to_datetime_infer_datetime_format_inconsistent_format(self):
             'Mar/01/2011',
         ]))

-        self.assert_(np.array_equal(
+        self.assert_numpy_array_equal(
             pd.to_datetime(test_series, infer_datetime_format=False),
             pd.to_datetime(test_series, infer_datetime_format=True)
-        ))
+        )

     def test_to_datetime_infer_datetime_format_series_with_nans(self):
         test_series = pd.Series(
@@ -3182,10 +3182,10 @@ def test_to_datetime_infer_datetime_format_series_with_nans(self):
             np.nan,
         ]))

-        self.assert_(np.array_equal(
+        self.assert_numpy_array_equal(
             pd.to_datetime(test_series, infer_datetime_format=False),
             pd.to_datetime(test_series, infer_datetime_format=True)
-        ))
+        )

     def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
         test_series = pd.Series(
@@ -3197,10 +3197,10 @@ def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
             '01/03/2011 00:00:00',
         ]))

-        self.assert_(np.array_equal(
+        self.assert_numpy_array_equal(
             pd.to_datetime(test_series, infer_datetime_format=False),
             pd.to_datetime(test_series, infer_datetime_format=True)
-        ))
+        )


 class TestGuessDatetimeFormat(tm.TestCase):
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index 48fd68b71cfc1..698ec7beb913d 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -75,7 +75,7 @@ def test_utc_to_local_no_modify(self):
         rng_eastern = rng.tz_convert('US/Eastern')

         # Values are unmodified
-        self.assert_(np.array_equal(rng.asi8, rng_eastern.asi8))
+        self.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)

         self.assertEqual(rng_eastern.tz, pytz.timezone('US/Eastern'))

@@ -89,7 +89,7 @@ def test_localize_utc_conversion(self):
         converted = rng.tz_localize('US/Eastern')
         expected_naive = rng + offsets.Hour(5)
-        self.assert_(np.array_equal(converted.asi8, expected_naive.asi8))
+        self.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)

         # DST ambiguity, this should fail
         rng = date_range('3/11/2012', '3/12/2012', freq='30T')
@@ -146,10 +146,10 @@ def test_tz_localize_dti(self):
                              end='1/1/2005 5:00:30.256', freq='L', tz='utc')

-        self.assert_(np.array_equal(dti2.values, dti_utc.values))
+        self.assert_numpy_array_equal(dti2.values, dti_utc.values)

         dti3 = dti2.tz_convert('US/Pacific')
-        self.assert_(np.array_equal(dti3.values, dti_utc.values))
+        self.assert_numpy_array_equal(dti3.values, dti_utc.values)

         dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',
                             freq='L')
@@ -289,7 +289,7 @@ def test_pass_dates_localize_to_utc(self):
         fromdates = DatetimeIndex(strdates, tz='US/Eastern')

         self.assertEqual(conv.tz, fromdates.tz)
-        self.assert_(np.array_equal(conv.values, fromdates.values))
+        self.assert_numpy_array_equal(conv.values, fromdates.values)

     def test_field_access_localize(self):
         strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
@@ -301,7 +301,7 @@ def test_field_access_localize(self):
                         tz='America/Atikokan')

         expected = np.arange(10)
-        self.assert_(np.array_equal(dr.hour, expected))
+        self.assert_numpy_array_equal(dr.hour, expected)

     def test_with_tz(self):
         tz = pytz.timezone('US/Central')
@@ -332,7 +332,7 @@ def test_tz_localize(self):
         dr = bdate_range('1/1/2009', '1/1/2010')
         dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
         localized = dr.tz_localize(pytz.utc)
-        self.assert_(np.array_equal(dr_utc, localized))
+        self.assert_numpy_array_equal(dr_utc, localized)

     def test_with_tz_ambiguous_times(self):
         tz = pytz.timezone('US/Eastern')
@@ -373,14 +373,14 @@ def test_infer_dst(self):
                             '11/06/2011 01:00', '11/06/2011 02:00',
                             '11/06/2011 03:00'])
         localized = di.tz_localize(tz, infer_dst=True)
-        self.assert_(np.array_equal(dr, localized))
+        self.assert_numpy_array_equal(dr, localized)

         # When there is no dst transition, nothing special happens
         dr = date_range(datetime(2011, 6, 1, 0), periods=10,
                         freq=datetools.Hour())
         localized = dr.tz_localize(tz)
         localized_infer = dr.tz_localize(tz, infer_dst=True)
-        self.assert_(np.array_equal(localized, localized_infer))
+        self.assert_numpy_array_equal(localized, localized_infer)


 # test utility methods
@@ -484,9 +484,9 @@ def test_fixedtz_topydatetime(self):
                           datetime(2000, 1, 2, tzinfo=fixed_off),
                           datetime(2000, 1, 3, tzinfo=fixed_off)])
         result = to_datetime(dates).to_pydatetime()
-        self.assert_(np.array_equal(dates, result))
+        self.assert_numpy_array_equal(dates, result)
         result = to_datetime(dates)._mpl_repr()
-        self.assert_(np.array_equal(dates, result))
+        self.assert_numpy_array_equal(dates, result)

     def test_convert_tz_aware_datetime_datetime(self):
         # #1581
@@ -502,7 +502,7 @@ def test_convert_tz_aware_datetime_datetime(self):
         converted = to_datetime(dates_aware, utc=True)

         ex_vals = [Timestamp(x).value for x in dates_aware]
-        self.assert_(np.array_equal(converted.asi8, ex_vals))
+        self.assert_numpy_array_equal(converted.asi8, ex_vals)
         self.assert_(converted.tz is pytz.utc)

     def test_to_datetime_utc(self):
diff --git a/pandas/tseries/tests/test_tslib.py b/pandas/tseries/tests/test_tslib.py
index 8c31254d26c02..bc5b8dcfbd49a 100644
--- a/pandas/tseries/tests/test_tslib.py
+++ b/pandas/tseries/tests/test_tslib.py
@@ -122,10 +122,10 @@ def test_number_looking_strings_not_into_datetime(self):
         # These strings don't look like datetimes so they shouldn't be
         # attempted to be converted
         arr = np.array(['-352.737091', '183.575577'], dtype=object)
-        self.assert_(np.array_equal(tslib.array_to_datetime(arr), arr))
+        self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)

         arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
-        self.assert_(np.array_equal(tslib.array_to_datetime(arr), arr))
+        self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)

     def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
         invalid_dates = [
@@ -172,7 +172,7 @@ def test_coerce_of_invalid_datetimes(self):

         # Without coercing, the presence of any invalid dates prevents
         # any values from being converted
-        self.assert_(np.array_equal(tslib.array_to_datetime(arr), arr))
+        self.assert_numpy_array_equal(tslib.array_to_datetime(arr), arr)

         # With coercing, the invalid dates becomes iNaT
         self.assert_(
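
Note for reviewers: the change is mechanical throughout. Every self.assert_(np.array_equal(a, b)) becomes self.assert_numpy_array_equal(a, b). The practical benefit is the failure output: the bare truth-value assertion only reports "False is not true", while a dedicated array comparison can show both operands. A minimal sketch of the idea behind such a helper follows; the body is illustrative only and is not the actual pandas.util.testing implementation.

    import numpy as np

    class ArrayAssertions(object):
        # Illustrative mixin for unittest-style TestCase classes.
        def assert_numpy_array_equal(self, left, right):
            # Coerce lists, Index objects, etc. so they compare cleanly.
            left, right = np.asarray(left), np.asarray(right)
            if not np.array_equal(left, right):
                raise AssertionError('arrays are not equal\n'
                                     'left:  %r\nright: %r' % (left, right))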