diff --git a/nbs/src/nixtla_client.ipynb b/nbs/src/nixtla_client.ipynb
index 5599e663..94460f7d 100644
--- a/nbs/src/nixtla_client.ipynb
+++ b/nbs/src/nixtla_client.ipynb
@@ -1161,7 +1161,6 @@
     "        )\n",
     "        out = ufp.assign_columns(out, 'TimeGPT', resp['mean'])\n",
     "        out = _maybe_add_intervals(out, resp['intervals'])\n",
-    "        out = _maybe_convert_level_to_quantiles(out, quantiles)\n",
     "        if add_history:\n",
     "            in_sample_df = _parse_in_sample_output(\n",
     "                in_sample_output=in_sample_resp,\n",
@@ -1173,6 +1172,7 @@
     "            )\n",
     "            in_sample_df = ufp.drop_columns(in_sample_df, target_col)\n",
     "            out = ufp.vertical_concat([in_sample_df, out])\n",
+    "        out = _maybe_convert_level_to_quantiles(out, quantiles)\n",
     "        self._maybe_assign_feature_contributions(\n",
     "            expected_contributions=feature_contributions,\n",
     "            resp=resp,\n",
@@ -1184,8 +1184,10 @@
     "        sort_idxs = ufp.maybe_compute_sort_indices(out, id_col=id_col, time_col=time_col)\n",
     "        if sort_idxs is not None:\n",
     "            out = ufp.take_rows(out, sort_idxs)\n",
+    "            out = ufp.drop_index_if_pandas(out)\n",
     "            if hasattr(self, 'feature_contributions'):\n",
     "                self.feature_contributions = ufp.take_rows(self.feature_contributions, sort_idxs)\n",
+    "                self.feature_contributions = ufp.drop_index_if_pandas(self.feature_contributions)\n",
     "        out = _maybe_drop_id(df=out, id_col=id_col, drop=drop_id)\n",
     "        self._maybe_assign_weights(weights=resp['weights_x'], df=df, x_cols=x_cols)\n",
     "        return out\n",
@@ -1833,8 +1835,10 @@
     "        **kwargs\n",
     "    )\n",
     "    assert all(col in df_qls.columns for col in exp_q_cols)\n",
+    "    assert not any('-lo-' in col for col in df_qls.columns)\n",
     "    # test monotonicity of quantiles\n",
-    "    df_qls.apply(lambda x: x.is_monotonic_increasing, axis=1).sum() == len(exp_q_cols)\n",
+    "    for c1, c2 in zip(exp_q_cols[:-1], exp_q_cols[1:]):\n",
+    "        assert df_qls[c1].lt(df_qls[c2]).all()\n",
     "test_method_qls(nixtla_client.forecast)\n",
     "test_method_qls(nixtla_client.forecast, add_history=True)\n",
     "test_method_qls(nixtla_client.cross_validation)"
@@ -2394,8 +2398,8 @@
     "anom_inferred_df_index = nixtla_client.detect_anomalies(df_ds_index)\n",
     "fcst_inferred_df = nixtla_client.forecast(df_[['ds', 'unique_id', 'y']], h=10)\n",
     "anom_inferred_df = nixtla_client.detect_anomalies(df_[['ds', 'unique_id', 'y']])\n",
-    "pd.testing.assert_frame_equal(fcst_inferred_df_index, fcst_inferred_df, atol=1e-3)\n",
-    "pd.testing.assert_frame_equal(anom_inferred_df_index, anom_inferred_df, atol=1e-3)\n",
+    "pd.testing.assert_frame_equal(fcst_inferred_df_index, fcst_inferred_df, atol=1e-4, rtol=1e-3)\n",
+    "pd.testing.assert_frame_equal(anom_inferred_df_index, anom_inferred_df, atol=1e-4, rtol=1e-3)\n",
     "df_ds_index = df_ds_index.groupby('unique_id').tail(80)\n",
     "for freq in ['Y', 'W-MON', 'Q-DEC', 'H']:\n",
     "    df_ds_index.index = np.concatenate(\n",
@@ -2405,7 +2409,7 @@
     "    fcst_inferred_df_index = nixtla_client.forecast(df_ds_index, h=10)\n",
     "    df_test = df_ds_index.reset_index()\n",
     "    fcst_inferred_df = nixtla_client.forecast(df_test, h=10)\n",
-    "    pd.testing.assert_frame_equal(fcst_inferred_df_index, fcst_inferred_df, atol=1e-3)"
+    "    pd.testing.assert_frame_equal(fcst_inferred_df_index, fcst_inferred_df, atol=1e-4, rtol=1e-3)"
    ]
   },
   {
@@ -2547,7 +2551,9 @@
     "\n",
     "pd.testing.assert_frame_equal(\n",
     "    timegpt_anomalies_df_1,\n",
-    "    timegpt_anomalies_df_2 \n",
+    "    timegpt_anomalies_df_2,\n",
+    "    atol=1e-4,\n",
+    "    rtol=1e-3,\n",
     ")"
    ]
  },
diff --git a/nixtla/nixtla_client.py b/nixtla/nixtla_client.py
index 2d11c4da..2298d21f 100644
--- a/nixtla/nixtla_client.py
+++ b/nixtla/nixtla_client.py
@@ -1093,7 +1093,6 @@ def forecast(
         )
         out = ufp.assign_columns(out, "TimeGPT", resp["mean"])
         out = _maybe_add_intervals(out, resp["intervals"])
-        out = _maybe_convert_level_to_quantiles(out, quantiles)
         if add_history:
             in_sample_df = _parse_in_sample_output(
                 in_sample_output=in_sample_resp,
@@ -1105,6 +1104,7 @@ def forecast(
             )
             in_sample_df = ufp.drop_columns(in_sample_df, target_col)
             out = ufp.vertical_concat([in_sample_df, out])
+        out = _maybe_convert_level_to_quantiles(out, quantiles)
         self._maybe_assign_feature_contributions(
             expected_contributions=feature_contributions,
             resp=resp,
@@ -1118,10 +1118,14 @@ def forecast(
         )
         if sort_idxs is not None:
             out = ufp.take_rows(out, sort_idxs)
+            out = ufp.drop_index_if_pandas(out)
             if hasattr(self, "feature_contributions"):
                 self.feature_contributions = ufp.take_rows(
                     self.feature_contributions, sort_idxs
                 )
+                self.feature_contributions = ufp.drop_index_if_pandas(
+                    self.feature_contributions
+                )
         out = _maybe_drop_id(df=out, id_col=id_col, drop=drop_id)
         self._maybe_assign_weights(weights=resp["weights_x"], df=df, x_cols=x_cols)
         return out
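
Note on the test change at the `@@ -1833,8 +1835,10 @@` hunk: the old line `df_qls.apply(...).sum() == len(exp_q_cols)` evaluated a comparison but never asserted it, so it could not fail. A minimal sketch of the replacement pairwise check follows, assuming toy data and illustrative `TimeGPT-q-*` column names (not taken from the patch):

```python
# Illustrative sketch only: pairwise monotonicity check over quantile columns.
# The column names and values below are hypothetical examples.
import pandas as pd

exp_q_cols = ["TimeGPT-q-10", "TimeGPT-q-50", "TimeGPT-q-90"]
df_qls = pd.DataFrame(
    {
        "TimeGPT-q-10": [1.0, 2.0],
        "TimeGPT-q-50": [1.5, 2.5],
        "TimeGPT-q-90": [2.0, 3.0],
    }
)

# Assert that each quantile column is strictly below the next one,
# which is what the updated notebook test does.
for c1, c2 in zip(exp_q_cols[:-1], exp_q_cols[1:]):
    assert df_qls[c1].lt(df_qls[c2]).all()
```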