diff --git a/fmskill/metrics.py b/fmskill/metrics.py
index 71ed8268b..33386b91d 100644
--- a/fmskill/metrics.py
+++ b/fmskill/metrics.py
@@ -42,7 +42,7 @@
 >>> mef(obs, mod)
 0.9231099877688299
 >>> si(obs, mod)
-0.7294663886165093
+0.8715019052958266
 >>> spearmanr(obs, mod)
 0.5
 >>> cc(obs, mod)
@@ -410,6 +410,26 @@ def si(obs: np.ndarray, model: np.ndarray) -> float:
 def scatter_index(obs: np.ndarray, model: np.ndarray) -> float:
     """Scatter index (SI)
 
+    This is the unbiased RMSE normalized by the mean absolute value of the observations.
+
+    .. math::
+        \\frac{ \\sqrt{ \\frac{1}{n} \\sum_{i=1}^n \\left( (model_i - \\overline {model}) - (obs_i - \\overline {obs}) \\right)^2} }
+        {\\frac{1}{n} \\sum_{i=1}^n | obs_i | }
+
+    Range: [0, \\infty); Best: 0
+    """
+    assert obs.size == model.size
+    if len(obs) == 0:
+        return np.nan
+
+    residual = obs.ravel() - model.ravel()
+    residual = residual - residual.mean()  # unbiased
+    return np.sqrt(np.mean(residual**2)) / np.mean(np.abs(obs.ravel()))
+
+
+def scatter_index2(obs: np.ndarray, model: np.ndarray) -> float:
+    """Alternative formulation of the scatter index (SI)
+
     .. math::
         \\sqrt {\\frac{\\sum_{i=1}^n \\left( (model_i - \\overline {model}) - (obs_i - \\overline {obs}) \\right)^2}
         {\\sum_{i=1}^n obs_i^2}}
diff --git a/tests/test_multimodelcompare.py b/tests/test_multimodelcompare.py
index efed980c4..cb5193459 100644
--- a/tests/test_multimodelcompare.py
+++ b/tests/test_multimodelcompare.py
@@ -118,7 +118,7 @@ def test_mm_skill_obs(cc):
     assert s.loc["SW_2"].bias == s2.loc["SW_2"].bias
 
     df = cc.mean_skill(model=0, observation=[0, "c2"]).df
-    assert pytest.approx(df.si[0]) == 0.10358979
+    assert pytest.approx(df.si[0]) == 0.11113215
 
 
 def test_mm_skill_missing_obs(cc, o1):
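
Note on the two formulations: the updated doctest value for `si(obs, mod)` and the re-baselined expectation in `test_mm_skill_obs` suggest that the `si` shorthand now resolves to the new `scatter_index` formulation. The sketch below contrasts the two forms. The `scatter_index` body is copied from this diff; the `scatter_index2` body is *inferred* from its docstring formula, since the diff truncates its implementation; the sample data is made up and is not the doctest data from `fmskill/metrics.py`:

```python
import numpy as np


def scatter_index(obs: np.ndarray, model: np.ndarray) -> float:
    """New default SI: unbiased RMSE over the mean absolute observation
    (body copied from this diff)."""
    assert obs.size == model.size
    if len(obs) == 0:
        return np.nan
    residual = obs.ravel() - model.ravel()
    residual = residual - residual.mean()  # remove the bias
    return np.sqrt(np.mean(residual**2)) / np.mean(np.abs(obs.ravel()))


def scatter_index2(obs: np.ndarray, model: np.ndarray) -> float:
    """Alternative SI, inferred from its docstring formula: the same
    unbiased residual, normalized by the root-sum-square of the obs."""
    assert obs.size == model.size
    if len(obs) == 0:
        return np.nan
    residual = obs.ravel() - model.ravel()
    residual = residual - residual.mean()
    return np.sqrt(np.sum(residual**2) / np.sum(obs.ravel() ** 2))


# Made-up sample data (NOT the doctest values from fmskill/metrics.py).
obs = np.array([0.5, 1.5, 2.5, 3.5])
mod = np.array([0.8, 1.2, 2.9, 3.3])

print(scatter_index(obs, mod))   # ~0.152: uRMSE / mean(|obs|)
print(scatter_index2(obs, mod))  # ~0.133: uRMSE / RMS(obs)
```

Since the root-mean-square of the observations is always at least their mean absolute value, `scatter_index2` can never exceed `scatter_index` on the same data, which is consistent with both baseline values (doctest and `df.si[0]`) increasing when `si` switched formulations.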