From 36d498c3a6b63b5b7f1b7950a34ecd34aeae835a Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Wed, 12 Jan 2022 17:37:26 +0000
Subject: [PATCH 1/2] Modifications to code, documentation and test,
 particularly to improve/clarify the usage of the scale parameter.

---
 .../ensemble_calibration.rst                  | 37 ++++++--
 improver/calibration/ensemble_calibration.py  |  4 +-
 .../ensemble_copula_coupling.py               | 18 ++--
 ...alibratedForecastDistributionParameters.py | 16 ++--
 ...LocationAndScaleParametersToPercentiles.py | 85 +++++++++----------
 ...cationAndScaleParametersToProbabilities.py | 10 +--
 6 files changed, 93 insertions(+), 77 deletions(-)

diff --git a/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst b/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
index 55a01d59ab..0e85fb2c53 100644
--- a/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
+++ b/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
@@ -56,9 +56,9 @@ A normal (Gaussian) distribution is often represented using the syntax:
 where :math:`\mu` is mean and :math:`\sigma^{2}` is the variance. The normal
 distribution is a special case, where :math:`\mu` can be interpreted as both
 the mean and the location parameter and :math:`\sigma^{2}` can be interpreted
-as both the variance and the scale parameter. For an alternative distribution,
-such as a truncated normal distribution that has been truncated to lie within
-0 and infinity, the distribution can be represented as:
+as both the variance and the square of the scale parameter. For an alternative
+distribution, such as a truncated normal distribution that has been truncated
+to lie within 0 and infinity, the distribution can be represented as:
 
 .. math::
 
@@ -82,6 +82,29 @@ The scale parameter indicates the width in the distribution. If the scale
 parameter is large, then the distribution will be broader. If the scale is
 smaller, then the distribution will be narrower.
 
+****************************************************
+Implementation details
+****************************************************
+
+In this implementation, we choose to define the distributions
+using the scale parameter (as this matches scipy's expectation),
+rather than the square of the scale parameter:
+
+.. math::
+
+    \mathcal{N}(\mu,\,\sigma)
+
+The full equation when estimating the EMOS coefficients using
+the ensemble mean is therefore:
+
+.. math::
+
+    \mathcal{N}(a + b\bar{X}, \sqrt{c + dS^{2}})
+
+This matches the equations noted in `Allen et al., 2021`_.
+
+.. _Allen et al., 2021: https://doi.org/10.1002/qj.3983
+
 ****************************************************
 Estimating EMOS coefficients using the ensemble mean
 ****************************************************
@@ -91,7 +114,7 @@ If the predictor is the ensemble mean, coefficients are estimated as
 
 .. math::
 
-  \mathcal{N}(a + \bar{X}, c + dS^{2})
+  \mathcal{N}(a + b\bar{X}, \sqrt{c + dS^{2}})
 
 where N is a chosen distribution and values of a, b, c and d are solved
 in the format of :math:`\alpha, \beta, \gamma` and :math:`\delta`, see the equations
@@ -121,7 +144,7 @@ If the predictor is the ensemble realizations, coefficients are estimated for
 
 .. math::
 
-  \mathcal{N}(a + b_1X_1 + ... + b_mX_m, c + dS^{2})
+  \mathcal{N}(a + b_1X_1 + ... + b_mX_m, \sqrt{c + dS^{2}})
 
 where N is a chosen distribution, the values of a, b, c and d relate to alpha,
 beta, gamma and delta through the equations above with
@@ -140,14 +163,14 @@ The EMOS coefficients represent adjustments to the ensemble mean and ensemble
 variance, in order to generate the location and scale parameters that, for the
 chosen distribution, minimise the CRPS. The coefficients can therefore be used
 to construct the location parameter, :math:`\mu`, and scale parameter,
-:math:`\sigma^{2}`, for the calibrated forecast from today's ensemble mean, or
+:math:`\sigma`, for the calibrated forecast from today's ensemble mean, or
 ensemble realizations, and the ensemble variance.
 
 .. math::
 
     \mu = a + b\bar{X}
 
-    \sigma^{2} = c + dS^{2}
+    \sigma = \sqrt{c + dS^{2}}
 
 Note here that this procedure holds whether the distribution is normal, i.e.
 where the application of the EMOS coefficients to the raw ensemble mean results
diff --git a/improver/calibration/ensemble_calibration.py b/improver/calibration/ensemble_calibration.py
index 38b1931743..c70536b94f 100644
--- a/improver/calibration/ensemble_calibration.py
+++ b/improver/calibration/ensemble_calibration.py
@@ -1583,7 +1583,7 @@ def _calculate_scale_parameter(self) -> ndarray:
         # Calculating the scale parameter, based on the raw variance S^2,
         # where predicted variance = c + dS^2, where c = (gamma)^2 and
         # d = (delta)^2
-        scale_parameter = (
+        scale_parameter = np.sqrt(
             self.coefficients_cubelist.extract_cube("emos_coefficient_gamma").data
             * self.coefficients_cubelist.extract_cube("emos_coefficient_gamma").data
             + self.coefficients_cubelist.extract_cube("emos_coefficient_delta").data
@@ -1622,7 +1622,7 @@ def _create_output_cubes(
         )
         scale_parameter_cube = create_new_diagnostic_cube(
             "scale_parameter",
-            f"({template_cube.units})^2",
+            template_cube.units,
             template_cube,
             template_cube.attributes,
             data=scale_parameter,
diff --git a/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
index ec0bacbf6a..ef4428b4db 100644
--- a/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
+++ b/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
@@ -871,10 +871,10 @@ def _location_and_scale_parameters_to_percentiles(
             (len(percentiles_as_fractions), location_data.shape[0]), dtype=np.float32
         )
 
-        self._rescale_shape_parameters(location_data, np.sqrt(scale_data))
+        self._rescale_shape_parameters(location_data, scale_data)
 
         percentile_method = self.distribution(
-            *self.shape_parameters, loc=location_data, scale=np.sqrt(scale_data)
+            *self.shape_parameters, loc=location_data, scale=scale_data
         )
 
         # Loop over percentiles, and use the distribution as the
@@ -1039,11 +1039,9 @@ def _check_unit_compatibility(
     ) -> None:
         """
         The location parameter, scale parameters, and threshold values come
-        from three different cubes. They should all be in the same base unit,
-        with the units of the scale parameter being the squared units of the
-        location parameter and threshold values. This is a sanity check to
-        ensure the units are as expected, converting units of the location
-        parameter and scale parameter if possible.
+        from three different cubes. This is a sanity check to ensure the units
+        are as expected, converting units of the location parameter and
+        scale parameter if possible.
 
         Args:
             location_parameter:
@@ -1060,7 +1058,7 @@ def _check_unit_compatibility(
         try:
             location_parameter.convert_units(threshold_units)
-            scale_parameter.convert_units(threshold_units ** 2)
+            scale_parameter.convert_units(threshold_units)
         except ValueError as err:
             msg = (
                 "Error: {} This is likely because the mean "
                 "variance and template cube threshold units are "
                 "not equivalent/compatible.".format(err)
             )
             raise ValueError(msg)
@@ -1107,7 +1105,7 @@ def _location_and_scale_parameters_to_probabilities(
         relative_to_threshold = probability_is_above_or_below(probability_cube_template)
 
         self._rescale_shape_parameters(
-            location_parameter.data.flatten(), np.sqrt(scale_parameter.data).flatten()
+            location_parameter.data.flatten(), scale_parameter.data.flatten()
         )
 
         # Loop over thresholds, and use the specified distribution with the
@@ -1118,7 +1116,7 @@ def _location_and_scale_parameters_to_probabilities(
             distribution = self.distribution(
                 *self.shape_parameters,
                 loc=location_parameter.data.flatten(),
-                scale=np.sqrt(scale_parameter.data.flatten()),
+                scale=scale_parameter.data.flatten(),
             )
             probability_method = distribution.cdf
diff --git a/improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py b/improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
index 22075aae32..7529aac227 100644
--- a/improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
+++ b/improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py
@@ -169,9 +169,9 @@ def setUp(self):
         )
         self.expected_scale_param_mean = np.array(
             [
-                [0.2316, 0.2342, 0.0168],
-                [0.0271, 0.0237, 0.0168],
-                [0.0634, 0.1151, 0.0116],
+                [0.4813, 0.4840, 0.1295],
+                [0.1647, 0.1538, 0.1295],
+                [0.2517, 0.3393, 0.1076],
             ],
             dtype=np.float32,
         )
@@ -188,7 +188,7 @@ def setUp(self):
         )
 
         self.expected_scale_param_realizations_sites = np.array(
-            [0, 0, 0, 0], dtype=np.float32
+            [0.0005, 0.0005, 0.0005, 0.0005], dtype=np.float32
         )
 
         self.expected_loc_param_mean_alt = np.array(
@@ -202,9 +202,9 @@ def setUp(self):
 
         self.expected_scale_param_mean_alt = np.array(
             [
-                [0.4347, 0.4396, 0.0308],
-                [0.0503, 0.0438, 0.0308],
-                [0.1184, 0.2157, 0.0211],
+                [0.6593, 0.663, 0.1756],
+                [0.2242, 0.2093, 0.1756],
+                [0.3441, 0.4645, 0.1452],
             ],
             dtype=np.float32,
         )
@@ -219,7 +219,7 @@ def setUp(self):
         self.expected_scale_param_mean_cube = set_up_variable_cube(
             self.expected_scale_param_mean,
             name="scale_parameter",
-            units="Kelvin^2",
+            units="K",
            attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
         )
 
diff --git a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
index 9d0a2eccd1..a51686882d 100644
--- a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
+++ b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
@@ -72,19 +72,19 @@ def setUp(self):
         self.data = np.array(
             [
                 [
-                    [225.568115, 236.818115, 248.068115],
-                    [259.318115, 270.568115, 281.818115],
-                    [293.068115, 304.318115, 315.568115],
+                    [225.5681, 236.8181, 248.0681],
+                    [259.3181, 270.5681, 281.8181],
+                    [293.0681, 304.3181, 315.5681],
                 ],
                 [
-                    [229.483322, 240.733322, 251.983322],
-                    [263.233307, 274.483307, 285.733307],
-                    [296.983307, 308.233307, 319.483307],
+                    [229.4833, 240.7333, 251.9833],
+                    [263.2333, 274.4833, 285.7333],
+                    [296.9833, 308.2333, 319.4833],
                 ],
                 [
-                    [233.398529, 244.648529, 255.898529],
-                    [267.148499, 278.398499, 289.648499],
-                    [300.898499, 312.148499, 323.398499],
+                    [233.3985, 244.6485, 255.8985],
+                    [267.1485, 278.3985, 289.6485],
+                    [300.8985, 312.1485, 323.3985],
                 ],
             ],
             dtype=np.float32,
@@ -94,7 +94,7 @@ def setUp(self):
             "realization", iris.analysis.MEAN
         )
         self.scale_parameter = self.temperature_cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+            "realization", iris.analysis.STD_DEV,
         )
 
         self.percentiles = [10, 50, 90]
@@ -202,19 +202,19 @@ def test_simple_data_truncnorm_distribution(self):
         expected_data = np.array(
             [
                 [
-                    [1.3042759, 1.3042759, 1.3042759],
-                    [1.3042759, 1.3042759, 1.3042759],
-                    [1.3042759, 1.3042759, 1.3042759],
+                    [1.0121, 1.0121, 1.0121],
+                    [1.0121, 1.0121, 1.0121],
+                    [1.0121, 1.0121, 1.0121],
                 ],
                 [
-                    [3.0300407, 3.0300407, 3.0300407],
-                    [3.0300407, 3.0300407, 3.0300407],
-                    [3.0300407, 3.0300407, 3.0300407],
+                    [3.1677, 3.1677, 3.1677],
+                    [3.1677, 3.1677, 3.1677],
+                    [3.1677, 3.1677, 3.1677],
                 ],
                 [
-                    [4.8261294, 4.8261294, 4.8261294],
-                    [4.8261294, 4.8261294, 4.8261294],
-                    [4.8261294, 4.8261294, 4.8261294],
+                    [5.6412, 5.6412, 5.6412],
+                    [5.6412, 5.6412, 5.6412],
+                    [5.6412, 5.6412, 5.6412],
                 ],
             ]
         )
@@ -227,22 +227,22 @@ def test_simple_data_truncnorm_distribution(self):
         current_forecast_predictor = self.temperature_cube.collapsed(
             "realization", iris.analysis.MEAN
         )
         current_forecast_predictor.data = current_forecast_predictor.data + 1
         # Use an adjusted version of the ensemble variance as a proxy for the
         # scale parameter for the truncated normal distribution.
-        current_forecast_variance = self.temperature_cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+        current_forecast_stddev = self.temperature_cube.collapsed(
+            "realization", iris.analysis.STD_DEV,
         )
-        current_forecast_variance.data = current_forecast_variance.data + 1
+        current_forecast_stddev.data = current_forecast_stddev.data + 1
         plugin = Plugin(
             distribution="truncnorm",
             shape_parameters=np.array([0, np.inf], dtype=np.float32),
         )
         result = plugin._location_and_scale_parameters_to_percentiles(
             current_forecast_predictor,
-            current_forecast_variance,
+            current_forecast_stddev,
             self.temperature_cube,
             self.percentiles,
         )
         self.assertIsInstance(result, Cube)
-        self.assertArrayAlmostEqual(result.data, expected_data)
+        np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)
 
     @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
     def test_simple_data(self):
@@ -280,16 +280,16 @@ def test_simple_data(self):
         current_forecast_predictor = self.temperature_cube.collapsed(
             "realization", iris.analysis.MEAN
         )
-        current_forecast_variance = self.temperature_cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+        current_forecast_stddev = self.temperature_cube.collapsed(
+            "realization", iris.analysis.STD_DEV
         )
         result = Plugin()._location_and_scale_parameters_to_percentiles(
             current_forecast_predictor,
-            current_forecast_variance,
+            current_forecast_stddev,
             self.temperature_cube,
             self.percentiles,
         )
-        self.assertArrayAlmostEqual(result.data, expected_data)
+        np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)
 
     @ManageWarnings(
         ignored_messages=[
@@ -322,16 +322,16 @@ def test_if_identical_data(self):
         current_forecast_predictor = self.temperature_cube.collapsed(
             "realization", iris.analysis.MEAN
         )
-        current_forecast_variance = self.temperature_cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+        current_forecast_stddev = self.temperature_cube.collapsed(
+            "realization", iris.analysis.STD_DEV
         )
         result = Plugin()._location_and_scale_parameters_to_percentiles(
             current_forecast_predictor,
-            current_forecast_variance,
+            current_forecast_stddev,
             self.temperature_cube,
             self.percentiles,
         )
-        self.assertArrayAlmostEqual(result.data, expected_data)
+        np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)
 
     @ManageWarnings(
         ignored_messages=[
@@ -368,16 +368,16 @@ def test_if_nearly_identical_data(self):
         current_forecast_predictor = self.temperature_cube.collapsed(
             "realization", iris.analysis.MEAN
         )
-        current_forecast_variance = self.temperature_cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+        current_forecast_stddev = self.temperature_cube.collapsed(
+            "realization", iris.analysis.STD_DEV
         )
         result = Plugin()._location_and_scale_parameters_to_percentiles(
             current_forecast_predictor,
-            current_forecast_variance,
+            current_forecast_stddev,
             self.temperature_cube,
             self.percentiles,
         )
-        self.assertArrayAlmostEqual(result.data, expected_data)
+        np.testing.assert_allclose(result.data, expected_data, rtol=1.0e-4)
 
     @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
     def test_many_percentiles(self):
@@ -422,17 +422,12 @@ def test_spot_forecasts_check_data(self):
 
         cube = set_up_spot_test_cube()
         current_forecast_predictor = cube.collapsed("realization", iris.analysis.MEAN)
-        current_forecast_variance = cube.collapsed(
-            "realization", iris.analysis.VARIANCE
-        )
+        current_forecast_stddev = cube.collapsed("realization", iris.analysis.STD_DEV)
         result = Plugin()._location_and_scale_parameters_to_percentiles(
-            current_forecast_predictor,
-            current_forecast_variance,
-            cube,
-            self.percentiles,
+            current_forecast_predictor, current_forecast_stddev, cube, self.percentiles,
         )
         self.assertIsInstance(result, Cube)
-        self.assertArrayAlmostEqual(result.data, data)
+        np.testing.assert_allclose(result.data, data, rtol=1.0e-4)
 
     @ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
     def test_scalar_realisation_percentile(self):
@@ -459,7 +454,7 @@ def setUp(self):
         self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS)
         self.forecast_predictor = self.cube.collapsed("realization", iris.analysis.MEAN)
         self.forecast_variance = self.cube.collapsed(
-            "realization", iris.analysis.VARIANCE
+            "realization", iris.analysis.STD_DEV
         )
         self.no_of_percentiles = len(self.cube.coord("realization").points)
 
diff --git a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py
index 08e4cff2fd..a7c2740abd 100644
--- a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py
+++ b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py
@@ -119,7 +119,7 @@ def setUp(self):
         self.location_parameters = self.template_cube[0, :, :].copy()
         self.location_parameters.units = "Celsius"
         self.scale_parameters = self.template_cube[0, :, :].copy()
-        self.scale_parameters.units = "Celsius2"
+        self.scale_parameters.units = "Celsius"
 
     def test_compatible_units(self):
         """Pass in compatible cubes that should not raise an exception. No
@@ -133,7 +133,7 @@ def test_convertible_units(self):
         """Pass in cubes with units that can be made equivalent by modification
         to match the threshold units."""
         self.location_parameters.units = "Fahrenheit"
-        self.scale_parameters.units = "Fahrenheit2"
+        self.scale_parameters.units = "Fahrenheit"
         Plugin()._check_unit_compatibility(
             self.location_parameters, self.scale_parameters, self.template_cube
         )
@@ -163,7 +163,7 @@ def setUp(self):
         )
 
         location_parameter_values = np.ones((3, 3)) * 2
-        scale_parameter_values = np.ones((3, 3)) * 4
+        scale_parameter_values = np.ones((3, 3)) * 2
         self.expected = (np.ones((3, 3, 3)) * [0.75, 0.5, 0.25]).T
         self.location_parameter_values = self.template_cube[0, :, :].copy(
             data=location_parameter_values
@@ -172,7 +172,7 @@ def setUp(self):
         self.scale_parameter_values = self.template_cube[0, :, :].copy(
             data=scale_parameter_values
         )
-        self.scale_parameter_values.units = "Celsius2"
+        self.scale_parameter_values.units = "Celsius"
 
     def test_threshold_above_cube(self):
         """Test that the expected probabilities are returned for a cube in
@@ -288,7 +288,7 @@ def setUp(self):
         self.scale_parameter_values = self.template_cube[0, :, :].copy(
             data=scale_parameter_values
         )
-        self.scale_parameter_values.units = "Celsius2"
+        self.scale_parameter_values.units = "Celsius"
 
     def test_metadata_matches_template(self):
         """Test that the returned cube's metadata matches the template cube."""

From 55b5feb9215777c55ed7be456f80dff7e9a19bd8 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Mon, 17 Jan 2022 17:00:24 +0000
Subject: [PATCH 2/2] Update docstrings following review comments.

---
 .../ensemble_calibration.rst                  |  2 +-
 improver/calibration/ensemble_calibration.py  |  5 ++--
 .../ensemble_copula_coupling.py               |  9 +++---
 ...LocationAndScaleParametersToPercentiles.py | 30 +++++++++----------
 ...cationAndScaleParametersToProbabilities.py |  2 +-
 5 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst b/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
index 0e85fb2c53..292a7597a1 100644
--- a/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
+++ b/doc/source/extended_documentation/calibration/ensemble_calibration/ensemble_calibration.rst
@@ -65,7 +65,7 @@ to lie within 0 and infinity, the distribution can be represented as:
    \mathcal{N^0}(\mu,\,\sigma^{2})
 
 In this case, the :math:`\mu` is strictly interpreted as the location parameter
-and :math:`\sigma^{2}` is strictly interpreted as the scale parameter.
+and :math:`\sigma^{2}` is strictly interpreted as the square of the scale parameter.
 
 ===============================
 What is the location parameter?
diff --git a/improver/calibration/ensemble_calibration.py b/improver/calibration/ensemble_calibration.py
index c70536b94f..994a3f1939 100644
--- a/improver/calibration/ensemble_calibration.py
+++ b/improver/calibration/ensemble_calibration.py
@@ -1581,8 +1581,9 @@ def _calculate_scale_parameter(self) -> ndarray:
         )
 
         # Calculating the scale parameter, based on the raw variance S^2,
-        # where predicted variance = c + dS^2, where c = (gamma)^2 and
-        # d = (delta)^2
+        # where predicted scale parameter (or equivalently standard deviation
+        # for a normal distribution) = sqrt(c + dS^2), where c = (gamma)^2 and
+        # d = (delta)^2.
         scale_parameter = np.sqrt(
             self.coefficients_cubelist.extract_cube("emos_coefficient_gamma").data
             * self.coefficients_cubelist.extract_cube("emos_coefficient_gamma").data
             + self.coefficients_cubelist.extract_cube("emos_coefficient_delta").data
diff --git a/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
index ef4428b4db..d8dfc463ef 100644
--- a/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
+++ b/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
@@ -885,8 +885,9 @@ def _location_and_scale_parameters_to_percentiles(
             result[index, :] = percentile_method.ppf(percentile_list)
             # If percent point function (PPF) returns NaNs, fill in
             # mean instead of NaN values. NaN will only be generated if the
-            # variance is zero. Therefore, if the variance is zero, the mean
-            # value is used for all gridpoints with a NaN.
+            # scale parameter (standard deviation) is zero. Therefore, if the
+            # scale parameter (standard deviation) is zero, the mean value is
+            # used for all gridpoints with a NaN.
             if np.any(scale_data == 0):
                 nan_index = np.argwhere(np.isnan(result[index, :]))
                 result[index, nan_index] = location_data[nan_index]
@@ -1061,8 +1062,8 @@ def _check_unit_compatibility(
             scale_parameter.convert_units(threshold_units)
         except ValueError as err:
             msg = (
-                "Error: {} This is likely because the mean "
-                "variance and template cube threshold units are "
+                "Error: {} This is likely because the location parameter, "
+                "scale parameter and template cube threshold units are "
                 "not equivalent/compatible.".format(err)
             )
             raise ValueError(msg)
diff --git a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
index a51686882d..d370046428 100644
--- a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
+++ b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToPercentiles.py
@@ -104,8 +104,8 @@ def test_check_data(self):
         Test that the plugin returns an Iris.cube.Cube matching the expected
         data values when a cubes containing location and scale parameters are
         passed in, which are equivalent to the ensemble mean and ensemble
-        variance. The resulting data values are the percentiles, which have
-        been generated.
+        standard deviation. The resulting data values are the percentiles, which
+        have been generated.
         """
         result = Plugin()._location_and_scale_parameters_to_percentiles(
             self.location_parameter,
@@ -185,10 +185,10 @@ def test_simple_data_truncnorm_distribution(self):
         """
         Test that the plugin returns an iris.cube.Cube matching the expected
         data values when cubes containing the location parameter and scale
-        parameter are passed in. In this test, the ensemble mean and variance
-        is used as a proxy for the location and scale parameter. The resulting
-        data values are the percentiles, which have been generated using a
-        truncated normal distribution.
+        parameter are passed in. In this test, the ensemble mean and standard
+        deviation are used as a proxy for the location and scale parameter.
+        The resulting data values are the percentiles, which have been
+        generated using a truncated normal distribution.
""" data = np.array( [ @@ -225,7 +225,7 @@ def test_simple_data_truncnorm_distribution(self): "realization", iris.analysis.MEAN ) current_forecast_predictor.data = current_forecast_predictor.data + 1 - # Use an adjusted version of the ensemble variance as a proxy for the + # Use an adjusted version of the ensemble standard deviation as a proxy for the # scale parameter for the truncated normal distribution. current_forecast_stddev = self.temperature_cube.collapsed( "realization", iris.analysis.STD_DEV, @@ -249,7 +249,7 @@ def test_simple_data(self): """ Test that the plugin returns the expected values for the generated percentiles when an idealised set of data values between 1 and 3 - is used to create the mean (location parameter) and the variance + is used to create the mean (location parameter) and the standard deviation (scale parameter). """ data = np.array( @@ -415,7 +415,7 @@ def test_spot_forecasts_check_data(self): """ Test that the plugin returns an Iris.cube.Cube matching the expected data values when a cube containing mean (location parameter) and - variance (scale parameter) is passed in. The resulting data values are + standard deviation (scale parameter) is passed in. The resulting data values are the percentiles, which have been generated for a spot forecast. """ data = np.reshape(self.data, (3, 9)) @@ -453,9 +453,7 @@ def setUp(self): """Set up temperature cube.""" self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS) self.forecast_predictor = self.cube.collapsed("realization", iris.analysis.MEAN) - self.forecast_variance = self.cube.collapsed( - "realization", iris.analysis.STD_DEV - ) + self.forecast_stddev = self.cube.collapsed("realization", iris.analysis.STD_DEV) self.no_of_percentiles = len(self.cube.coord("realization").points) @ManageWarnings( @@ -468,7 +466,7 @@ def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" result = Plugin().process( self.forecast_predictor, - self.forecast_variance, + self.forecast_stddev, self.cube, no_of_percentiles=self.no_of_percentiles, ) @@ -507,7 +505,7 @@ def test_number_of_percentiles(self): result = Plugin().process( self.forecast_predictor, - self.forecast_variance, + self.forecast_stddev, self.cube, no_of_percentiles=self.no_of_percentiles, ) @@ -549,7 +547,7 @@ def test_list_of_percentiles(self): result = Plugin().process( self.forecast_predictor, - self.forecast_variance, + self.forecast_stddev, self.cube, percentiles=percentiles, ) @@ -574,7 +572,7 @@ def test_multiple_keyword_arguments_error(self): with self.assertRaisesRegex(ValueError, msg): Plugin().process( self.forecast_predictor, - self.forecast_variance, + self.forecast_stddev, self.cube, no_of_percentiles=self.no_of_percentiles, percentiles=percentiles, diff --git a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py index a7c2740abd..39d41364a6 100644 --- a/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py +++ b/improver_tests/ensemble_copula_coupling/test_ConvertLocationAndScaleParametersToProbabilities.py @@ -143,7 +143,7 @@ def test_incompatible_units(self): """Pass in cubes of incompatible units that should raise an exception.""" self.scale_parameters.units = "m s-1" - msg = "This is likely because the mean" + msg = "This is likely because the location" with self.assertRaisesRegex(ValueError, msg): Plugin()._check_unit_compatibility( 
self.location_parameters, self.scale_parameters, self.template_cube
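
Note (illustration only, not part of either patch): the loc/scale convention that
these two commits adopt can be sketched in standalone Python with scipy. The
documentation above defines mu = a + b*Xbar and sigma = sqrt(c + d*S^2), with
sigma passed to scipy directly as a standard deviation. Everything below is a
minimal sketch under those definitions; the coefficient values, ensemble values
and variable names are hypothetical and invented for this example.

    import numpy as np
    from scipy.stats import norm, truncnorm

    # Hypothetical EMOS coefficients a, b, c, d (in IMPROVER these derive
    # from alpha, beta, gamma and delta); the values are illustrative only.
    a, b, c, d = 0.1, 1.0, 0.05, 0.9

    # A single grid point's raw ensemble, in kelvin.
    realizations = np.array([272.0, 274.5, 273.2])
    mean = realizations.mean()     # ensemble mean, the predictor
    variance = realizations.var()  # raw ensemble variance S^2

    # Location and scale parameters as defined in the documentation above.
    # Note that scale is a standard deviation, so it carries the forecast
    # units (K) rather than the squared units (K^2) used previously.
    location = a + b * mean
    scale = np.sqrt(c + d * variance)

    # scipy's loc/scale convention takes the standard deviation directly,
    # which is why the np.sqrt calls at the point of use are removed.
    percentiles = norm(loc=location, scale=scale).ppf([0.1, 0.5, 0.9])

    # For a normal distribution truncated to lie within 0 and infinity,
    # the bounds are first rescaled into standard units, mirroring
    # _rescale_shape_parameters in ensemble_copula_coupling.py.
    lower = (0.0 - location) / scale
    median = truncnorm(lower, np.inf, loc=location, scale=scale).ppf(0.5)

For a plain normal distribution the two conventions differ only by a square
root, but for the truncated normal the rescaled bounds depend on sigma itself,
which is why the unit change (K^2 to K) and the single square root in
_calculate_scale_parameter have to move together.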