diff --git a/ladybug/_datacollectionbase.py b/ladybug/_datacollectionbase.py
index 8e7751d9..814d1f00 100644
--- a/ladybug/_datacollectionbase.py
+++ b/ladybug/_datacollectionbase.py
@@ -580,6 +580,17 @@ def _percentile(self, values, percent, key=lambda x: x):
         d1 = key(vals[int(c)]) * (k - f)
         return d0 + d1
 
+    def _average(self, vals):
+        return sum(vals) / len(vals)
+
+    def _total(self, vals):
+        return sum(vals)
+
+    def _get_percentile_function(self, percentile):
+        def percentile_function(vals):
+            return self._percentile(vals, percentile)
+        return percentile_function
+
     def __len__(self):
         return len(self._values)
 
@@ -599,6 +610,11 @@ def __iter__(self):
     def __contains__(self, item):
         return item in self._values
 
+    @property
+    def is_continuous(self):
+        """Boolean denoting whether the data collection is continuous."""
+        return False
+
     @property
     def isDataCollection(self):
         """Return True."""
diff --git a/ladybug/datacollection.py b/ladybug/datacollection.py
index 98a14089..881fe5a1 100644
--- a/ladybug/datacollection.py
+++ b/ladybug/datacollection.py
@@ -166,33 +166,11 @@ def group_by_day(self):
 
     def average_daily(self):
         """Return a daily collection of values averaged for each day."""
-        data_dict = self.group_by_day()
-        avg_data, d_times = [], []
-        for i in self.header.analysis_period.doys_int:
-            vals = data_dict[i]
-            if vals != []:
-                avg_data.append(sum(vals) / len(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'average'
-        collection = DailyCollection(new_header, avg_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('daily', 'average')
 
     def total_daily(self):
         """Return a daily collection of values totaled over each day."""
-        data_dict = self.group_by_day()
-        total_data, d_times = [], []
-        for i in self.header.analysis_period.doys_int:
-            vals = data_dict[i]
-            if vals != []:
-                total_data.append(sum(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'total'
-        collection = DailyCollection(new_header, total_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('daily', 'total')
 
     def percentile_daily(self, percentile):
         """Return a daily collection of values at the input percentile of each day.
@@ -201,20 +179,7 @@ def percentile_daily(self, percentile):
             percentile: A float value from 0 to 100 representing the
                 requested percentile.
         """
-        assert 0 <= percentile <= 100, \
-            'percentile must be between 0 and 100. Got {}'.format(percentile)
-        data_dict = self.group_by_day()
-        per_data, d_times = [], []
-        for i in self.header.analysis_period.doys_int:
-            vals = data_dict[i]
-            if vals != []:
-                per_data.append(self._percentile(vals, percentile))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = '{} percentile'.format(percentile)
-        collection = DailyCollection(new_header, per_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('daily', 'percentile', percentile)
 
     def group_by_month(self):
         """Return a dictionary of this collection's values grouped by each month.
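The daily, monthly, and monthly-per-hour methods above all collapse into the shared _time_interval_operation helper added later in this patch. A minimal usage sketch of the resulting API follows; it is illustrative only, and the import paths assume ladybug's usual module layout rather than anything shown in the diff.

# Illustrative usage sketch of the refactored daily aggregation API; import
# paths assume ladybug's usual module layout and are not part of this patch.
from ladybug.header import Header
from ladybug.analysisperiod import AnalysisPeriod
from ladybug.datacollection import HourlyContinuousCollection
from ladybug.datatype.temperature import Temperature

header = Header(Temperature(), 'C', AnalysisPeriod())
dry_bulb = HourlyContinuousCollection(header, list(range(8760)))

daily_avg = dry_bulb.average_daily()       # one averaged value per day
daily_sum = dry_bulb.total_daily()         # one summed value per day
daily_p75 = dry_bulb.percentile_daily(75)  # 75th percentile of each day

print(len(daily_avg))                          # 365
print(daily_avg.header.metadata['operation'])  # -> 'average'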
@@ -230,33 +195,11 @@ def group_by_month(self):
 
     def average_monthly(self):
         """Return a monthly collection of values averaged for each month."""
-        data_dict = self.group_by_month()
-        avg_data, d_times = [], []
-        for i in self.header.analysis_period.months_int:
-            vals = data_dict[i]
-            if vals != []:
-                avg_data.append(sum(vals)/len(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'average'
-        collection = MonthlyCollection(new_header, avg_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthly', 'average')
 
     def total_monthly(self):
         """Return a monthly collection of values totaled over each month."""
-        data_dict = self.group_by_month()
-        total_data, d_times = [], []
-        for i in self.header.analysis_period.months_int:
-            vals = data_dict[i]
-            if vals != []:
-                total_data.append(sum(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'total'
-        collection = MonthlyCollection(new_header, total_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthly', 'total')
 
     def percentile_monthly(self, percentile):
         """Return a monthly collection of values at the input percentile of each month.
@@ -265,20 +208,7 @@ def percentile_monthly(self, percentile):
             percentile: A float value from 0 to 100 representing the
                 requested percentile.
         """
-        assert 0 <= percentile <= 100, \
-            'percentile must be between 0 and 100. Got {}'.format(percentile)
-        data_dict = self.group_by_month()
-        per_data, d_times = [], []
-        for i in self.header.analysis_period.months_int:
-            vals = data_dict[i]
-            if vals != []:
-                per_data.append(self._percentile(vals, percentile))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = '{} percentile'.format(percentile)
-        collection = MonthlyCollection(new_header, per_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthly', 'percentile', percentile)
 
     def group_by_month_per_hour(self):
         """Return a dictionary of this collection's values grouped by each month per hour.
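The monthly methods reduce to the same helper, and the percentile case works because _get_percentile_function wraps the percentile in a closure so that 'average', 'total', and 'percentile' all present the same one-argument callable. The standalone sketch below illustrates that pattern; it is not the library's code and uses a simplified linear-interpolation percentile.

# Standalone sketch of the closure pattern behind _get_percentile_function;
# the percentile math here is a simplified linear interpolation, not a copy
# of the library's _percentile method.
def make_percentile_function(percentile):
    def percentile_function(vals):
        ordered = sorted(vals)
        k = (len(ordered) - 1) * percentile / 100.0
        lower = int(k)
        upper = min(lower + 1, len(ordered) - 1)
        return ordered[lower] + (ordered[upper] - ordered[lower]) * (k - lower)
    return percentile_function

# Every operation becomes a plain one-argument callable, so the code that
# loops over the grouped daily/monthly values does not care which one it got.
operations = {
    'average': lambda vals: sum(vals) / len(vals),
    'total': sum,
    'percentile': make_percentile_function(75),
}
assert operations['total']([1, 2, 3, 4, 5]) == 15
assert operations['average']([1, 2, 3, 4, 5]) == 3.0
assert operations['percentile']([1, 2, 3, 4, 5]) == 4.0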
@@ -298,33 +228,11 @@ def group_by_month_per_hour(self):
 
     def average_monthly_per_hour(self):
         """Return a monthly per hour data collection of average values."""
-        data_dict = self.group_by_month_per_hour()
-        avg_data, d_times = [], []
-        for i in self.header.analysis_period.months_per_hour:
-            vals = data_dict[i]
-            if vals != []:
-                avg_data.append(sum(vals)/len(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'average'
-        collection = MonthlyPerHourCollection(new_header, avg_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthlyperhour', 'average')
 
     def total_monthly_per_hour(self):
         """Return a monthly per hour collection of totaled values."""
-        data_dict = self.group_by_month_per_hour()
-        total_data, d_times = [], []
-        for i in self.header.analysis_period.months_per_hour:
-            vals = data_dict[i]
-            if vals != []:
-                total_data.append(sum(vals))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = 'total'
-        collection = MonthlyPerHourCollection(new_header, total_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthlyperhour', 'total')
 
     def percentile_monthly_per_hour(self, percentile):
         """Return a monthly per hour collection of values at the input percentile.
@@ -333,20 +241,7 @@ def percentile_monthly_per_hour(self, percentile):
             percentile: A float value from 0 to 100 representing the
                 requested percentile.
         """
-        assert 0 <= percentile <= 100, \
-            'percentile must be between 0 and 100. Got {}'.format(percentile)
-        data_dict = self.group_by_month_per_hour()
-        total_data, d_times = [], []
-        for i in self.header.analysis_period.months_per_hour:
-            vals = data_dict[i]
-            if vals != []:
-                total_data.append(self._percentile(vals, percentile))
-                d_times.append(i)
-        new_header = self.header.duplicate()
-        new_header.metadata['operation'] = '{} percentile'.format(percentile)
-        collection = MonthlyPerHourCollection(new_header, total_data, d_times)
-        collection._validated_a_period = True
-        return collection
+        return self._time_interval_operation('monthlyperhour', 'percentile', percentile)
 
     def interpolate_holes(self):
         """Linearly interpolate over holes in this collection to make it continuous.
@@ -550,6 +445,54 @@ def _check_analysis_period(self, analysis_period):
                 'Collection header. {} != {}'.format(
                     analysis_period.is_leap_year,
                     self.header.analysis_period.is_leap_year)
 
+    def _time_interval_operation(self, interval, operation, percentile=0):
+        """Get a collection of a certain time interval with a given math operation."""
+        # retrieve the function that correctly describes the operation
+        if operation == 'average':
+            funct = self._average
+        elif operation == 'total':
+            funct = self._total
+        else:
+            assert 0 <= percentile <= 100, \
+                'percentile must be between 0 and 100. Got {}'.format(percentile)
+            funct = self._get_percentile_function(percentile)
+
+        # retrieve the data that correctly describes the time interval
+        if interval == 'monthly':
+            data_dict = self.group_by_month()
+            dates = self.header.analysis_period.months_int
+        elif interval == 'daily':
+            data_dict = self.group_by_day()
+            dates = self.header.analysis_period.doys_int
+        elif interval == 'monthlyperhour':
+            data_dict = self.group_by_month_per_hour()
+            dates = self.header.analysis_period.months_per_hour
+        else:
+            raise ValueError('Invalid input value for interval: {}'.format(interval))
+
+        # get the data and header for the new collection
+        new_data, d_times = [], []
+        for i in dates:
+            vals = data_dict[i]
+            if vals != []:
+                new_data.append(funct(vals))
+                d_times.append(i)
+        new_header = self.header.duplicate()
+        if operation == 'percentile':
+            new_header.metadata['operation'] = '{} percentile'.format(percentile)
+        else:
+            new_header.metadata['operation'] = operation
+
+        # build the final data collection
+        if interval == 'monthly':
+            collection = MonthlyCollection(new_header, new_data, d_times)
+        elif interval == 'daily':
+            collection = DailyCollection(new_header, new_data, d_times)
+        elif interval == 'monthlyperhour':
+            collection = MonthlyPerHourCollection(new_header, new_data, d_times)
+
+        collection._validated_a_period = True
+        return collection
+
     @property
     def isHourly(self):
         return True
@@ -967,6 +910,11 @@ def _get_analysis_period_subset(self, a_per):
         else:
             return AnalysisPeriod(*n_ap)
 
+    @property
+    def is_continuous(self):
+        """Boolean denoting whether the data collection is continuous."""
+        return True
+
     @property
     def isContinuous(self):
         return True
@@ -1041,6 +989,36 @@ def filter_by_doys(self, doys):
         _filt_header = self.header.duplicate()
         return DailyCollection(_filt_header, _filt_values, _filt_datetimes)
 
+    def group_by_month(self):
+        """Return a dictionary of this collection's values grouped by each month.
+
+        Key values are between 1-12.
+        """
+        data_by_month = OrderedDict()
+        for d in xrange(1, 13):
+            data_by_month[d] = []
+        for v, doy in zip(self._values, self.datetimes):
+            dt = DateTime.from_hoy((doy - 1) * 24)
+            data_by_month[dt.month].append(v)
+        return data_by_month
+
+    def average_monthly(self):
+        """Return a monthly collection of values averaged for each month."""
+        return self._monthly_operation('average')
+
+    def total_monthly(self):
+        """Return a monthly collection of values totaled over each month."""
+        return self._monthly_operation('total')
+
+    def percentile_monthly(self, percentile):
+        """Return a monthly collection of values at the input percentile of each month.
+
+        Args:
+            percentile: A float value from 0 to 100 representing the
+                requested percentile.
+        """
+        return self._monthly_operation('percentile', percentile)
+
     def validate_analysis_period(self):
         """Get a collection where the header analysis_period aligns with datetimes.
@@ -1111,6 +1089,46 @@ def _check_analysis_period(self, analysis_period):
                 'Collection header. {} != {}'.format(
                     analysis_period.is_leap_year,
                     self.header.analysis_period.is_leap_year)
 
+    def _monthly_operation(self, operation, percentile=0):
+        """Get a MonthlyCollection given a certain operation."""
+        # Retrieve the correct operation.
+        if operation == 'average':
+            funct = self._average
+        elif operation == 'total':
+            funct = self._total
+        else:
+            assert 0 <= percentile <= 100, \
+                'percentile must be between 0 and 100. Got {}'.format(percentile)
+            funct = self._get_percentile_function(percentile)
+
+        # Get the data for the new collection
+        data_dict = self.group_by_month()
+        new_data, d_times = [], []
+        for i in self.header.analysis_period.months_int:
+            vals = data_dict[i]
+            if vals != []:
+                new_data.append(funct(vals))
+                d_times.append(i)
+
+        # build the new monthly collection
+        new_header = self.header.duplicate()
+        if operation == 'percentile':
+            new_header.metadata['operation'] = '{} percentile'.format(percentile)
+        else:
+            new_header.metadata['operation'] = operation
+        collection = MonthlyCollection(new_header, new_data, d_times)
+        collection._validated_a_period = True
+        return collection
+
+    @property
+    def is_continuous(self):
+        """Boolean denoting whether the data collection is continuous."""
+        if self._validated_a_period is True and \
+                len(self.values) == len(self.header.analysis_period.doys_int):
+            return True
+        else:
+            return False
+
     @property
     def isDaily(self):
         return True
@@ -1233,6 +1251,15 @@ def validate_analysis_period(self):
         new_coll._validated_a_period = True
         return new_coll
 
+    @property
+    def is_continuous(self):
+        """Boolean denoting whether the data collection is continuous."""
+        if self._validated_a_period is True and \
+                len(self.values) == len(self.header.analysis_period.months_int):
+            return True
+        else:
+            return False
+
     @property
     def isMonthly(self):
         return True
@@ -1369,6 +1396,16 @@ def validate_analysis_period(self):
         new_coll._validated_a_period = True
         return new_coll
 
+    @property
+    def is_continuous(self):
+        """Boolean denoting whether the data collection is continuous."""
+        a_per = self.header.analysis_period
+        if self._validated_a_period is True and a_per.st_hour == 0 and \
+                a_per.end_hour == 23 and len(self.values) == len(a_per.months_per_hour):
+            return True
+        else:
+            return False
+
     @property
     def isMonthlyPerHour(self):
         return True
diff --git a/ladybug/datatype/base.py b/ladybug/datatype/base.py
index 944046d2..c9811a65 100644
--- a/ladybug/datatype/base.py
+++ b/ladybug/datatype/base.py
@@ -28,9 +28,10 @@ class DataTypeBase(object):
             (eg. 'UTCI' for Universal Thermal Climate Index).
             This can also be a letter that represents the data type in a formula.
             (eg. 'A' for Area; 'P' for Pressure)
-        unit_descr: An optional description of the units if numerical values
-            of these units relate to specific categories.
-            (eg. -1 = Cold, 0 = Neutral, +1 = Hot) (eg. 0 = False, 1 = True)
+        unit_descr: An optional dictionary describing categories that the numerical
+            values of the units relate to. For example:
+            {-1: 'Cold', 0: 'Neutral', +1: 'Hot'}
+            {0: 'False', 1: 'True'}
         point_in_time: Boolean to note whether the data type represents conditions
             at a single instant in time (True) as opposed to being an average or
             accumulation over time (False) when it is found in hourly lists of data.
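With unit_descr now a plain dictionary keyed by numeric code, category labels can be looked up directly instead of being parsed out of a free-form string. A small illustrative sketch, using the same mapping that ThermalCondition receives further down in this patch:

# Illustrative sketch: turning a list of numeric condition codes into category
# labels with a dict-style unit_descr (same mapping ThermalCondition gets below).
from collections import Counter

unit_descr = {-1: 'Cold', 0: 'Neutral', 1: 'Hot'}
condition_values = [-1, 0, 0, 1, 0, -1, 1, 1]  # e.g. one value per hour
hours_per_category = Counter(unit_descr[int(v)] for v in condition_values)
print(hours_per_category)  # Counter({'Neutral': 3, 'Hot': 3, 'Cold': 2})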
@@ -57,7 +58,7 @@ class DataTypeBase(object):
 
     _max = float('+inf')
     _abbreviation = ''
-    _unit_descr = ''
+    _unit_descr = None
     _point_in_time = True
     _cumulative = False
 
diff --git a/ladybug/datatype/energyflux.py b/ladybug/datatype/energyflux.py
index bdae0b88..be2d136b 100644
--- a/ladybug/datatype/energyflux.py
+++ b/ladybug/datatype/energyflux.py
@@ -72,8 +72,12 @@ def isEnergyFlux(self):
 
 
 class MetabolicRate(EnergyFlux):
+    _min = 0
     _abbreviation = 'MetR'
-    _unit_descr = '1 = Seated, \n1.2 = Standing, \n2 = Walking'
+
+
+class EffectiveRadiantField(EnergyFlux):
+    _abbreviation = 'ERF'
 
 
 class Irradiance(EnergyFlux):
diff --git a/ladybug/datatype/percentage.py b/ladybug/datatype/percentage.py
index 7410c868..11c7e28e 100644
--- a/ladybug/datatype/percentage.py
+++ b/ladybug/datatype/percentage.py
@@ -60,13 +60,6 @@ class PercentagePeopleDissatisfied(Percentage):
     _abbreviation = 'PPD'
 
 
-class ThermalComfort(Percentage):
-    _min = 0
-    _max = 100
-    _abbreviation = 'TC'
-    _unit_descr = '1 = comfortable, 0 = uncomfortable'
-
-
 class RelativeHumidity(Percentage):
     _min = 0
     _abbreviation = 'RH'
diff --git a/ladybug/datatype/rvalue.py b/ladybug/datatype/rvalue.py
index 07519690..ee67f534 100644
--- a/ladybug/datatype/rvalue.py
+++ b/ladybug/datatype/rvalue.py
@@ -51,4 +51,4 @@ def isRValue(self):
 
 class ClothingInsulation(RValue):
     _abbreviation = 'Rclo'
-    _unit_descr = '0 = No Clothing, \n0.5 = T-shirt + Shorts, \n1 = 3-piece Suit'
+    _unit_descr = {0: 'No Clothing', 0.5: 'T-shirt + Shorts', 1: '3-piece Suit'}
diff --git a/ladybug/datatype/thermalcondition.py b/ladybug/datatype/thermalcondition.py
index babb2bd5..ae861443 100644
--- a/ladybug/datatype/thermalcondition.py
+++ b/ladybug/datatype/thermalcondition.py
@@ -10,8 +10,10 @@ class ThermalCondition(DataTypeBase):
     _units = ('condition', 'PMV')
     _si_units = ('condition', 'PMV')
     _ip_units = ('condition', 'PMV')
+    _min = -1
+    _max = 1
     _abbreviation = 'Tcond'
-    _unit_descr = '-1 = Cold, 0 = Neutral, +1 = Hot'
+    _unit_descr = {-1: 'Cold', 0: 'Neutral', 1: 'Hot'}
 
     def _condition_to_PMV(self, value):
         return value
@@ -38,62 +40,71 @@ def isThermalCondition(self):
 
 
 class PredictedMeanVote(ThermalCondition):
+    _min = float('-inf')
+    _max = float('+inf')
     _abbreviation = 'PMV'
-    _unit_descr = '-3 = Cold, -2 = Cool, -1 = Slightly Cool, \n' \
-                  '0 = Neutral, \n' \
-                  '+1 = Slightly Warm, +2 = Warm, +3 = Hot'
+    _unit_descr = {-3: 'Cold', -2: 'Cool', -1: 'Slightly Cool', 0: 'Neutral',
+                   1: 'Slightly Warm', 2: 'Warm', 3: 'Hot'}
+
+
+class ThermalComfort(ThermalCondition):
+    _min = 0
+    _max = 1
+    _abbreviation = 'TC'
+    _unit_descr = {1: 'Comfortable', 0: 'Uncomfortable'}
 
 
 class DiscomfortReason(ThermalCondition):
+    _min = -2
+    _max = 2
     _abbreviation = 'RDiscomf'
-    _unit_descr = '-2 = Too Dry, -1 = Too Cold, \n' \
-                  '0 = Comfortable, \n' \
-                  '+1 = Too Hot, +2 = Too Humid'
+    _unit_descr = {-2: 'Too Dry', -1: 'Too Cold', 0: 'Comfortable',
+                   1: 'Too Hot', 2: 'Too Humid'}
 
 
 class ThermalConditionFivePoint(ThermalCondition):
+    _min = -2
+    _max = 2
     _abbreviation = 'Tcond-5'
-    _unit_descr = '-2 = Strong/Extreme Cold, -1 = Moderate Cold, \n' \
-                  '0 = No Thermal Stress, \n' \
-                  '+1 = Moderate Heat, +2 = Strong/Extreme Heat'
+    _unit_descr = {-2: 'Strong/Extreme Cold', -1: 'Moderate Cold',
+                   0: 'No Thermal Stress', 1: 'Moderate Heat', 2: 'Strong/Extreme Heat'}
 
 
 class ThermalConditionSevenPoint(ThermalCondition):
+    _min = -3
+    _max = 3
     _abbreviation = 'Tcond-7'
-    _unit_descr = '-3 = Very Strong/Extreme Cold, ' \
-                  '-2 = Strong Cold, -1 = Moderate Cold, \n' \
-                  '0 = No Thermal Stress, \n' \
-                  '+1 = Moderate Heat, +2 = Strong Heat, ' \
-                  '+3 = Very Strong/Extreme Heat'
+    _unit_descr = {-3: 'Very Strong/Extreme Cold', -2: 'Strong Cold',
+                   -1: 'Moderate Cold', 0: 'No Thermal Stress', 1: 'Moderate Heat',
+                   2: 'Strong Heat', 3: 'Very Strong/Extreme Heat'}
 
 
 class ThermalConditionNinePoint(ThermalCondition):
+    _min = -4
+    _max = 4
     _abbreviation = 'Tcond-9'
-    _unit_descr = '-4 = Very Strong/Extreme Cold, ' \
-                  '-3 = Strong Cold, -2 = Moderate Cold, -1 = Slight Cold, \n' \
-                  '0 = No Thermal Stress, \n' \
-                  '+1 = Slight Heat, +2 = Moderate Heat, +3 = Strong Heat, '\
-                  '+4 = Very Strong/Extreme Heat'
+    _unit_descr = {-4: 'Very Strong/Extreme Cold', -3: 'Strong Cold',
+                   -2: 'Moderate Cold', -1: 'Slight Cold', 0: 'No Thermal Stress',
+                   1: 'Slight Heat', 2: 'Moderate Heat', 3: 'Strong Heat',
+                   4: 'Very Strong/Extreme Heat'}
 
 
 class ThermalConditionElevenPoint(ThermalCondition):
+    _min = -5
+    _max = 5
     _abbreviation = 'Tcond-11'
-    _unit_descr = '-5 = Extreme Cold, -4 = Very Strong Cold, ' \
-                  '-3 = Strong Cold, -2 = Moderate Cold, -1 = Slight Cold, \n' \
-                  '0 = No Thermal Stress, \n' \
-                  '+1 = Slight Heat, +2 = Moderate Heat, +3 = Strong Heat, ' \
-                  '+4 = Very Strong Heat, +5 = Extreme Heat'
+    _unit_descr = {-5: 'Extreme Cold', -4: 'Very Strong Cold', -3: 'Strong Cold',
+                   -2: 'Moderate Cold', -1: 'Slight Cold', 0: 'No Thermal Stress',
+                   1: 'Slight Heat', 2: 'Moderate Heat', 3: 'Strong Heat',
+                   4: 'Very Strong Heat', 5: 'Extreme Heat'}
 
 
 class UTCICategory(ThermalCondition):
+    _min = 0
+    _max = 9
     _abbreviation = 'UTCIcond'
-    _unit_descr = '0 = extreme cold stress' \
-                  '1 = very strong cold stress' \
-                  '2 = strong cold stress' \
-                  '3 = moderate cold stress' \
-                  '4 = slight cold stress' \
-                  '5 = no thermal stress' \
-                  '6 = moderate heat stress' \
-                  '7 = strong heat stress' \
-                  '8 = strong heat stress' \
-                  '9 = extreme heat stress'
+    _unit_descr = {0: 'Extreme Cold Stress', 1: 'Very Strong Cold Stress',
+                   2: 'Strong Cold Stress', 3: 'Moderate Cold Stress',
+                   4: 'Slight Cold Stress', 5: 'No Thermal Stress',
+                   6: 'Moderate Heat Stress', 7: 'Strong Heat Stress',
+                   8: 'Very Strong Heat Stress', 9: 'Extreme Heat Stress'}
diff --git a/tests/datacollection_test.py b/tests/datacollection_test.py
index 8e2b06d0..6af419d9 100644
--- a/tests/datacollection_test.py
+++ b/tests/datacollection_test.py
@@ -71,6 +71,7 @@ def test_init_hourly(self):
         assert dc1.datetimes == (dt1, dt2)
         assert dc1.values == (v1, v2)
         assert dc1.average == avg
+        assert dc1.is_continuous is False
         str(dc1)  # Test the string representation of the collection
         str(dc1.header)  # Test the string representation of the header
 
@@ -86,6 +87,7 @@ def test_init_daily(self):
         assert dc1.datetimes == tuple(a_per.doys_int)
         assert dc1.values == (v1, v2)
         assert dc1.average == avg
+        assert dc1.is_continuous is False
         str(dc1)  # Test the string representation of the collection
         str(dc1.header)  # Test the string representation of the header
 
@@ -101,6 +103,7 @@ def test_init_monthly(self):
         assert dc1.datetimes == tuple(a_per.months_int)
         assert dc1.values == (v1, v2)
         assert dc1.average == avg
+        assert dc1.is_continuous is False
         str(dc1)  # Test the string representation of the collection
         str(dc1.header)  # Test the string representation of the header
 
@@ -116,6 +119,7 @@ def test_init_monthly_per_hour(self):
         assert dc1.datetimes == tuple(a_per.months_per_hour)
         assert dc1.values == tuple(vals)
         assert dc1.average == avg
+        assert dc1.is_continuous is False
         str(dc1)  # Test the string representation of the collection
         str(dc1.header)  # Test the string representation of the header
 
@@ -129,6 +133,7 @@ def test_init_continuous(self):
         assert len(dc1.datetimes) == 8760
         assert list(dc1.values) == list(xrange(8760))
         assert dc1.average == 4379.5
+        assert dc1.is_continuous is True
         str(dc1)  # Test the string representation of the collection
         str(dc1.header)  # Test the string representation of the header
 
@@ -708,6 +713,7 @@ def test_average_daily(self):
         assert len(new_dc) == 365
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 365
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_day().items():
             assert new_dc[i - 1] == sum(val) / len(val)
 
@@ -721,6 +727,7 @@ def test_total_daily(self):
         assert len(new_dc) == 365
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 365
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_day().items():
             assert new_dc[i - 1] == sum(val)
 
@@ -734,6 +741,7 @@ def test_percentile_daily(self):
         assert len(new_dc) == 365
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 365
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_day().items():
             assert new_dc[i - 1] == 5.75
 
@@ -747,6 +755,7 @@ def test_average_monthly(self):
         assert len(new_dc) == 12
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 12
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_month().items():
             assert new_dc[i - 1] == sum(val) / len(val)
 
@@ -760,6 +769,7 @@ def test_total_monthly(self):
         assert len(new_dc) == 12
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 12
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_month().items():
             assert new_dc[i - 1] == sum(val)
 
@@ -773,9 +783,47 @@ def test_percentile_monthly(self):
         assert len(new_dc) == 12
         assert new_dc.datetimes[0] == 1
         assert new_dc.datetimes[-1] == 12
+        assert new_dc.is_continuous is True
         for i, val in dc.group_by_month().items():
             assert new_dc[i - 1] == 50
 
+    def test_average_monthly_on_daily_collection(self):
+        """Test the average monthly method on a daily collection."""
+        header = Header(Temperature(), 'C', AnalysisPeriod())
+        values = list(xrange(1, 366))
+        dc = DailyCollection(header, values, values)
+        new_dc = dc.average_monthly()
+        assert isinstance(new_dc, MonthlyCollection)
+        assert len(new_dc) == 12
+        assert new_dc.datetimes[0] == 1
+        assert new_dc.datetimes[-1] == 12
+        for i, val in dc.group_by_month().items():
+            assert new_dc[i - 1] == sum(val) / len(val)
+
+    def test_total_monthly_on_daily_collection(self):
+        """Test the total monthly method on a daily collection."""
+        header = Header(Temperature(), 'C', AnalysisPeriod())
+        values = list(xrange(1, 366))
+        dc = DailyCollection(header, values, values)
+        new_dc = dc.total_monthly()
+        assert isinstance(new_dc, MonthlyCollection)
+        assert len(new_dc) == 12
+        assert new_dc.datetimes[0] == 1
+        assert new_dc.datetimes[-1] == 12
+        for i, val in dc.group_by_month().items():
+            assert new_dc[i - 1] == sum(val)
+
+    def test_percentile_monthly_on_daily_collection(self):
+        """Test the percentile monthly method on a daily collection."""
+        header = Header(Temperature(), 'C', AnalysisPeriod())
+        values = list(xrange(1, 366))
+        dc = DailyCollection(header, values, values)
+        new_dc = dc.percentile_monthly(25)
+        assert isinstance(new_dc, MonthlyCollection)
+        assert len(new_dc) == 12
+        assert new_dc.datetimes[0] == 1
+        assert new_dc.datetimes[-1] == 12
+
     def test_average_monthly_per_hour(self):
         """Test the average monthly per hour method."""
         header = Header(Temperature(), 'C', AnalysisPeriod())
@@ -786,6 +834,7 @@ def test_average_monthly_per_hour(self):
         assert len(new_dc) == 12 * 24
         assert new_dc.datetimes[0] == (1, 0)
         assert new_dc.datetimes[-1] == (12, 23)
+        assert new_dc.is_continuous is True
         for i, val in enumerate(dc.group_by_month_per_hour().values()):
             assert new_dc[i] == sum(val) / len(val)
 
@@ -799,6 +848,7 @@ def test_total_monthly_per_hour(self):
         assert len(new_dc) == 12 * 24
         assert new_dc.datetimes[0] == (1, 0)
         assert new_dc.datetimes[-1] == (12, 23)
+        assert new_dc.is_continuous is True
         for i, val in enumerate(dc.group_by_month_per_hour().values()):
             assert new_dc[i] == sum(val)
 
@@ -812,6 +862,7 @@ def test_percentile_monthly_per_hour(self):
         assert len(new_dc) == 12 * 24
         assert new_dc.datetimes[0] == (1, 0)
         assert new_dc.datetimes[-1] == (12, 23)
+        assert new_dc.is_continuous is True
         pct_vals = list(xrange(24)) * 12
         for i, val in enumerate(pct_vals):
             assert new_dc[i] == val
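The new is_continuous property gives every collection type a uniform way to report whether it covers its full analysis period, which is what the assertions above exercise. A rough sketch of that behaviour follows; it is illustrative only, and the import paths and the validate_analysis_period round trip assume ladybug's usual API rather than anything shown in this diff.

# Rough sketch of the behaviour exercised by the tests above; import paths
# assume ladybug's usual module layout and are not part of this patch.
from ladybug.header import Header
from ladybug.analysisperiod import AnalysisPeriod
from ladybug.datacollection import HourlyContinuousCollection, DailyCollection
from ladybug.datatype.temperature import Temperature

header = Header(Temperature(), 'C', AnalysisPeriod())

hourly = HourlyContinuousCollection(header, list(range(8760)))
print(hourly.is_continuous)  # True: continuous by construction

doys = list(range(1, 366))
daily = DailyCollection(header, doys, doys)
print(daily.is_continuous)   # False: the analysis period has not been validated yet
daily = daily.validate_analysis_period()
print(daily.is_continuous)   # True: validated, with one value per day of the year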