Commit a12f070

rename of module

RyanAugust committed Jun 25, 2023
1 parent dd76cc1
Showing 12 changed files with 56 additions and 49 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build_test.yaml
@@ -23,7 +23,7 @@ jobs:
python -m pip install --upgrade pip
pip install flake8
- name: Analysing the code with flake8
- run: flake8 CheetahPyAnalytics/ --indent-size=4 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4
+ run: flake8 fitlitics/ --indent-size=4 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4

build:
name: build & test
7 changes: 7 additions & 0 deletions .pre-commit-config.yaml
@@ -1,4 +1,11 @@
repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0 # this is optional, use `pre-commit autoupdate` to get the latest rev!
+ hooks:
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: trailing-whitespace

- repo: local
hooks:
- id: flake8
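The newly added pre-commit hooks (check-yaml, end-of-file-fixer, trailing-whitespace) most likely account for the many whitespace-only line pairs in the rest of this diff. Assuming pre-commit is installed, the hooks can be applied locally with "pre-commit install" followed by "pre-commit run --all-files".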
2 changes: 1 addition & 1 deletion README.md
@@ -1,2 +1,2 @@
- # CheetahPyAnalytics
+ # fitlitics
Python Analytics engine for interacting with both 1p and opendata from Golden Cheetah
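For downstream code, the practical effect of the rename is the import path. A minimal sketch, assuming the package is installed and using the names exported by fitlitics/__init__.py and imported in the tests below:

    from fitlitics import fetch_new_dataset, dataset_preprocess, metric_functions

    # the package itself builds a shared metric_functions() instance at import time (see __init__.py below)
    metric_funcs = metric_functions()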
4 changes: 2 additions & 2 deletions environment.yaml
@@ -1,8 +1,8 @@
- name: CheetahPyAnalytics
+ name: fitlitics
dependencies:
- python=3.10
- pip
- requests
- numpy
- pip:
- - git+https://github.com/RyanAugust/CheetahPy.git
+ - git+https://github.com/RyanAugust/CheetahPy.git
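With environment.yaml now naming the environment fitlitics, it can be recreated locally with "conda env create -f environment.yaml" followed by "conda activate fitlitics".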
@@ -10,31 +10,31 @@

class fetch_new_dataset:
def __init__(self):
- self.metrics_list = ['Duration', 'TSS', 'Average_Heart_Rate', 'Max_Heartrate',
- 'Average_Power', 'Athlete_Weight', 'Estimated_VO2MAX', '10_sec_Peak_Pace_Swim', 'xPace',
- 'Pace', 'IsoPower', 'Power_Index', 'L1_Time_in_Zone', 'L2_Time_in_Zone', 'L3_Time_in_Zone',
+ self.metrics_list = ['Duration', 'TSS', 'Average_Heart_Rate', 'Max_Heartrate',
+ 'Average_Power', 'Athlete_Weight', 'Estimated_VO2MAX', '10_sec_Peak_Pace_Swim', 'xPace',
+ 'Pace', 'IsoPower', 'Power_Index', 'L1_Time_in_Zone', 'L2_Time_in_Zone', 'L3_Time_in_Zone',
'L4_Time_in_Zone', 'L5_Time_in_Zone', 'L6_Time_in_Zone', 'L7_Time_in_Zone']
- self.metadata_list = ['VO2max_detected', 'Shoes', 'Workout_Code', 'Workout_Title',
+ self.metadata_list = ['VO2max_detected', 'Shoes', 'Workout_Code', 'Workout_Title',
'Indoor', 'Frame', 'Sport']

def build_gc_request(self): ## TODO: rebuild using CheetahPy
base_api_endpoint = 'http://localhost:12021/Ryan%20Duecker?metrics={metrics_fields}&metadata={metadata_fields}'
fmted_endpoint = base_api_endpoint.format(metrics_fields=','.join(self.metrics_list),
metadata_fields=','.join(self.metadata_list))
return fmted_endpoint

def build_new_dataset(self):
data_original = pd.read_csv(
self.build_gc_request()
)
data_original.columns = [x.strip(' "') for x in data_original.columns]

data_original['date'] = pd.to_datetime(data_original['date'])
data_original['VO2max_detected'] = data_original['VO2max_detected'].astype(float)

# force lower case column names
data_original.rename(columns=str.lower, inplace=True)

self.save_dataframe(data_original, name='gc_activitydata_local')

## Set list of activities from earlier filtered call
@@ -67,7 +67,7 @@ def save_dataframe(self, df:pd.DataFrame, name:str, dir:str='./', index_save_sta
print(f'{name} saved')

def extract_activity_data(self, filename:str):
- ## Load gc api module to access individual activities
+ ## Load gc api module to access individual activities
ac = CheetahPy.get_activity(athlete="Ryan Duecker",
activity_filename=filename)
var_Ti = np.where(ac['temp'].mean() < -20, 20, ac['temp'].mean())
@@ -125,22 +125,22 @@ def load_dataset(self, filepath):
def load_local_activity_store(self, filepath):
self.activity_data = self.load_dataset(filepath)
self.activity_data['date'] = pd.to_datetime(self.activity_data['date'])

def load_local_activity_model_params(self, filepath):
self.modeled_data = self.load_dataset(filepath)

def power_index_maker(self, power, duration, cp=340, w_prime=15000, pmax=448):
theoretical_power = w_prime/duration - w_prime/(cp-pmax) + cp
power_index = (power/theoretical_power)*100
return power_index

def _filter_absent_data(self):
for column in ['sport','pace','average_power','average_heart_rate']:
if column not in self.activity_data.columns:
self.activity_data[column] = np.nan
- self.activity_data = self.activity_data[~(((self.activity_data['sport'] == 'Run')
+ self.activity_data = self.activity_data[~(((self.activity_data['sport'] == 'Run')
& (self.activity_data['pace'] <= 0))
- | ((self.activity_data['sport'] == 'Bike')
+ | ((self.activity_data['sport'] == 'Bike')
& (self.activity_data['average_power'] <= 0))
| (self.activity_data['average_heart_rate'] <= 0))].copy()
return 0
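power_index_maker above scores an activity's average power against a theoretical power derived from critical-power constants (cp, w_prime, pmax). A worked check with the default constants; the comparison with Morton's 3-parameter critical-power model at the end is offered as an assumption, not something this commit states:

    # Worked check of the power-index arithmetic with the defaults cp=340 W, w_prime=15000 J, pmax=448 W
    cp, w_prime, pmax = 340, 15000, 448
    duration = 1200            # a 20-minute effort, in seconds
    power = 330                # average power actually produced, in watts

    theoretical_power = w_prime/duration - w_prime/(cp - pmax) + cp   # as written above: ~491 W
    power_index = (power / theoretical_power) * 100                   # ~67

    # Under Morton's 3-parameter critical-power model the grouping would instead be
    # w_prime/(duration + w_prime/(pmax - cp)) + cp (~351 W here); whether that grouping
    # is intended is an assumption, not something stated in this file.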
@@ -151,7 +151,7 @@ def _reframe_data_tss(self):
self.activity_data['day_tss'] = self.activity_data['tss'].groupby(
self.activity_data['workoutDate']).transform('sum').fillna(0)
return 0

def _prune_relative_to_performance_metric(self, performance_lower_bound):
# self.activity_data['performance_metric'] = np.where(self.activity_data['Duration'] < 60*60, 0, self.activity_data['performance_metric'])
self.activity_data['performance_metric'] = np.where(
@@ -161,7 +161,7 @@ def _prune_relative_to_performance_metric(self, performance_lower_bound):
self.activity_data['performance_metric'] = self.activity_data['performance_metric'].replace(0,np.nan)
self.activity_data['performance_metric'] = self.activity_data['performance_metric'].fillna(method='ffill')
return 0

def impute_dates(self, fill_performance_forward=False):
self.processed_activity_data.reset_index(inplace=True)

@@ -176,7 +176,7 @@ def impute_dates(self, fill_performance_forward=False):
print(f'{exc} occurred. Running deduplication and retrying')
self.processed_activity_data = self.processed_activity_data[~self.processed_activity_data.index.duplicated()]
self.processed_activity_data = self.processed_activity_data.reindex(missing_dates, fill_value=0)

# drop extra (incomplete) date col
self.processed_activity_data = self.processed_activity_data[['load_metric','performance_metric']]

@@ -188,7 +188,7 @@ def impute_dates(self, fill_performance_forward=False):
return 0


- def pre_process(self, load_metric:str, performance_metric:str, performance_lower_bound:float=0.0,
+ def pre_process(self, load_metric:str, performance_metric:str, performance_lower_bound:float=0.0,
sport:bool=False, filter_sport:list=[], fill_performance_forward:bool=True) -> str:
metric_funcs = metric_functions()
self._filter_absent_data()
@@ -203,7 +203,7 @@ def pre_process(self, load_metric:str, performance_metric:str, performance_lower
athlete_statics = self.athlete_statics)
else:
self.activity_data.rename(columns={load_metric:'load_metric'}, inplace=True)

## Use identified fxn to create performance metric for activity row
if performance_metric not in self.activity_data.columns:
self.activity_data['performance_metric'] = metric_funcs.activity_summary_metric(
@@ -222,7 +222,7 @@ def pre_process(self, load_metric:str, performance_metric:str, performance_lower
if sport:
groupby_list.append('sport')
self.processed_activity_data = self.activity_data.groupby(groupby_list).agg(agg_dict)

# Impute missing dates to create daily values + handle performance data
self.impute_dates(fill_performance_forward=fill_performance_forward)

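A hypothetical end-to-end sketch of the preprocessing flow above; the no-argument constructor, the file name, and the metric names ('tss', 'VO2') are illustrative assumptions, not taken from this commit:

    from fitlitics import dataset_preprocess

    dsp = dataset_preprocess()                                   # assumes a default constructor
    dsp.load_local_activity_store('gc_activitydata_local.csv')   # assumed path to the saved activity store
    dsp.pre_process(load_metric='tss',
                    performance_metric='VO2',
                    performance_lower_bound=0.0,
                    sport=False,
                    fill_performance_forward=True)
    daily = dsp.processed_activity_data   # daily frame with 'load_metric' and 'performance_metric' columns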
6 changes: 3 additions & 3 deletions CheetahPyAnalytics/__init__.py → fitlitics/__init__.py
@@ -1,10 +1,10 @@
- __title__ = 'CheetahPyAnalytics'
+ __title__ = 'fitlitics'
__version__ = '0.2.3'
__author__ = 'RyanAugust'
__license__ = 'MIT'
__copyright__ = 'Copyright 2023'

- from .cheetahpyanalytics import (
+ from .CheetahPyAnalytics import (
fetch_new_dataset,
dataset_preprocess
)
@@ -27,4 +27,4 @@
'model_classic_pmc'
]

- metric_funcs = metric_functions()
+ metric_funcs = metric_functions()
18 changes: 9 additions & 9 deletions CheetahPyAnalytics/functions.py → fitlitics/functions.py
@@ -38,16 +38,16 @@ def __init__(self):
'VO2': self._s_calc_vo2,
'TSS': self._s_coggan_tss
}

def activity_metric(
- self, frame: pd.DataFrame, metric_name: str,
+ self, frame: pd.DataFrame, metric_name: str,
athlete_statics = None, **kwargs) -> float:
metric_function = self.activity_metric_function_map[metric_name]
values = metric_function(frame=frame, athlete_statics=athlete_statics, **kwargs)
return values

def activity_summary_metric(
- self, frame: pd.DataFrame, metric_name: str,
+ self, frame: pd.DataFrame, metric_name: str,
athlete_statics = None, **kwargs) -> pd.Series:
metric_function = self.activity_summary_metric_function_map[metric_name]
values = metric_function(frame=frame, athlete_statics=athlete_statics, **kwargs)
@@ -82,7 +82,7 @@ def _s_intensity_factor_power(self, frame: pd.DataFrame, athlete_statics) -> pd.
return values

def _a_normalized_power(self, frame:pd.DataFrame, athlete_statics: athlete) -> float:
"""Takes input of an activty with power data and FTP setting and
"""Takes input of an activty with power data and FTP setting and
returns the Normalized Power value"""
_30sr_p = frame['power'].rolling(window=30, min_periods=1).mean()  # 30 s rolling average power
value = (_30sr_p**4).mean()**(1/4)
@@ -95,7 +95,7 @@ def _s_coggan_tss(self, frame:pd.DataFrame, athlete_statics: athlete) -> pd.Seri
if 'functional_threshold_power' not in frame.columns:
assert athlete_statics.bike_functional_threshold_power is not None, "Requires FTP input in dataframe or as parameter"
frame['functional_threshold_power'] = athlete_statics.bike_functional_threshold_power

values = ((frame['normalized_power']*frame['intensity_factor']
*frame['duration'])/(frame['functional_threshold_power']*3600))*100
return values
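As a quick sanity check of the TSS formula above, an hour ridden exactly at FTP (normalized power equal to FTP, intensity factor 1.0, duration 3600 s) scores 100 TSS:

    normalized_power, intensity_factor, duration, ftp = 250, 1.0, 3600, 250
    tss = ((normalized_power * intensity_factor * duration) / (ftp * 3600)) * 100
    assert tss == 100.0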
@@ -111,7 +111,7 @@ def _a_coggan_tss(self, frame:pd.DataFrame, athlete_statics) -> float:

_tss = self._s_coggan_tss(frame=activity_summary, athlete_statics=athlete_statics)
return _tss

def _a_calc_vo2(
self, frame: pd.DataFrame, athlete_statics,
sport: str) -> float:
@@ -128,7 +128,7 @@ def _a_calc_vo2(
return value

def _s_calc_vo2(self, frame: pd.DataFrame, athlete_statics) -> float:
"""Takes input of an activity summary with power, heart rate, and sport data
"""Takes input of an activity summary with power, heart rate, and sport data
and returns estimated VO2max values"""
param_data = {
'resting_hr':athlete_statics.resting_heart_rate,
@@ -163,7 +163,7 @@ def _a_max_Xmin_power(
self, frame: pd.DataFrame, power_duration: int) -> float:
value = frame['power'].rolling(window=power_duration).mean().max()
return value

def _a_hr_at_power(
self, frame: pd.DataFrame, find_power_level: int, power_duration:int=300,
tol:float=5) -> float:
@@ -190,7 +190,7 @@ def _a_ae_ef(self, frame: pd.DataFrame) -> float:
# def modeled_aerobic_threshold_power(self, row):
# temp = 20
# duration = 60*60

# if (row['a'] != 0) & (row['Duration'] > 999):
# power = row['a'] + row['b'] * self.athlete_statics['threshold_hr'] + row['c'] * duration * temp
# return power
@@ -27,15 +27,15 @@ class model_classic_pmc:
"""Implementation of classic Performance Management Chart logic.
Takes the input of load_data and chronic/acute lookback day settings
to calculate the running CTL and ATL metrics for an athlete
- Standard lookback settings are CTL:42 and ATL:7
+ Standard lookback settings are CTL:42 and ATL:7
However, these should be further customized to each athlete as soon as performance
data is available"""
def __init__(self, load_data: pd.Series, ctl_days: int = 42, atl_days: int = 7):
self.athlete_loadperf = athlete_loadperf(load_data=load_data)
self.ctl_days = ctl_days
self.atl_days = atl_days

def _calculate_ema(self, raw_load: pd.Series, k_days: float):
# current = (1-k) * Yesterday’s CTL + k * Today’s TSS
ema = raw_load.ewm(com=k_days)
@@ -45,10 +45,10 @@ def _calculate_training_load_vectors(self):
ctl = self._calculate_ema(raw_load=self.athlete_loadperf.load_data, k_days=self.ctl_days).mean()
atl = self._calculate_ema(raw_load=self.athlete_loadperf.load_data, k_days=self.atl_days).mean()
self.ath_pmc = athlete_pmc(ctl=ctl, atl=atl)

def fit(self):
assert "This model does not require fitting of performance data to load data"

def run(self):
self._calculate_training_load_vectors()
- return self.ath_pmc
+ return self.ath_pmc
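A minimal usage sketch of the classic PMC model above, following the call pattern in tests/test_model.py; the daily load values are illustrative. Note that pandas ewm(com=N) uses a smoothing factor of 1/(1 + N), which is how the CTL/ATL day constants enter the calculation:

    import pandas as pd
    from fitlitics import model_classic_pmc

    # hypothetical daily training load (e.g. TSS per day)
    load_series = pd.Series([60, 0, 90, 120, 0, 45, 80, 100, 0, 70, 60, 90])

    pmc = model_classic_pmc(load_data=load_series, ctl_days=42, atl_days=7)
    result = pmc.run()        # returns the athlete_pmc container built from the CTL/ATL vectors
    tsb = result.tsb          # training stress balance, as accessed in the test below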
6 changes: 3 additions & 3 deletions pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools"]
build-backend = "setuptools.build_meta"

[project]
name = "cheetahpyanalytics"
name = "fitlitics"
description = "Python Analytics engine for interacting with both 1p and opendata from Golden Cheetah"
license = {file = "LICENSE"}
authors = [
@@ -19,11 +19,11 @@ dependencies = [
"cheetahpy @ git+https://github.com/RyanAugust/CheetahPy.git"
]
[tool.setuptools.dynamic]
- version = {attr = "cheetahpyanalytics.__version__"}
+ version = {attr = "fitlitics.__version__"}
readme = {file = ["README.md"]}

[tool.setuptools]
packages = ["cheetahpyanalytics"]
packages = ["fitlitics"]

[project.optional-dependencies]
testing = ["pytest"]
4 changes: 2 additions & 2 deletions tests/test_metrics.py
@@ -1,4 +1,4 @@
- from cheetahpyanalytics import (
+ from fitlitics import (
fetch_new_dataset,
dataset_preprocess,
metric_functions,
@@ -51,4 +51,4 @@ def test_activity_VO2_calc():


if __name__ == '__main__':
- print(type(athlete_statics(bike_functional_threshold_power = 300)))
+ print(type(athlete_statics(bike_functional_threshold_power = 300)))
4 changes: 2 additions & 2 deletions tests/test_model.py
@@ -1,4 +1,4 @@
- from cheetahpyanalytics import (
+ from fitlitics import (
fetch_new_dataset,
dataset_preprocess,
metric_functions,
@@ -27,4 +27,4 @@ def test_model_classic_pmc():
pmc = model_classic_pmc(load_data=load_series, ctl_days=42, atl_days=7)
pmc.run()
# acute should be lower than chronic
- assert pmc.ath_pmc.tsb[10] > 0
+ assert pmc.ath_pmc.tsb[10] > 0
4 changes: 2 additions & 2 deletions tests/test_process.py
@@ -1,4 +1,4 @@
- from cheetahpyanalytics import (
+ from fitlitics import (
fetch_new_dataset,
dataset_preprocess,
metric_functions,
@@ -45,4 +45,4 @@ def test_preprocess():
performance_metric=performance_metric,
performance_lower_bound = 0.0,
sport = False,
- fill_performance_forward=True)
+ fill_performance_forward=True)
