diff --git a/rowers/courseutils.py b/rowers/courseutils.py index 04b34d25..9393ddea 100644 --- a/rowers/courseutils.py +++ b/rowers/courseutils.py @@ -23,12 +23,12 @@ def time_in_path(df, p, maxmin='max', getall=False, name='unknown', logfile=None def f(x): return coordinate_in_path(x['latitude'], x['longitude'], p) - df['inpolygon'] = df.apply(f, axis=1) + inpolygon = df.apply(lambda row:f(row), axis=1).copy() if maxmin == 'max': - b = (~df['inpolygon']).shift(-1)+df['inpolygon'] + b = (~inpolygon).shift(-1)+inpolygon else: # pragma: no cover - b = (~df['inpolygon']).shift(1)+df['inpolygon'] + b = (~inpolygon).shift(1)+inpolygon if len(df[b == 2]): if logfile is not None: # pragma: no cover @@ -90,7 +90,7 @@ def coursetime_first(data, paths, polygons=[], logfile=None): try: entrytime, entrydistance = time_in_path( - data, paths[0], maxmin='max', name=polygons[0][1], logfile=logfile) + data, paths[0], maxmin='max', name=str(polygons[0]), logfile=logfile) coursecompleted = True except InvalidTrajectoryError: # pragma: no cover entrytime = data['time'].max() @@ -118,7 +118,7 @@ def coursetime_paths(data, paths, finalmaxmin='min', polygons=[], logfile=None): ( entrytime, entrydistance - ) = time_in_path(data, paths[0], maxmin=finalmaxmin, name=polygons[0][1], logfile=logfile) + ) = time_in_path(data, paths[0], maxmin=finalmaxmin, name=str(polygons[0]), logfile=logfile) coursecompleted = True except InvalidTrajectoryError: # pragma: no cover entrytime = data['time'].max() @@ -129,7 +129,7 @@ def coursetime_paths(data, paths, finalmaxmin='min', polygons=[], logfile=None): if len(paths) > 1: try: time, dist = time_in_path( - data, paths[0], name=polygons[0][1], logfile=logfile) + data, paths[0], name=str(polygons[0]), logfile=logfile) data2 = data[data['time'] > time].copy() data2['time'] = data2['time'].apply(lambda x: x-time) data2['cum_dist'] = data2['cum_dist'].apply(lambda x: x-dist) diff --git a/rowers/database.py b/rowers/database.py index 7032795b..324965b7 
100644 --- a/rowers/database.py +++ b/rowers/database.py @@ -16,3 +16,7 @@ database_url = 'mysql://{user}:{password}@{host}:{port}/{database_name}'.format( if settings.DEBUG or user == '': database_url = 'sqlite:///db.sqlite3' + +#database_name_dev = DEV_DATABASES['default']['NAME'] + +database_url_debug = database_url diff --git a/rowers/dataprep.py b/rowers/dataprep.py index eb2e3c46..3fdfd95d 100644 --- a/rowers/dataprep.py +++ b/rowers/dataprep.py @@ -2,7 +2,8 @@ from rowers.metrics import axes, calc_trimp, rowingmetrics, dtypes, metricsgroup from rowers.utils import lbstoN, myqueue, wavg, dologging from rowers.mytypes import otwtypes, otetypes, rowtypes import glob -import rowingdata.tcxtools as tcxtools +from rowingdata import tcxtools + from rowers.utils import totaltime_sec_to_string from rowers.datautils import p0 from scipy import optimize @@ -10,13 +11,15 @@ from rowers.utils import calculate_age import datetime from scipy.signal import savgol_filter from rowers.opaque import encoder -from rowers.database import * +from rowers.database import database_url, database_url_debug from rowers import mytypes from rowsandall_app.settings import SITE_URL import django_rq from timezonefinder import TimezoneFinder -import rowers.datautils as datautils -import rowers.utils as utils +from rowers import datautils + +from rowers import utils + import sys import sqlalchemy as sa from sqlalchemy import create_engine @@ -75,46 +78,21 @@ import yaml import shutil from shutil import copyfile +from rowingdata import make_cumvalues from rowingdata import ( get_file_type, get_empower_rigging, get_empower_firmware ) -from rowers.tasks import ( - handle_sendemail_unrecognized, handle_setcp, - handle_getagegrouprecords, handle_update_wps, - handle_request_post, handle_calctrimp, - handle_updatecp, handle_updateergcp, - handle_sendemail_breakthrough, - handle_sendemail_hard, -) -from rowers.tasks import handle_zip_file +# All the data preparation, data cleaning and data mangling 
should +# be defined here -from pandas import DataFrame, Series -import dask.dataframe as dd -from dask.delayed import delayed -import pyarrow.parquet as pq -import pyarrow as pa - -from pyarrow.lib import ArrowInvalid - -from django.utils import timezone from django.utils.timezone import get_current_timezone -from django.urls import reverse -import requests -from django.core.exceptions import ValidationError - -from time import strftime -import arrow thetimezone = get_current_timezone() allowedcolumns = [key for key, value in strokedatafields.items()] -queue = django_rq.get_queue('default') -queuelow = django_rq.get_queue('low') -queuehigh = django_rq.get_queue('default') - # mapping the DB column names to the CSV file column names columndict = { @@ -201,7 +179,7 @@ def get_video_data(w, groups=['basic'], mode='water'): data[c] = df2[c].astype(int).tolist() else: sigfigs = dict(rowingmetrics)[c]['sigfigs'] - if (c != 'pace'): + if c != 'pace': da = ((10**sigfigs)*df2[c]).astype(int)/(10**sigfigs) else: da = df2[c] @@ -315,104 +293,6 @@ def workout_has_latlon(id): return False, latmean, lonmean -def workout_summary_to_df( - rower, - startdate=datetime.datetime(1970, 1, 1), - enddate=timezone.now()+timezone.timedelta(days=1)): - - ws = Workout.objects.filter( - user=rower, date__gte=startdate, date__lte=enddate, - duplicate=False - ).order_by("startdatetime") - - types = [] - names = [] - ids = [] - startdatetimes = [] - timezones = [] - distances = [] - durations = [] - weightcategories = [] - adaptivetypes = [] - weightvalues = [] - notes = [] - tcx_links = [] - csv_links = [] - workout_links = [] - goldstandards = [] - goldstandarddurations = [] - rscores = [] - hrtss = [] - trimps = [] - rankingpieces = [] - boattypes = [] - - counter1 = 0 - counter2 = len(ws) - - for w in ws: - counter1 += 1 - if counter1 % 10 == 0: # pragma: no cover - print(counter1, '/', counter2) - types.append(w.workouttype) - names.append(w.name) - ids.append(encoder.encode_hex(w.id)) - 
startdatetimes.append(w.startdatetime) - timezones.append(w.timezone) - distances.append(w.distance) - durations.append(w.duration) - weightcategories.append(w.weightcategory) - adaptivetypes.append(w.adaptiveclass) - weightvalues.append(w.weightvalue) - boattypes.append(w.boattype) - notes.append(w.notes) - tcx_link = SITE_URL+'/rowers/workout/{id}/emailtcx'.format( - id=encoder.encode_hex(w.id) - ) - tcx_links.append(tcx_link) - csv_link = SITE_URL+'/rowers/workout/{id}/emailcsv'.format( - id=encoder.encode_hex(w.id) - ) - csv_links.append(csv_link) - workout_link = SITE_URL+'/rowers/workout/{id}/'.format( - id=encoder.encode_hex(w.id) - ) - workout_links.append(workout_link) - trimps.append(workout_trimp(w)[0]) - rscore = workout_rscore(w) - rscores.append(int(rscore[0])) - hrtss.append(int(w.hrtss)) - goldstandard, goldstandardduration = workout_goldmedalstandard(w) - goldstandards.append(int(goldstandard)) - goldstandarddurations.append(int(goldstandardduration)) - rankingpieces.append(w.rankingpiece) - - df = pd.DataFrame({ - 'ID': ids, - 'date': startdatetimes, - 'name': names, - 'link': workout_links, - 'timezone': timezones, - 'type': types, - 'boat type': boattypes, - 'distance (m)': distances, - 'duration ': durations, - 'ranking piece': rankingpieces, - 'weight category': weightcategories, - 'adaptive classification': adaptivetypes, - 'weight (kg)': weightvalues, - 'Stroke Data TCX': tcx_links, - 'Stroke Data CSV': csv_links, - 'TRIMP Training Load': trimps, - 'TSS Training Load': rscores, - 'hrTSS Training Load': hrtss, - 'GS': goldstandards, - 'GS_secs': goldstandarddurations, - 'notes': notes, - }) - - return df - def get_workouts(ids, userid): # pragma: no cover goodids = [] @@ -446,96 +326,6 @@ def filter_df(datadf, fieldname, value, largerthan=True): # joins workouts -def join_workouts(r, ids, title='Joined Workout', - parent=None, - setprivate=False, - forceunit='lbs', killparents=False): - - message = None - - summary = '' - if parent: # pragma: 
no cover - oarlength = parent.oarlength - inboard = parent.inboard - workouttype = parent.workouttype - notes = parent.notes - summary = parent.summary - if parent.privacy == 'hidden': - makeprivate = True - else: - makeprivate = False - - startdatetime = parent.startdatetime - else: - oarlength = 2.89 - inboard = 0.88 - workouttype = 'rower' - notes = '' - summary = '' - makeprivate = False - startdatetime = timezone.now() - - if setprivate is True and makeprivate is False: # pragma: no cover - makeprivate = True - elif setprivate is False and makeprivate is True: # pragma: no cover - makeprivate = False - - # reorder in chronological order - ws = Workout.objects.filter(id__in=ids).order_by("startdatetime") - - if not parent: - parent = ws[0] - oarlength = parent.oarlength - inboard = parent.inboard - workouttype = parent.workouttype - notes = parent.notes - summary = parent.summary - if parent.privacy == 'hidden': - makeprivate = True - else: - makeprivate = False - startdatetime = parent.startdatetime - - files = [w.csvfilename for w in ws] - - row = rdata(files[0]) - - files = files[1:] - - while len(files): - row2 = rdata(files[0]) - if row2 != 0: - row = row+row2 - files = files[1:] - - timestr = strftime("%Y%m%d-%H%M%S") - csvfilename = 'media/df_' + timestr + '.csv' - - row.write_csv(csvfilename, gzip=True) - id, message = save_workout_database(csvfilename, r, - workouttype=workouttype, - title=title, - notes=notes, - oarlength=oarlength, - inboard=inboard, - startdatetime=startdatetime, - makeprivate=makeprivate, - summary=summary, - dosmooth=False, - consistencychecks=False) - - if killparents: # pragma: no cover - for w in ws: - w.delete() - - w = Workout.objects.get(id=id) - w.duplicate = False - w.save() - if message is not None and "duplicate" in message: - message = "" - - return (id, message) - def df_resample(datadf): # time stamps must be in seconds @@ -545,58 +335,6 @@ def df_resample(datadf): return newdf -def resample(id, r, parent, 
overwrite='copy'): - data, row = getrowdata_db(id=id) - messages = [] - - # resample - startdatetime = row.startdatetime - data['datetime'] = data['time'].apply( - lambda x: startdatetime+datetime.timedelta(seconds=x/1000.)) - - data = data.resample('S', on='datetime').mean() - data.interpolate(method='linear', inplace=True) - data.reset_index(drop=True, inplace=True) - - # data.drop('datetime',inplace=True) - data['pace'] = data['pace'] / 1000. - data['time'] = data['time'] / 1000. - - if overwrite == 'overwrite': - # remove CP data - try: - cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=parent.id) - os.remove(cpfile) - except FileNotFoundError: - pass - # save - data.rename(columns=columndict, inplace=True) - - starttimeunix = arrow.get(startdatetime).timestamp() - data[' ElapsedTime (sec)'] = data['TimeStamp (sec)'] - - data['TimeStamp (sec)'] = data['TimeStamp (sec)'] + starttimeunix - - row = rrdata(df=data) - - row.write_csv(parent.csvfilename, gzip=True) - - _ = dataprep(row.df, id=parent.id, bands=True, barchart=True, - otwpower=True, empower=True, inboard=parent.inboard) - isbreakthrough, ishard = checkbreakthrough(parent, r) - _ = check_marker(parent) - _ = update_wps(r, mytypes.otwtypes) - _ = update_wps(r, mytypes.otetypes) - - tss, normp = workout_rscore(parent) - goldmedalstandard, goldmedalseconds = workout_goldmedalstandard(parent) - else: - id, message = new_workout_from_df(r, data, title=parent.name + '(Resampled)', - parent=parent, forceunit='N') - messages.append(message) - - return data, id, messages - def clean_df_stats(datadf, workstrokesonly=True, ignorehr=True, ignoreadvanced=False): @@ -997,17 +735,9 @@ def paceformatsecs(values): def update_c2id_sql(id, c2id): - engine = create_engine(database_url, echo=False) - table = 'rowers_workout' - - query = "UPDATE %s SET uploadedtoc2 = %s WHERE `id` = %s;" % ( - table, c2id, id) - - with engine.connect() as conn, conn.begin(): - _ = conn.execute(query) - - conn.close() - engine.dispose() + 
workout = Workout.objects.get(id=id) + workout.uploadedtoc2 = c2id + workout.save() return 1 @@ -1034,13 +764,15 @@ def deletecpdata_sql(rower_id, table='cpdata'): # pragma: no cover with engine.connect() as conn, conn.begin(): try: _ = conn.execute(query) - except: + except Exception as e: + print(Exception, e) print("Database locked") conn.close() engine.dispose() -def updatecpdata_sql(rower_id, delta, cp, table='cpdata', distance=[]): # pragma: no cover +def updatecpdata_sql(rower_id, delta, cp, table='cpdata', distance=pd.Series([], dtype='float'), + debug=False): # pragma: no cover deletecpdata_sql(rower_id) df = pd.DataFrame( { @@ -1060,18 +792,6 @@ def updatecpdata_sql(rower_id, delta, cp, table='cpdata', distance=[]): # pragm engine.dispose() -def fetchcperg(rower, theworkouts): - thefilenames = [w.csvfilename for w in theworkouts] - cpdf = getcpdata_sql(rower.id, table='ergcpdata') - - _ = myqueue( - queuelow, - handle_updateergcp, - rower.id, - thefilenames) - - return cpdf - def get_workoutsummaries(userid, startdate): # pragma: no cover u = User.objects.get(id=userid) @@ -1083,525 +803,10 @@ def get_workoutsummaries(userid, startdate): # pragma: no cover return df -def workout_goldmedalstandard(workout, reset=False): - if workout.goldmedalstandard > 0 and not reset: - return workout.goldmedalstandard, workout.goldmedalseconds - if workout.workouttype in rowtypes: - goldmedalstandard, goldmedalseconds = calculate_goldmedalstandard( - workout.user, workout) - if workout.workouttype in otwtypes: - factor = 100./(100.-workout.user.otwslack) - goldmedalstandard = goldmedalstandard*factor - workout.goldmedalstandard = goldmedalstandard - workout.goldmedalseconds = goldmedalseconds - workout.save() - return goldmedalstandard, goldmedalseconds - else: - return 0, 0 -def check_marker(workout): - r = workout.user - gmstandard, gmseconds = workout_goldmedalstandard(workout) - if gmseconds < 60: - return None - dd = 
arrow.get(workout.date).datetime-datetime.timedelta(days=r.kfit) - ws = Workout.objects.filter(date__gte=dd, - date__lte=workout.date, - user=r, duplicate=False, - workouttype__in=mytypes.rowtypes, - ).order_by("date") - ids = [] - gms = [] - for w in ws: - gmstandard, gmseconds = workout_goldmedalstandard(w) - if gmseconds > 60: - ids.append(w.id) - gms.append(gmstandard) - df = pd.DataFrame({ - 'id': ids, - 'gms': gms, - }) - - if df.empty: # pragma: no cover - workout.ranking = True - workout.save() - return workout - - indexmax = df['gms'].idxmax() - theid = df.loc[indexmax, 'id'] - - wmax = Workout.objects.get(id=theid) - # gms_max = wmax.goldmedalstandard - - # check if equal, bigger, or smaller than previous - if not wmax.rankingpiece: - rankingworkouts = ws.filter(rankingpiece=True) - if len(rankingworkouts) == 0: - wmax.rankingpiece = True - wmax.save() - return wmax - - lastranking = rankingworkouts[len(rankingworkouts)-1] - if lastranking.goldmedalstandard+0.2 < wmax.goldmedalstandard: # pragma: no cover - wmax.rankingpiece = True - wmax.save() - return wmax - else: # pragma: no cover - return wmax - - return None - - -def calculate_goldmedalstandard(rower, workout, recurrance=True): - cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) - try: - df = pd.read_parquet(cpfile) - except: - background = True - if settings.TESTING: - background = False - df, delta, cpvalues = setcp(workout, background=background) - if df.empty: - return 0, 0 - - if df.empty and recurrance: # pragma: no cover - df, delta, cpvalues = setcp(workout, recurrance=False, background=True) - if df.empty: - return 0, 0 - - age = calculate_age(rower.birthdate, today=workout.date) - - agerecords = CalcAgePerformance.objects.filter( - age=age, - sex=rower.sex, - weightcategory=rower.weightcategory - ) - - wcdurations = [] - wcpower = [] - getrecords = False - if not settings.TESTING: # pragma: no cover - if len(agerecords) == 0: # pragma: no cover - getrecords = True - - for 
record in agerecords: # pragma: no cover - if record.power > 0: - wcdurations.append(record.duration) - wcpower.append(record.power) - else: - getrecords = True - - if getrecords: # pragma: no cover - durations = [1, 4, 30, 60] - distances = [100, 500, 1000, 2000, 5000, 6000, 10000, 21097, 42195] - df2 = pd.DataFrame( - list( - C2WorldClassAgePerformance.objects.filter( - sex=rower.sex, - weightcategory=rower.weightcategory - ).values() - ) - ) - jsondf = df2.to_json() - _ = myqueue(queuelow, handle_getagegrouprecords, - jsondf, distances, durations, age, rower.sex, rower.weightcategory) - - wcpower = pd.Series(wcpower, dtype='float') - wcdurations = pd.Series(wcdurations, dtype='float') - - def fitfunc(pars, x): - return pars[0] / (1+(x/pars[2])) + pars[1]/(1+(x/pars[3])) - - def errfunc(pars, x, y): - return fitfunc(pars, x)-y - - if len(wcdurations) >= 4: # pragma: no cover - p1wc, success = optimize.leastsq( - errfunc, p0[:], args=(wcdurations, wcpower)) - else: - factor = fitfunc(p0, wcdurations.mean()/wcpower.mean()) - p1wc = [p0[0]/factor, p0[1]/factor, p0[2], p0[3]] - - return 0, 0 - - times = df['delta'] - powers = df['cp'] - wcpowers = fitfunc(p1wc, times) - scores = 100.*powers/wcpowers - - try: - indexmax = scores.idxmax() - delta = int(df.loc[indexmax, 'delta']) - maxvalue = scores.max() - except (ValueError, TypeError): # pragma: no cover - indexmax = 0 - delta = 0 - maxvalue = 0 - - return maxvalue, delta - - -def fetchcp_new(rower, workouts): - - data = [] - for workout in workouts: - cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) - try: - df = pd.read_parquet(cpfile) - df['workout'] = str(workout) - df['url'] = workout.url() - data.append(df) - except: - # CP data file doesn't exist yet. 
has to be created - df, delta, cpvalues = setcp(workout) - df['workout'] = str(workout) - df['url'] = workout.url() - data.append(df) - - if len(data) == 0: - return pd.Series(dtype='float'), pd.Series(dtype='float'), 0, pd.Series(dtype='float'), pd.Series(dtype='float') - if len(data) > 1: - df = pd.concat(data, axis=0) - - try: - df = df[df['cp'] == df.groupby(['delta'])['cp'].transform('max')] - except KeyError: # pragma: no cover - return pd.Series(dtype='float'), pd.Series(dtype='float'), 0, pd.Series(dtype='float'), pd.Series(dtype='float') - - df = df.sort_values(['delta']).reset_index() - - return df['delta'], df['cp'], 0, df['workout'], df['url'] - - -def setcp(workout, background=False, recurrance=True): - filename = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) - - strokesdf = getsmallrowdata_db( - ['power', 'workoutid', 'time'], ids=[workout.id]) - - try: - if strokesdf['power'].std() == 0: - return pd.DataFrame(), pd.Series(dtype='float'), pd.Series(dtype='float') - except KeyError: - return pd.DataFrame(), pd.Series(dtype='float'), pd.Series(dtype='float') - - if background: # pragma: no cover - _ = myqueue(queuelow, handle_setcp, strokesdf, filename, workout.id) - return pd.DataFrame({'delta': [], 'cp': []}), pd.Series(dtype='float'), pd.Series(dtype='float') - - if not strokesdf.empty: - totaltime = strokesdf['time'].max() - try: - powermean = strokesdf['power'].mean() - except KeyError: # pragma: no cover - powermean = 0 - - if powermean != 0: - thesecs = totaltime - maxt = 1.05 * thesecs - - if maxt > 0: - logarr = datautils.getlogarr(maxt) - dfgrouped = strokesdf.groupby(['workoutid']) - delta, cpvalues, avgpower = datautils.getcp(dfgrouped, logarr) - - df = pd.DataFrame({ - 'delta': delta, - 'cp': cpvalues, - 'id': workout.id, - }) - df.to_parquet(filename, engine='fastparquet', - compression='GZIP') - if recurrance: - goldmedalstandard, goldmedalduration = calculate_goldmedalstandard( - workout.user, workout) - workout.goldmedalstandard 
= goldmedalstandard - workout.goldmedalduration = goldmedalduration - workout.save() - return df, delta, cpvalues - - return pd.DataFrame({'delta': [], 'cp': []}), pd.Series(dtype='float'), pd.Series(dtype='float') - - -def update_wps(r, types, mode='water', asynchron=True): - firstdate = timezone.now()-datetime.timedelta(days=r.cprange) - workouts = Workout.objects.filter( - date__gte=firstdate, - workouttype__in=types, - user=r - ) - - ids = [w.id for w in workouts] - if asynchron: - _ = myqueue( - queue, - handle_update_wps, - r.id, - types, - ids, - mode - ) - - df = getsmallrowdata_db(['time', 'driveenergy'], ids=ids) - - try: - mask = df['driveenergy'] > 100 - except (KeyError, TypeError): - return False - try: - wps_median = int(df.loc[mask, 'driveenergy'].median()) - if mode == 'water': - r.median_wps = wps_median - else: # pragma: no cover - r.median_wps_erg = wps_median - - r.save() - except ValueError: # pragma: no cover - pass - - return True - - -def update_rolling_cp(r, types, mode='water'): - firstdate = timezone.now()-datetime.timedelta(days=r.cprange) - workouts = Workout.objects.filter( - date__gte=firstdate, - workouttype__in=types, - user=r - ) - - delta, cp, avgpower, workoutnames, urls = fetchcp_new(r, workouts) - - powerdf = pd.DataFrame({ - 'Delta': delta, - 'CP': cp, - }) - - powerdf = powerdf[powerdf['CP'] > 0] - powerdf.dropna(axis=0, inplace=True) - powerdf.sort_values(['Delta', 'CP'], ascending=[1, 0], inplace=True) - powerdf.drop_duplicates(subset='Delta', keep='first', inplace=True) - - res2 = datautils.cpfit(powerdf) - if len(powerdf) != 0: - if mode == 'water': - p1 = res2[0] - r.p0 = p1[0] - r.p1 = p1[1] - r.p2 = p1[2] - r.p3 = p1[3] - r.cpratio = res2[3] - r.save() - else: - p1 = res2[0] - r.ep0 = p1[0] - r.ep1 = p1[1] - r.ep2 = p1[2] - r.ep3 = p1[3] - r.ecpratio = res2[3] - r.save() - - return True - return False - - -def fetchcp(rower, theworkouts, table='cpdata'): # pragma: no cover - # get all power data from database (plus 
workoutid) - theids = [int(w.id) for w in theworkouts] - columns = ['power', 'workoutid', 'time'] - df = getsmallrowdata_db(columns, ids=theids) - df.dropna(inplace=True, axis=0) - if df.empty: - avgpower2 = {} - for id in theids: - avgpower2[id] = 0 - return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 - - try: - dfgrouped = df.groupby(['workoutid']) - except KeyError: - avgpower2 = {} - return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 - try: - avgpower2 = dict(dfgrouped.mean()['power'].astype(int)) - except KeyError: - avgpower2 = {} - for id in theids: - avgpower2[id] = 0 - return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 - - cpdf = getcpdata_sql(rower.id, table=table) - - if not cpdf.empty: - return cpdf['delta'], cpdf['cp'], avgpower2 - else: - _ = myqueue(queuelow, - handle_updatecp, - rower.id, - theids, - table=table) - - return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 - - return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 - - -# create a new workout from manually entered data -def create_row_df(r, distance, duration, startdatetime, workouttype='rower', - avghr=None, avgpwr=None, avgspm=None, - rankingpiece=False, - duplicate=False, rpe=-1, - title='Manual entry', notes='', weightcategory='hwt', - adaptiveclass='None'): - - if duration is not None: - totalseconds = duration.hour*3600. - totalseconds += duration.minute*60. - totalseconds += duration.second - totalseconds += duration.microsecond/1.e6 - else: # pragma: no cover - totalseconds = 60. - - if distance is None: # pragma: no cover - distance = 0 - - try: - nr_strokes = int(distance/10.) 
- except TypeError: # pragma: no cover - nr_strokes = int(20.*totalseconds) - - if nr_strokes == 0: # pragma: no cover - nr_strokes = 100 - - unixstarttime = arrow.get(startdatetime).timestamp() - - if not avgspm: # pragma: no cover - try: - spm = 60.*nr_strokes/totalseconds - except ZeroDivisionError: - spm = 20. - else: - spm = avgspm - - # step = totalseconds/float(nr_strokes) - - elapsed = np.arange(nr_strokes)*totalseconds/(float(nr_strokes-1)) - - # dstep = distance/float(nr_strokes) - - d = np.arange(nr_strokes)*distance/(float(nr_strokes-1)) - - unixtime = unixstarttime + elapsed - - try: - pace = 500.*totalseconds/distance - except ZeroDivisionError: # pragma: no cover - pace = 240. - - if workouttype in ['rower', 'slides', 'dynamic']: - try: - velo = distance/totalseconds - except ZeroDivisionError: # pragma: no cover - velo = 2.4 - power = 2.8*velo**3 - elif avgpwr is not None: # pragma: no cover - power = avgpwr - else: # pragma: no cover - power = 0 - - if avghr is not None: - hr = avghr - else: # pragma: no cover - hr = 0 - - df = pd.DataFrame({ - 'TimeStamp (sec)': unixtime, - ' Horizontal (meters)': d, - ' Cadence (stokes/min)': spm, - ' Stroke500mPace (sec/500m)': pace, - ' ElapsedTime (sec)': elapsed, - ' Power (watts)': power, - ' HRCur (bpm)': hr, - }) - - timestr = strftime("%Y%m%d-%H%M%S") - - csvfilename = 'media/df_' + timestr + '.csv' - df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] - - row = rrdata(df=df) - - row.write_csv(csvfilename, gzip=True) - - id, message = save_workout_database(csvfilename, r, - title=title, - notes=notes, - rankingpiece=rankingpiece, - duplicate=duplicate, - dosmooth=False, - workouttype=workouttype, - consistencychecks=False, - weightcategory=weightcategory, - adaptiveclass=adaptiveclass, - totaltime=totalseconds) - - return (id, message) - - -def checkbreakthrough(w, r): - isbreakthrough = False - ishard = False - workouttype = w.workouttype - if workouttype in rowtypes: - cpdf, delta, cpvalues = setcp(w) - if 
not cpdf.empty: - if workouttype in otwtypes: - res, btvalues, res2 = utils.isbreakthrough( - delta, cpvalues, r.p0, r.p1, r.p2, r.p3, r.cpratio) - _ = update_rolling_cp(r, otwtypes, 'water') - - elif workouttype in otetypes: - res, btvalues, res2 = utils.isbreakthrough( - delta, cpvalues, r.ep0, r.ep1, r.ep2, r.ep3, r.ecpratio) - _ = update_rolling_cp(r, otetypes, 'erg') - else: # pragma: no cover - res = 0 - res2 = 0 - if res: - isbreakthrough = True - if res2 and not isbreakthrough: # pragma: no cover - ishard = True - - # submit email task to send email about breakthrough workout - if isbreakthrough: - if not w.duplicate: - w.rankingpiece = True - w.save() - if r.getemailnotifications and not r.emailbounced: # pragma: no cover - _ = myqueue(queuehigh, handle_sendemail_breakthrough, - w.id, - r.user.email, - r.user.first_name, - r.user.last_name, - btvalues=btvalues.to_json()) - - # submit email task to send email about breakthrough workout - if ishard: # pragma: no cover - if not w.duplicate: - w.rankingpiece = True - w.save() - if r.getemailnotifications and not r.emailbounced: - _ = myqueue(queuehigh, handle_sendemail_hard, - w.id, - r.user.email, - r.user.first_name, - r.user.last_name, - btvalues=btvalues.to_json()) - - return isbreakthrough, ishard def checkduplicates(r, workoutdate, workoutstartdatetime, workoutenddatetime): @@ -1620,256 +825,13 @@ def checkduplicates(r, workoutdate, workoutstartdatetime, workoutenddatetime): if enddatetime > workoutstartdatetime: ws2.append(ww) - if (len(ws2) != 0): + if len(ws2) != 0: duplicate = True return duplicate return duplicate -# Processes painsled CSV file to database -def save_workout_database(f2, r, dosmooth=True, workouttype='rower', - boattype='1x', - adaptiveclass='None', - weightcategory='hwt', - dosummary=True, title='Workout', - workoutsource='unknown', - notes='', totaldist=0, totaltime=0, - rankingpiece=False, - rpe=-1, - duplicate=False, - summary='', - makeprivate=False, - oarlength=2.89, 
inboard=0.88, - forceunit='lbs', - consistencychecks=False, - startdatetime='', - impeller=False): - - message = None - - powerperc = 100 * np.array([r.pw_ut2, - r.pw_ut1, - r.pw_at, - r.pw_tr, r.pw_an]) / r.ftp - - # make workout and put in database - rr = rrower(hrmax=r.max, hrut2=r.ut2, - hrut1=r.ut1, hrat=r.at, - hrtr=r.tr, hran=r.an, ftp=r.ftp, - powerperc=powerperc, powerzones=r.powerzones) - row = rdata(f2, rower=rr) - - startdatetime, startdate, starttime, timezone_str, partofday = get_startdate_time_zone( - r, row, startdatetime=startdatetime) - - if title is None or title == '': - title = 'Workout' - - if partofday is not None: - title = '{partofday} {workouttype}'.format( - partofday=partofday, - workouttype=workouttype, - ) - - if row.df.empty: # pragma: no cover - return (0, 'Error: CSV data file was empty') - - dtavg = row.df['TimeStamp (sec)'].diff().mean() - - if dtavg < 1: - newdf = df_resample(row.df) - try: - os.remove(f2) - except: - pass - return new_workout_from_df(r, newdf, - title=title, boattype=boattype, - workouttype=workouttype, - workoutsource=workoutsource, startdatetime=startdatetime) - try: - checks = row.check_consistency() - allchecks = 1 - for key, value in checks.items(): - if not value: - allchecks = 0 - except ZeroDivisionError: # pragma: no cover - pass - - if not allchecks and consistencychecks: - # row.repair() - pass - - if row == 0: # pragma: no cover - return (0, 'Error: CSV data file not found') - - try: - lat = row.df[' latitude'] - if lat.mean() != 0 and lat.std() != 0 and workouttype == 'rower': - workouttype = 'water' - except KeyError: - pass - - if dosmooth: - # auto smoothing - pace = row.df[' Stroke500mPace (sec/500m)'].values - velo = 500. / pace - - f = row.df['TimeStamp (sec)'].diff().mean() - if f != 0 and not np.isnan(f): - windowsize = 2 * (int(10. 
/ (f))) + 1 - else: # pragma: no cover - windowsize = 1 - if 'originalvelo' not in row.df: - row.df['originalvelo'] = velo - - if windowsize > 3 and windowsize < len(velo): - velo2 = savgol_filter(velo, windowsize, 3) - else: # pragma: no cover - velo2 = velo - - velo3 = pd.Series(velo2, dtype='float') - velo3 = velo3.replace([-np.inf, np.inf], np.nan) - velo3 = velo3.fillna(method='ffill') - - pace2 = 500. / abs(velo3) - - row.df[' Stroke500mPace (sec/500m)'] = pace2 - - row.df = row.df.fillna(0) - - row.write_csv(f2, gzip=True) - try: - os.remove(f2) - except: - pass - - # recalculate power data - if workouttype == 'rower' or workouttype == 'dynamic' or workouttype == 'slides': - try: - if r.erg_recalculatepower: - row.erg_recalculatepower() - row.write_csv(f2, gzip=True) - except: - pass - - averagehr = row.df[' HRCur (bpm)'].mean() - maxhr = row.df[' HRCur (bpm)'].max() - - if totaldist == 0: - totaldist = row.df['cum_dist'].max() - if totaltime == 0: - totaltime = row.df['TimeStamp (sec)'].max( - ) - row.df['TimeStamp (sec)'].min() - try: - totaltime = totaltime + row.df.loc[:, ' ElapsedTime (sec)'].iloc[0] - except KeyError: # pragma: no cover - pass - - if np.isnan(totaltime): # pragma: no cover - totaltime = 0 - - if dosummary: - summary = row.allstats() - - workoutstartdatetime = startdatetime - - dologging('debuglog.log', 'Dataprep line 1721, Workout Startdatetime {workoutstartdatetime}'.format( - workoutstartdatetime=workoutstartdatetime, - )) - - duration = totaltime_sec_to_string(totaltime) - - workoutdate = startdate - workoutstarttime = starttime - - s = 'Dataprep line 1730 workoutdate and time set to {workoutdate} and {workoutstarttime}'.format( - workoutdate=workoutdate, - workoutstarttime=workoutstarttime, - ) - dologging('debuglog.log', s) - - if makeprivate: # pragma: no cover - privacy = 'hidden' - else: - privacy = 'visible' - - # checking for inf values - - totaldist = np.nan_to_num(totaldist) - maxhr = np.nan_to_num(maxhr) - averagehr = 
np.nan_to_num(averagehr) - - dragfactor = 0 - if workouttype in otetypes: - dragfactor = row.dragfactor - - t = datetime.datetime.strptime(duration, "%H:%M:%S.%f") - delta = datetime.timedelta( - hours=t.hour, minutes=t.minute, seconds=t.second) - - workoutenddatetime = workoutstartdatetime+delta - - # check for duplicate start times and duration - duplicate = checkduplicates( - r, workoutdate, workoutstartdatetime, workoutenddatetime) - if duplicate: - rankingpiece = False - - # test title length - if title is not None and len(title) > 140: # pragma: no cover - title = title[0:140] - - timezone_str = str(workoutstartdatetime.tzinfo) - - w = Workout(user=r, name=title, date=workoutdate, - workouttype=workouttype, - boattype=boattype, - dragfactor=dragfactor, - duration=duration, distance=totaldist, - weightcategory=weightcategory, - adaptiveclass=adaptiveclass, - starttime=workoutstarttime, - duplicate=duplicate, - workoutsource=workoutsource, - rankingpiece=rankingpiece, - forceunit=forceunit, - rpe=rpe, - csvfilename=f2, notes=notes, summary=summary, - maxhr=maxhr, averagehr=averagehr, - startdatetime=workoutstartdatetime, - inboard=inboard, oarlength=oarlength, - timezone=timezone_str, - privacy=privacy, - impeller=impeller) - try: - w.save() - except ValidationError: # pragma: no cover - try: - w.startdatetime = timezone.now() - w.save() - except ValidationError: - return (0, 'Unable to create your workout') - - if privacy == 'visible': - ts = Team.objects.filter(rower=r) - for t in ts: - w.team.add(t) - - # put stroke data in database - _ = dataprep(row.df, id=w.id, bands=True, - barchart=True, otwpower=True, empower=True, inboard=inboard) - - isbreakthrough, ishard = checkbreakthrough(w, r) - _ = check_marker(w) - _ = update_wps(r, mytypes.otwtypes) - _ = update_wps(r, mytypes.otetypes) - - _ = myqueue(queuehigh, handle_calctrimp, w.id, f2, - r.ftp, r.sex, r.hrftp, r.max, r.rest) - - return (w.id, message) - parsers = { 'kinomap': KinoMapParser, @@ -2109,369 
+1071,13 @@ def get_workouttype_from_tcx(filename, workouttype='water'): return workouttype # pragma: no cover -def new_workout_from_file(r, f2, - workouttype='rower', - workoutsource=None, - title='Workout', - boattype='1x', - rpe=-1, - makeprivate=False, - startdatetime='', - notes='', - oarlockfirmware='', - inboard=None, - oarlength=None, - impeller=False, - uploadoptions={'boattype': '1x', 'workouttype': 'rower'}): - message = "" - try: - fileformat = get_file_type(f2) - except (IOError, UnicodeDecodeError): # pragma: no cover - os.remove(f2) - message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect." - return (0, message, f2) - - summary = '' - oarlength = 2.89 - inboard = 0.88 - - # Save zip files to email box for further processing - if len(fileformat) == 3 and fileformat[0] == 'zip': # pragma: no cover - uploadoptions['secret'] = settings.UPLOAD_SERVICE_SECRET - uploadoptions['user'] = r.user.id - uploadoptions['title'] = title - try: - zip_file = zipfile.ZipFile(f2) - for id, filename in enumerate(zip_file.namelist()): - datafile = zip_file.extract(filename, path='media/') - if id > 0: - uploadoptions['title'] = title+' ('+str(id+1)+')' - else: - uploadoptions['title'] = title - - uploadoptions['file'] = datafile - url = settings.UPLOAD_SERVICE_URL - - _ = myqueue(queuehigh, - handle_request_post, - url, - uploadoptions) - - except BadZipFile: # pragma: no cover - pass - - return -1, message, f2 - - # Some people try to upload Concept2 logbook summaries - if fileformat == 'imageformat': # pragma: no cover - os.remove(f2) - message = "You cannot upload image files here" - return (0, message, f2) - - if fileformat == 'json': # pragma: no cover - os.remove(f2) - message = "JSON format not supported in direct upload" - return (0, message, f2) - - if fileformat == 'c2log': - os.remove(f2) - message = "This summary does not contain stroke data. 
Use the files containing stroke by stroke data." - return (0, message, f2) - - if fileformat == 'nostrokes': # pragma: no cover - os.remove(f2) - message = "It looks like this file doesn't contain stroke data." - return (0, message, f2) - - if fileformat == 'kml': # pragma: no cover - os.remove(f2) - message = "KML files are not supported" - return (0, message, f2) - - # Some people upload corrupted zip files - if fileformat == 'notgzip': # pragma: no cover - os.remove(f2) - message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect." - return (0, message, f2) - - # Some people try to upload RowPro summary logs - if fileformat == 'rowprolog': # pragma: no cover - os.remove(f2) - message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log." - return (0, message, f2) - - # Sometimes people try an unsupported file type. - # Send an email to info@rowsandall.com with the file attached - # for me to check if it is a bug, or a new file type - # worth supporting - if fileformat == 'gpx': # pragma: no cover - - os.remove(f2) - message = "GPX files support is on our roadmap. Check back soon." 
- return (0, message, f2) - - if fileformat == 'unknown': # pragma: no cover - message = "We couldn't recognize the file type" - extension = os.path.splitext(f2)[1] - filename = os.path.splitext(f2)[0] - if extension == '.gz': - filename = os.path.splitext(filename)[0] - extension2 = os.path.splitext(filename)[1]+extension - extension = extension2 - f4 = filename+'a'+extension - copyfile(f2, f4) - _ = myqueue(queuehigh, - handle_sendemail_unrecognized, - f4, - r.user.email) - - return (0, message, f2) - - if fileformat == 'att': # pragma: no cover - # email attachment which can safely be ignored - return (0, '', f2) - - # Get workout type from fit & tcx - if (fileformat == 'fit'): # pragma: no cover - workouttype = get_workouttype_from_fit(f2, workouttype=workouttype) - # if (fileformat == 'tcx'): - # workouttype_from_tcx = get_workouttype_from_tcx(f2,workouttype=workouttype) - # if workouttype != 'rower' and workouttype_from_tcx not in mytypes.otwtypes: - # workouttype = workouttype_from_tcx - - # handle non-Painsled by converting it to painsled compatible CSV - if (fileformat != 'csv'): - f2, summary, oarlength, inboard, fileformat, impeller = handle_nonpainsled( - f2, - fileformat, - startdatetime=startdatetime, - summary=summary, - empowerfirmware=oarlockfirmware, - impeller=impeller, - ) - if not f2: # pragma: no cover - message = 'Something went wrong' - return (0, message, '') - - dosummary = (fileformat != 'fit' and 'speedcoach2' not in fileformat) - dosummary = dosummary or summary == '' - - if 'speedcoach2' in fileformat and workouttype == 'rower': - workouttype = 'water' - - if workoutsource is None: - workoutsource = fileformat - - dologging('debuglog.log', 'Saving to database with start date time {startdatetime}'.format( - startdatetime=startdatetime, - )) - - id, message = save_workout_database( - f2, r, - notes=notes, - workouttype=workouttype, - weightcategory=r.weightcategory, - adaptiveclass=r.adaptiveclass, - boattype=boattype, - 
makeprivate=makeprivate, - dosummary=dosummary, - workoutsource=workoutsource, - summary=summary, - startdatetime=startdatetime, - rpe=rpe, - inboard=inboard, oarlength=oarlength, - title=title, - forceunit='N', - impeller=impeller, - ) - - return (id, message, f2) - - -def split_workout(r, parent, splitsecond, splitmode): - data, row = getrowdata_db(id=parent.id) - latitude, longitude = get_latlon(parent.id) - if not latitude.empty and not longitude.empty: - data[' latitude'] = latitude - data[' longitude'] = longitude - - data['time'] = data['time'] / 1000. - - data1 = data[data['time'] <= splitsecond].copy() - data2 = data[data['time'] > splitsecond].copy() - - data1 = data1.sort_values(['time']) - data1 = data1.interpolate(method='linear', axis=0, limit_direction='both', - limit=10) - data1.fillna(method='bfill', inplace=True) - - # Some new stuff to try out - data1 = data1.groupby('time', axis=0).mean() - data1['time'] = data1.index - data1.reset_index(drop=True, inplace=True) - - data2 = data2.sort_values(['time']) - data2 = data2.interpolate(method='linear', axis=0, limit_direction='both', - limit=10) - data2.fillna(method='bfill', inplace=True) - - # Some new stuff to try out - data2 = data2.groupby('time', axis=0).mean() - data2['time'] = data2.index - data2.reset_index(drop=True, inplace=True) - - data1['pace'] = data1['pace'] / 1000. - data2['pace'] = data2['pace'] / 1000. 
- - data1.drop_duplicates(subset='time', inplace=True) - data2.drop_duplicates(subset='time', inplace=True) - - messages = [] - ids = [] - - if 'keep first' in splitmode: - if 'firstprivate' in splitmode: # pragma: no cover - setprivate = True - else: - setprivate = False - - id, message = new_workout_from_df(r, data1, - title=parent.name + ' (1)', - parent=parent, - setprivate=setprivate, - forceunit='N') - messages.append(message) - ids.append(encoder.encode_hex(id)) - if 'keep second' in splitmode: - data2['cumdist'] = data2['cumdist'] - data2.iloc[ - 0, - data2.columns.get_loc('cumdist') - ] - data2['distance'] = data2['distance'] - data2.iloc[ - 0, - data2.columns.get_loc('distance') - ] - data2['time'] = data2['time'] - data2.iloc[ - 0, - data2.columns.get_loc('time') - ] - if 'secondprivate' in splitmode: # pragma: no cover - setprivate = True - else: - setprivate = False - - dt = datetime.timedelta(seconds=splitsecond) - - id, message = new_workout_from_df(r, data2, - title=parent.name + ' (2)', - parent=parent, - setprivate=setprivate, - dt=dt, forceunit='N') - messages.append(message) - ids.append(encoder.encode_hex(id)) - - if 'keep original' not in splitmode: # pragma: no cover - if 'keep second' in splitmode or 'keep first' in splitmode: - parent.delete() - messages.append('Deleted Workout: ' + parent.name) - else: - messages.append('That would delete your workout') - ids.append(encoder.encode_hex(parent.id)) - elif 'originalprivate' in splitmode: # pragma: no cover - parent.privacy = 'hidden' - parent.save() - - return ids, messages - # Create new workout from data frame and store it in the database # This routine should be used everywhere in views.py and mailprocessing.py # Currently there is code duplication -def new_workout_from_df(r, df, - title='New Workout', - workoutsource='unknown', - boattype='1x', - workouttype='rower', - parent=None, - startdatetime='', - setprivate=False, - forceunit='lbs', - dt=datetime.timedelta()): - - message = None - 
- summary = '' - if parent: - oarlength = parent.oarlength - inboard = parent.inboard - - workoutsource = parent.workoutsource - workouttype = parent.workouttype - boattype = parent.boattype - notes = parent.notes - summary = parent.summary - rpe = parent.rpe - if parent.privacy == 'hidden': # pragma: no cover - makeprivate = True - else: - makeprivate = False - - startdatetime = parent.startdatetime + dt - else: - oarlength = 2.89 - inboard = 0.88 - notes = '' - summary = '' - makeprivate = False - rpe = 0 - if startdatetime == '': # pragma: no cover - startdatetime = timezone.now() - - if setprivate: # pragma: no cover - makeprivate = True - - timestr = strftime("%Y%m%d-%H%M%S") - - csvfilename = 'media/df_' + timestr + '.csv' - if forceunit == 'N': - # change to lbs for now - df['peakforce'] /= lbstoN - df['averageforce'] /= lbstoN - - df.rename(columns=columndict, inplace=True) - - starttimeunix = arrow.get(startdatetime).timestamp() - df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] - - df['TimeStamp (sec)'] = df['TimeStamp (sec)'] + starttimeunix - - row = rrdata(df=df) - - row.write_csv(csvfilename, gzip=True) - - id, message = save_workout_database(csvfilename, r, - workouttype=workouttype, - boattype=boattype, - title=title, - workoutsource=workoutsource, - notes=notes, - summary=summary, - oarlength=oarlength, - inboard=inboard, - makeprivate=makeprivate, - dosmooth=False, - rpe=rpe, - consistencychecks=False) - - _ = myqueue(queuehigh, handle_calctrimp, id, csvfilename, - r.ftp, r.sex, r.hrftp, r.max, r.rest) - - return (id, message) - - # A wrapper around the rowingdata class, with some error catching @@ -2495,7 +1101,7 @@ def rdata(file, rower=rrower()): # Remove all stroke data for workout ID from database -def delete_strokedata(id): +def delete_strokedata(id, debug=False): dirname = 'media/strokedata_{id}.parquet.gz'.format(id=id) try: shutil.rmtree(dirname) @@ -2510,8 +1116,8 @@ def delete_strokedata(id): # Replace stroke data in DB with data from 
CSV file -def update_strokedata(id, df): - delete_strokedata(id) +def update_strokedata(id, df, debug=False): + delete_strokedata(id, debug=debug) _ = dataprep(df, id=id, bands=True, barchart=True, otwpower=True) # Test that all data are of a numerical time @@ -2539,7 +1145,7 @@ def getrowdata_db(id=0, doclean=False, convertnewtons=True, if data.empty: rowdata, row = getrowdata(id=id) - if not rowdata.empty: + if not rowdata.empty: # pragma: no cover data = dataprep(rowdata.df, id=id, bands=True, barchart=True, otwpower=True) else: @@ -2562,7 +1168,8 @@ def getrowdata_db(id=0, doclean=False, convertnewtons=True, # Fetch a subset of the data from the DB -def getsmallrowdata_db(columns, ids=[], doclean=True, workstrokesonly=True, compute=True): +def getsmallrowdata_db(columns, ids=[], doclean=True, workstrokesonly=True, compute=True, + debug=False): # prepmultipledata(ids) if ids: @@ -2741,9 +1348,6 @@ def read_cols_df_sql(ids, columns, convertnewtons=True): return df, extracols -def initiate_cp(r): - _ = update_rolling_cp(r, otwtypes, 'water') - _ = update_rolling_cp(r, otetypes, 'erg') # Read stroke data from the DB for a Workout ID. 
Returns a pandas dataframe @@ -2887,7 +1491,7 @@ def add_efficiency(id=0): # pragma: no cover def dataprep(rowdatadf, id=0, bands=True, barchart=True, otwpower=True, - empower=True, inboard=0.88, forceunit='lbs'): + empower=True, inboard=0.88, forceunit='lbs', debug=False): if rowdatadf.empty: return 0 @@ -3172,6 +1776,1684 @@ def dataprep(rowdatadf, id=0, bands=True, barchart=True, otwpower=True, return data + +def delete_agegroup_db(age, sex, weightcategory, debug=False): + if debug: # pragma: no cover + engine = create_engine(database_url_debug, echo=False) + else: # pragma: no cover + engine = create_engine(database_url, echo=False) + + query = sa.text("DELETE from {table} WHERE age='{age}' and weightcategory='{weightcategory}' and sex='{sex}';".format( + sex=sex, + age=age, + weightcategory=weightcategory, + table='calcagegrouprecords' + )) + with engine.connect() as conn, conn.begin(): + _ = conn.execute(query) + conn.close() + engine.dispose() + + + + + +def update_agegroup_db(age, sex, weightcategory, wcdurations, wcpower, + debug=False): + + delete_agegroup_db(age, sex, weightcategory, debug=debug) + + wcdurations = [None if type(y) is float and np.isnan( + y) else y for y in wcdurations] + wcpower = [None if type(y) is float and np.isnan(y) + else y for y in wcpower] + + df = pd.DataFrame( + { + 'duration': wcdurations, + 'power': wcpower, + } + ) + + df['sex'] = sex + df['age'] = age + df['weightcategory'] = weightcategory + df.replace([np.inf, -np.inf], np.nan, inplace=True) + df.dropna(axis=0, inplace=True) + + if debug: # pragma: no cover # pragma: no cover + engine = create_engine(database_url_debug, echo=False) + else: + engine = create_engine(database_url, echo=False) + + table = 'calcagegrouprecords' + with engine.connect() as conn, conn.begin(): + df.to_sql(table, engine, if_exists='append', index=False) + conn.close() + engine.dispose() + + + +def add_c2_stroke_data_db(strokedata, workoutid, starttimeunix, csvfilename, + debug=False, 
workouttype='rower'): + + res = make_cumvalues(0.1*strokedata['t']) + cum_time = res[0] + lapidx = res[1] + + unixtime = cum_time+starttimeunix + # unixtime[0] = starttimeunix + seconds = 0.1*strokedata.loc[:, 't'] + + nr_rows = len(unixtime) + + try: # pragma: no cover + latcoord = strokedata.loc[:, 'lat'] + loncoord = strokedata.loc[:, 'lon'] + except: + latcoord = np.zeros(nr_rows) + loncoord = np.zeros(nr_rows) + + try: + strokelength = strokedata.loc[:, 'strokelength'] + except: + strokelength = np.zeros(nr_rows) + + dist2 = 0.1*strokedata.loc[:, 'd'] + + try: + spm = strokedata.loc[:, 'spm'] + except KeyError: # pragma: no cover + spm = 0*dist2 + + try: + hr = strokedata.loc[:, 'hr'] + except KeyError: # pragma: no cover + hr = 0*spm + + pace = strokedata.loc[:, 'p']/10. + pace = np.clip(pace, 0, 1e4) + pace = pace.replace(0, 300) + + velo = 500./pace + power = 2.8*velo**3 + if workouttype == 'bike': # pragma: no cover + velo = 1000./pace + + # save csv + # Create data frame with all necessary data to write to csv + df = pd.DataFrame({'TimeStamp (sec)': unixtime, + ' Horizontal (meters)': dist2, + ' Cadence (stokes/min)': spm, + ' HRCur (bpm)': hr, + ' longitude': loncoord, + ' latitude': latcoord, + ' Stroke500mPace (sec/500m)': pace, + ' Power (watts)': power, + ' DragFactor': np.zeros(nr_rows), + ' DriveLength (meters)': np.zeros(nr_rows), + ' StrokeDistance (meters)': strokelength, + ' DriveTime (ms)': np.zeros(nr_rows), + ' StrokeRecoveryTime (ms)': np.zeros(nr_rows), + ' AverageDriveForce (lbs)': np.zeros(nr_rows), + ' PeakDriveForce (lbs)': np.zeros(nr_rows), + ' lapIdx': lapidx, + ' WorkoutState': 4, + ' ElapsedTime (sec)': seconds, + 'cum_dist': dist2 + }) + + df.sort_values(by='TimeStamp (sec)', ascending=True) + + # Create CSV file name and save data to CSV file + + res = df.to_csv(csvfilename, index_label='index', + compression='gzip') + + + data = dataprep(df, id=workoutid, bands=False, debug=debug) + + return data + +# Creates C2 stroke data 
+def create_c2_stroke_data_db( + distance, duration, workouttype, + workoutid, starttimeunix, csvfilename, debug=False): # pragma: no cover + + nr_strokes = int(distance/10.) + + totalseconds = duration.hour*3600. + totalseconds += duration.minute*60. + totalseconds += duration.second + totalseconds += duration.microsecond/1.e6 + + try: + spm = 60.*nr_strokes/totalseconds + except ZeroDivisionError: + spm = 20*np.zeros(nr_strokes) + + try: + _ = totalseconds/float(nr_strokes) + except ZeroDivisionError: + return 0 + + elapsed = np.arange(nr_strokes)*totalseconds/(float(nr_strokes-1)) + + d = np.arange(nr_strokes)*distance/(float(nr_strokes-1)) + + unixtime = starttimeunix + elapsed + + pace = 500.*totalseconds/distance + + if workouttype in ['rower', 'slides', 'dynamic']: + try: + velo = distance/totalseconds + except ZeroDivisionError: + velo = 0 + power = 2.8*velo**3 + else: + power = 0 + + df = pd.DataFrame({ + 'TimeStamp (sec)': unixtime, + ' Horizontal (meters)': d, + ' Cadence (stokes/min)': spm, + ' Stroke500mPace (sec/500m)': pace, + ' ElapsedTime (sec)': elapsed, + ' Power (watts)': power, + ' HRCur (bpm)': np.zeros(nr_strokes), + ' longitude': np.zeros(nr_strokes), + ' latitude': np.zeros(nr_strokes), + ' DragFactor': np.zeros(nr_strokes), + ' DriveLength (meters)': np.zeros(nr_strokes), + ' StrokeDistance (meters)': np.zeros(nr_strokes), + ' DriveTime (ms)': np.zeros(nr_strokes), + ' StrokeRecoveryTime (ms)': np.zeros(nr_strokes), + ' AverageDriveForce (lbs)': np.zeros(nr_strokes), + ' PeakDriveForce (lbs)': np.zeros(nr_strokes), + ' lapIdx': np.zeros(nr_strokes), + 'cum_dist': d + }) + + df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] + + _ = df.to_csv(csvfilename, index_label='index', compression='gzip') + + data = dataprep(df, id=workoutid, bands=False, debug=debug) + + return data + + +def update_empower(id, inboard, oarlength, boattype, df, f1, debug=False): # pragma: no cover + + corr_factor = 1.0 + if 'x' in boattype: + # sweep + a = 0.06 + b = 
0.275 + else: + # scull + a = 0.15 + b = 0.275 + + corr_factor = empower_bug_correction(oarlength, inboard, a, b) + + success = False + + try: + df['power empower old'] = df[' Power (watts)'] + df[' Power (watts)'] = df[' Power (watts)'] * corr_factor + df['driveenergy empower old'] = df['driveenergy'] + df['driveenergy'] = df['driveenergy'] * corr_factor + success = True + except KeyError: + pass + + if success: + delete_strokedata(id, debug=debug) + if debug: # pragma: no cover + print("updated ", id) + print("correction ", corr_factor) + else: + if debug: # pragma: no cover + print("not updated ", id) + + _ = dataprep(df, id=id, bands=True, barchart=True, otwpower=True, debug=debug) + + row = rrdata(df=df) + row.write_csv(f1, gzip=True) + + return success + + +from rowers.tasks import ( + handle_sendemail_unrecognized, handle_setcp, + handle_getagegrouprecords, handle_update_wps, + handle_request_post, handle_calctrimp, + handle_updatecp, handle_updateergcp, + handle_sendemail_breakthrough, + handle_sendemail_hard, +) +from rowers.tasks import handle_zip_file + +from pandas import DataFrame, Series +import dask.dataframe as dd +from dask.delayed import delayed +import pyarrow.parquet as pq +import pyarrow as pa + +from pyarrow.lib import ArrowInvalid + +from django.utils import timezone +from django.utils.timezone import get_current_timezone +from django.urls import reverse +import requests + +from django.core.exceptions import ValidationError + +from time import strftime +import arrow + +thetimezone = get_current_timezone() + +allowedcolumns = [key for key, value in strokedatafields.items()] + +queue = django_rq.get_queue('default') +queuelow = django_rq.get_queue('low') +queuehigh = django_rq.get_queue('default') + + + +def fetchcperg(rower, theworkouts): + thefilenames = [w.csvfilename for w in theworkouts] + cpdf = getcpdata_sql(rower.id, table='ergcpdata') + + _ = myqueue( + queuelow, + handle_updateergcp, + rower.id, + thefilenames) + + return cpdf + +def 
workout_goldmedalstandard(workout, reset=False): + if workout.goldmedalstandard > 0 and not reset: + return workout.goldmedalstandard, workout.goldmedalseconds + if workout.workouttype in rowtypes: + goldmedalstandard, goldmedalseconds = calculate_goldmedalstandard( + workout.user, workout) + if workout.workouttype in otwtypes: + factor = 100./(100.-workout.user.otwslack) + goldmedalstandard = goldmedalstandard*factor + workout.goldmedalstandard = goldmedalstandard + workout.goldmedalseconds = goldmedalseconds + workout.save() + return goldmedalstandard, goldmedalseconds + else: + return 0, 0 + + +def check_marker(workout): + r = workout.user + gmstandard, gmseconds = workout_goldmedalstandard(workout) + if gmseconds < 60: + return None + + dd = arrow.get(workout.date).datetime-datetime.timedelta(days=r.kfit) + ws = Workout.objects.filter(date__gte=dd, + date__lte=workout.date, + user=r, duplicate=False, + workouttype__in=mytypes.rowtypes, + ).order_by("date") + ids = [] + gms = [] + for w in ws: + gmstandard, gmseconds = workout_goldmedalstandard(w) + if gmseconds > 60: + ids.append(w.id) + gms.append(gmstandard) + + df = pd.DataFrame({ + 'id': ids, + 'gms': gms, + }) + + if df.empty: # pragma: no cover + workout.ranking = True + workout.save() + return workout + + indexmax = df['gms'].idxmax() + theid = df.loc[indexmax, 'id'] + + wmax = Workout.objects.get(id=theid) + # gms_max = wmax.goldmedalstandard + + # check if equal, bigger, or smaller than previous + if not wmax.rankingpiece: + rankingworkouts = ws.filter(rankingpiece=True) + if len(rankingworkouts) == 0: + wmax.rankingpiece = True + wmax.save() + return wmax + + lastranking = rankingworkouts[len(rankingworkouts)-1] + if lastranking.goldmedalstandard+0.2 < wmax.goldmedalstandard: # pragma: no cover + wmax.rankingpiece = True + wmax.save() + return wmax + else: # pragma: no cover + return wmax + + return None + + +def workout_summary_to_df( + rower, + startdate=datetime.datetime(1970, 1, 1), + 
enddate=timezone.now()+timezone.timedelta(days=1)): + + ws = Workout.objects.filter( + user=rower, date__gte=startdate, date__lte=enddate, + duplicate=False + ).order_by("startdatetime") + + types = [] + names = [] + ids = [] + startdatetimes = [] + timezones = [] + distances = [] + durations = [] + weightcategories = [] + adaptivetypes = [] + weightvalues = [] + notes = [] + tcx_links = [] + csv_links = [] + workout_links = [] + goldstandards = [] + goldstandarddurations = [] + rscores = [] + hrtss = [] + trimps = [] + rankingpieces = [] + boattypes = [] + + counter1 = 0 + counter2 = len(ws) + + for w in ws: + counter1 += 1 + if counter1 % 10 == 0: # pragma: no cover + print(counter1, '/', counter2) + types.append(w.workouttype) + names.append(w.name) + ids.append(encoder.encode_hex(w.id)) + startdatetimes.append(w.startdatetime) + timezones.append(w.timezone) + distances.append(w.distance) + durations.append(w.duration) + weightcategories.append(w.weightcategory) + adaptivetypes.append(w.adaptiveclass) + weightvalues.append(w.weightvalue) + boattypes.append(w.boattype) + notes.append(w.notes) + tcx_link = SITE_URL+'/rowers/workout/{id}/emailtcx'.format( + id=encoder.encode_hex(w.id) + ) + tcx_links.append(tcx_link) + csv_link = SITE_URL+'/rowers/workout/{id}/emailcsv'.format( + id=encoder.encode_hex(w.id) + ) + csv_links.append(csv_link) + workout_link = SITE_URL+'/rowers/workout/{id}/'.format( + id=encoder.encode_hex(w.id) + ) + workout_links.append(workout_link) + trimps.append(workout_trimp(w)[0]) + rscore = workout_rscore(w) + rscores.append(int(rscore[0])) + hrtss.append(int(w.hrtss)) + goldstandard, goldstandardduration = workout_goldmedalstandard(w) + goldstandards.append(int(goldstandard)) + goldstandarddurations.append(int(goldstandardduration)) + rankingpieces.append(w.rankingpiece) + + df = pd.DataFrame({ + 'ID': ids, + 'date': startdatetimes, + 'name': names, + 'link': workout_links, + 'timezone': timezones, + 'type': types, + 'boat type': boattypes, 
+ 'distance (m)': distances, + 'duration ': durations, + 'ranking piece': rankingpieces, + 'weight category': weightcategories, + 'adaptive classification': adaptivetypes, + 'weight (kg)': weightvalues, + 'Stroke Data TCX': tcx_links, + 'Stroke Data CSV': csv_links, + 'TRIMP Training Load': trimps, + 'TSS Training Load': rscores, + 'hrTSS Training Load': hrtss, + 'GS': goldstandards, + 'GS_secs': goldstandarddurations, + 'notes': notes, + }) + + return df + + +def resample(id, r, parent, overwrite='copy'): + data, row = getrowdata_db(id=id) + messages = [] + + # resample + startdatetime = row.startdatetime + data['datetime'] = data['time'].apply( + lambda x: startdatetime+datetime.timedelta(seconds=x/1000.)) + + data = data.resample('S', on='datetime').mean() + data.interpolate(method='linear', inplace=True) + data.reset_index(drop=True, inplace=True) + + # data.drop('datetime',inplace=True) + data['pace'] = data['pace'] / 1000. + data['time'] = data['time'] / 1000. + + if overwrite == 'overwrite': + # remove CP data + try: + cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=parent.id) + os.remove(cpfile) + except FileNotFoundError: + pass + # save + data.rename(columns=columndict, inplace=True) + + starttimeunix = arrow.get(startdatetime).timestamp() + data[' ElapsedTime (sec)'] = data['TimeStamp (sec)'] + + data['TimeStamp (sec)'] = data['TimeStamp (sec)'] + starttimeunix + + row = rrdata(df=data) + + row.write_csv(parent.csvfilename, gzip=True) + + _ = dataprep(row.df, id=parent.id, bands=True, barchart=True, + otwpower=True, empower=True, inboard=parent.inboard) + isbreakthrough, ishard = checkbreakthrough(parent, r) + _ = check_marker(parent) + _ = update_wps(r, mytypes.otwtypes) + _ = update_wps(r, mytypes.otetypes) + + tss, normp = workout_rscore(parent) + goldmedalstandard, goldmedalseconds = workout_goldmedalstandard(parent) + else: + id, message = new_workout_from_df(r, data, title=parent.name + '(Resampled)', + parent=parent, forceunit='N') + 
messages.append(message) + + return data, id, messages + + + + +def calculate_goldmedalstandard(rower, workout, recurrance=True): + cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) + try: + df = pd.read_parquet(cpfile) + except: + background = True + if settings.TESTING: + background = False + df, delta, cpvalues = setcp(workout, background=background) + if df.empty: + return 0, 0 + + if df.empty and recurrance: # pragma: no cover + df, delta, cpvalues = setcp(workout, recurrance=False, background=True) + if df.empty: + return 0, 0 + + age = calculate_age(rower.birthdate, today=workout.date) + + agerecords = CalcAgePerformance.objects.filter( + age=age, + sex=rower.sex, + weightcategory=rower.weightcategory + ) + + wcdurations = [] + wcpower = [] + getrecords = False + if not settings.TESTING: # pragma: no cover + if len(agerecords) == 0: # pragma: no cover + getrecords = True + + for record in agerecords: # pragma: no cover + if record.power > 0: + wcdurations.append(record.duration) + wcpower.append(record.power) + else: + getrecords = True + + if getrecords: # pragma: no cover + durations = [1, 4, 30, 60] + distances = [100, 500, 1000, 2000, 5000, 6000, 10000, 21097, 42195] + df2 = pd.DataFrame( + list( + C2WorldClassAgePerformance.objects.filter( + sex=rower.sex, + weightcategory=rower.weightcategory + ).values() + ) + ) + jsondf = df2.to_json() + _ = myqueue(queuelow, handle_getagegrouprecords, + jsondf, distances, durations, age, rower.sex, rower.weightcategory) + + wcpower = pd.Series(wcpower, dtype='float') + wcdurations = pd.Series(wcdurations, dtype='float') + + def fitfunc(pars, x): + return pars[0] / (1+(x/pars[2])) + pars[1]/(1+(x/pars[3])) + + def errfunc(pars, x, y): + return fitfunc(pars, x)-y + + if len(wcdurations) >= 4: # pragma: no cover + p1wc, success = optimize.leastsq( + errfunc, p0[:], args=(wcdurations, wcpower)) + else: + factor = fitfunc(p0, wcdurations.mean()/wcpower.mean()) + p1wc = [p0[0]/factor, p0[1]/factor, p0[2], 
p0[3]] + + return 0, 0 + + times = df['delta'] + powers = df['cp'] + wcpowers = fitfunc(p1wc, times) + scores = 100.*powers/wcpowers + + try: + indexmax = scores.idxmax() + delta = int(df.loc[indexmax, 'delta']) + maxvalue = scores.max() + except (ValueError, TypeError): # pragma: no cover + indexmax = 0 + delta = 0 + maxvalue = 0 + + return maxvalue, delta + + + +def setcp(workout, background=False, recurrance=True): + filename = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) + + strokesdf = getsmallrowdata_db( + ['power', 'workoutid', 'time'], ids=[workout.id]) + + try: + if strokesdf['power'].std() == 0: + return pd.DataFrame(), pd.Series(dtype='float'), pd.Series(dtype='float') + except KeyError: + return pd.DataFrame(), pd.Series(dtype='float'), pd.Series(dtype='float') + + if background: # pragma: no cover + _ = myqueue(queuelow, handle_setcp, strokesdf, filename, workout.id) + return pd.DataFrame({'delta': [], 'cp': []}), pd.Series(dtype='float'), pd.Series(dtype='float') + + if not strokesdf.empty: + totaltime = strokesdf['time'].max() + try: + powermean = strokesdf['power'].mean() + except KeyError: # pragma: no cover + powermean = 0 + + if powermean != 0: + thesecs = totaltime + maxt = 1.05 * thesecs + + if maxt > 0: + logarr = datautils.getlogarr(maxt) + dfgrouped = strokesdf.groupby(['workoutid']) + delta, cpvalues, avgpower = datautils.getcp(dfgrouped, logarr) + + df = pd.DataFrame({ + 'delta': delta, + 'cp': cpvalues, + 'id': workout.id, + }) + df.to_parquet(filename, engine='fastparquet', + compression='GZIP') + if recurrance: + goldmedalstandard, goldmedalduration = calculate_goldmedalstandard( + workout.user, workout) + workout.goldmedalstandard = goldmedalstandard + workout.goldmedalduration = goldmedalduration + workout.save() + return df, delta, cpvalues + + return pd.DataFrame({'delta': [], 'cp': []}), pd.Series(dtype='float'), pd.Series(dtype='float') + + +def update_wps(r, types, mode='water', asynchron=True): + firstdate = 
timezone.now()-datetime.timedelta(days=r.cprange) + workouts = Workout.objects.filter( + date__gte=firstdate, + workouttype__in=types, + user=r + ) + + ids = [w.id for w in workouts] + if asynchron: + _ = myqueue( + queue, + handle_update_wps, + r.id, + types, + ids, + mode + ) + + df = getsmallrowdata_db(['time', 'driveenergy'], ids=ids) + + try: + mask = df['driveenergy'] > 100 + except (KeyError, TypeError): + return False + try: + wps_median = int(df.loc[mask, 'driveenergy'].median()) + if mode == 'water': + r.median_wps = wps_median + else: # pragma: no cover + r.median_wps_erg = wps_median + + r.save() + except ValueError: # pragma: no cover + pass + + return True + +def join_workouts(r, ids, title='Joined Workout', + parent=None, + setprivate=False, + forceunit='lbs', killparents=False): + + message = None + + summary = '' + if parent: # pragma: no cover + oarlength = parent.oarlength + inboard = parent.inboard + workouttype = parent.workouttype + notes = parent.notes + summary = parent.summary + if parent.privacy == 'hidden': + makeprivate = True + else: + makeprivate = False + + startdatetime = parent.startdatetime + else: + oarlength = 2.89 + inboard = 0.88 + workouttype = 'rower' + notes = '' + summary = '' + makeprivate = False + startdatetime = timezone.now() + + if setprivate is True and makeprivate is False: # pragma: no cover + makeprivate = True + elif setprivate is False and makeprivate is True: # pragma: no cover + makeprivate = False + + # reorder in chronological order + ws = Workout.objects.filter(id__in=ids).order_by("startdatetime") + + if not parent: + parent = ws[0] + oarlength = parent.oarlength + inboard = parent.inboard + workouttype = parent.workouttype + notes = parent.notes + summary = parent.summary + if parent.privacy == 'hidden': + makeprivate = True + else: + makeprivate = False + startdatetime = parent.startdatetime + + files = [w.csvfilename for w in ws] + + row = rdata(files[0]) + + files = files[1:] + + while len(files): + 
row2 = rdata(files[0]) + if row2 != 0: + row = row+row2 + files = files[1:] + + timestr = strftime("%Y%m%d-%H%M%S") + csvfilename = 'media/df_' + timestr + '.csv' + + row.write_csv(csvfilename, gzip=True) + id, message = save_workout_database(csvfilename, r, + workouttype=workouttype, + title=title, + notes=notes, + oarlength=oarlength, + inboard=inboard, + startdatetime=startdatetime, + makeprivate=makeprivate, + summary=summary, + dosmooth=False, + consistencychecks=False) + + if killparents: # pragma: no cover + for w in ws: + w.delete() + + w = Workout.objects.get(id=id) + w.duplicate = False + w.save() + if message is not None and "duplicate" in message: + message = "" + + return (id, message) + + + +def fetchcp_new(rower, workouts): + + data = [] + for workout in workouts: + cpfile = 'media/cpdata_{id}.parquet.gz'.format(id=workout.id) + try: + df = pd.read_parquet(cpfile) + df['workout'] = str(workout) + df['url'] = workout.url() + data.append(df) + except: + # CP data file doesn't exist yet. 
has to be created + df, delta, cpvalues = setcp(workout) + df['workout'] = str(workout) + df['url'] = workout.url() + data.append(df) + + if len(data) == 0: + return pd.Series(dtype='float'), pd.Series(dtype='float'), 0, pd.Series(dtype='float'), pd.Series(dtype='float') + if len(data) > 1: + df = pd.concat(data, axis=0) + + try: + df = df[df['cp'] == df.groupby(['delta'])['cp'].transform('max')] + except KeyError: # pragma: no cover + return pd.Series(dtype='float'), pd.Series(dtype='float'), 0, pd.Series(dtype='float'), pd.Series(dtype='float') + + df = df.sort_values(['delta']).reset_index() + + return df['delta'], df['cp'], 0, df['workout'], df['url'] + + +def fetchcp(rower, theworkouts, table='cpdata'): # pragma: no cover + # get all power data from database (plus workoutid) + theids = [int(w.id) for w in theworkouts] + columns = ['power', 'workoutid', 'time'] + df = getsmallrowdata_db(columns, ids=theids) + df.dropna(inplace=True, axis=0) + if df.empty: + avgpower2 = {} + for id in theids: + avgpower2[id] = 0 + return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 + + try: + dfgrouped = df.groupby(['workoutid']) + except KeyError: + avgpower2 = {} + return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 + try: + avgpower2 = dict(dfgrouped.mean()['power'].astype(int)) + except KeyError: + avgpower2 = {} + for id in theids: + avgpower2[id] = 0 + return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 + + cpdf = getcpdata_sql(rower.id, table=table) + + if not cpdf.empty: + return cpdf['delta'], cpdf['cp'], avgpower2 + else: + _ = myqueue(queuelow, + handle_updatecp, + rower.id, + theids, + table=table) + + return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 + + return pd.Series([], dtype='float'), pd.Series([], dtype='float'), avgpower2 + + +def update_rolling_cp(r, types, mode='water'): + firstdate = timezone.now()-datetime.timedelta(days=r.cprange) + workouts = 
Workout.objects.filter( + date__gte=firstdate, + workouttype__in=types, + user=r + ) + + delta, cp, avgpower, workoutnames, urls = fetchcp_new(r, workouts) + + powerdf = pd.DataFrame({ + 'Delta': delta, + 'CP': cp, + }) + + powerdf = powerdf[powerdf['CP'] > 0] + powerdf.dropna(axis=0, inplace=True) + powerdf.sort_values(['Delta', 'CP'], ascending=[1, 0], inplace=True) + powerdf.drop_duplicates(subset='Delta', keep='first', inplace=True) + + res2 = datautils.cpfit(powerdf) + if len(powerdf) != 0: + if mode == 'water': + p1 = res2[0] + r.p0 = p1[0] + r.p1 = p1[1] + r.p2 = p1[2] + r.p3 = p1[3] + r.cpratio = res2[3] + r.save() + else: + p1 = res2[0] + r.ep0 = p1[0] + r.ep1 = p1[1] + r.ep2 = p1[2] + r.ep3 = p1[3] + r.ecpratio = res2[3] + r.save() + + return True + return False + +def initiate_cp(r): + _ = update_rolling_cp(r, otwtypes, 'water') + _ = update_rolling_cp(r, otetypes, 'erg') + +def split_workout(r, parent, splitsecond, splitmode): + data, row = getrowdata_db(id=parent.id) + latitude, longitude = get_latlon(parent.id) + if not latitude.empty and not longitude.empty: + data[' latitude'] = latitude + data[' longitude'] = longitude + + data['time'] = data['time'] / 1000. + + data1 = data[data['time'] <= splitsecond].copy() + data2 = data[data['time'] > splitsecond].copy() + + data1 = data1.sort_values(['time']) + data1 = data1.interpolate(method='linear', axis=0, limit_direction='both', + limit=10) + data1.fillna(method='bfill', inplace=True) + + # Some new stuff to try out + data1 = data1.groupby('time', axis=0).mean() + data1['time'] = data1.index + data1.reset_index(drop=True, inplace=True) + + data2 = data2.sort_values(['time']) + data2 = data2.interpolate(method='linear', axis=0, limit_direction='both', + limit=10) + data2.fillna(method='bfill', inplace=True) + + # Some new stuff to try out + data2 = data2.groupby('time', axis=0).mean() + data2['time'] = data2.index + data2.reset_index(drop=True, inplace=True) + + data1['pace'] = data1['pace'] / 1000. 
+ data2['pace'] = data2['pace'] / 1000. + + data1.drop_duplicates(subset='time', inplace=True) + data2.drop_duplicates(subset='time', inplace=True) + + messages = [] + ids = [] + + if 'keep first' in splitmode: + if 'firstprivate' in splitmode: # pragma: no cover + setprivate = True + else: + setprivate = False + + id, message = new_workout_from_df(r, data1, + title=parent.name + ' (1)', + parent=parent, + setprivate=setprivate, + forceunit='N') + messages.append(message) + ids.append(encoder.encode_hex(id)) + if 'keep second' in splitmode: + data2['cumdist'] = data2['cumdist'] - data2.iloc[ + 0, + data2.columns.get_loc('cumdist') + ] + data2['distance'] = data2['distance'] - data2.iloc[ + 0, + data2.columns.get_loc('distance') + ] + data2['time'] = data2['time'] - data2.iloc[ + 0, + data2.columns.get_loc('time') + ] + if 'secondprivate' in splitmode: # pragma: no cover + setprivate = True + else: + setprivate = False + + dt = datetime.timedelta(seconds=splitsecond) + + id, message = new_workout_from_df(r, data2, + title=parent.name + ' (2)', + parent=parent, + setprivate=setprivate, + dt=dt, forceunit='N') + messages.append(message) + ids.append(encoder.encode_hex(id)) + + if 'keep original' not in splitmode: # pragma: no cover + if 'keep second' in splitmode or 'keep first' in splitmode: + parent.delete() + messages.append('Deleted Workout: ' + parent.name) + else: + messages.append('That would delete your workout') + ids.append(encoder.encode_hex(parent.id)) + elif 'originalprivate' in splitmode: # pragma: no cover + parent.privacy = 'hidden' + parent.save() + + return ids, messages + +# create a new workout from manually entered data +def create_row_df(r, distance, duration, startdatetime, workouttype='rower', + avghr=None, avgpwr=None, avgspm=None, + rankingpiece=False, + duplicate=False, rpe=-1, + title='Manual entry', notes='', weightcategory='hwt', + adaptiveclass='None'): + + if duration is not None: + totalseconds = duration.hour*3600. 
+ totalseconds += duration.minute*60. + totalseconds += duration.second + totalseconds += duration.microsecond/1.e6 + else: # pragma: no cover + totalseconds = 60. + + if distance is None: # pragma: no cover + distance = 0 + + try: + nr_strokes = int(distance/10.) + except TypeError: # pragma: no cover + nr_strokes = int(20.*totalseconds) + + if nr_strokes == 0: # pragma: no cover + nr_strokes = 100 + + unixstarttime = arrow.get(startdatetime).timestamp() + + if not avgspm: # pragma: no cover + try: + spm = 60.*nr_strokes/totalseconds + except ZeroDivisionError: + spm = 20. + else: + spm = avgspm + + # step = totalseconds/float(nr_strokes) + + elapsed = np.arange(nr_strokes)*totalseconds/(float(nr_strokes-1)) + + # dstep = distance/float(nr_strokes) + + d = np.arange(nr_strokes)*distance/(float(nr_strokes-1)) + + unixtime = unixstarttime + elapsed + + try: + pace = 500.*totalseconds/distance + except ZeroDivisionError: # pragma: no cover + pace = 240. + + if workouttype in ['rower', 'slides', 'dynamic']: + try: + velo = distance/totalseconds + except ZeroDivisionError: # pragma: no cover + velo = 2.4 + power = 2.8*velo**3 + elif avgpwr is not None: # pragma: no cover + power = avgpwr + else: # pragma: no cover + power = 0 + + if avghr is not None: + hr = avghr + else: # pragma: no cover + hr = 0 + + df = pd.DataFrame({ + 'TimeStamp (sec)': unixtime, + ' Horizontal (meters)': d, + ' Cadence (stokes/min)': spm, + ' Stroke500mPace (sec/500m)': pace, + ' ElapsedTime (sec)': elapsed, + ' Power (watts)': power, + ' HRCur (bpm)': hr, + }) + + timestr = strftime("%Y%m%d-%H%M%S") + + csvfilename = 'media/df_' + timestr + '.csv' + df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] + + row = rrdata(df=df) + + row.write_csv(csvfilename, gzip=True) + + id, message = save_workout_database(csvfilename, r, + title=title, + notes=notes, + rankingpiece=rankingpiece, + duplicate=duplicate, + dosmooth=False, + workouttype=workouttype, + consistencychecks=False, + 
weightcategory=weightcategory, + adaptiveclass=adaptiveclass, + totaltime=totalseconds) + + return (id, message) + + + +def checkbreakthrough(w, r): + isbreakthrough = False + ishard = False + workouttype = w.workouttype + if workouttype in rowtypes: + cpdf, delta, cpvalues = setcp(w) + if not cpdf.empty: + if workouttype in otwtypes: + res, btvalues, res2 = utils.isbreakthrough( + delta, cpvalues, r.p0, r.p1, r.p2, r.p3, r.cpratio) + _ = update_rolling_cp(r, otwtypes, 'water') + + elif workouttype in otetypes: + res, btvalues, res2 = utils.isbreakthrough( + delta, cpvalues, r.ep0, r.ep1, r.ep2, r.ep3, r.ecpratio) + _ = update_rolling_cp(r, otetypes, 'erg') + else: # pragma: no cover + res = 0 + res2 = 0 + if res: + isbreakthrough = True + if res2 and not isbreakthrough: # pragma: no cover + ishard = True + + # submit email task to send email about breakthrough workout + if isbreakthrough: + if not w.duplicate: + w.rankingpiece = True + w.save() + if r.getemailnotifications and not r.emailbounced: # pragma: no cover + _ = myqueue(queuehigh, handle_sendemail_breakthrough, + w.id, + r.user.email, + r.user.first_name, + r.user.last_name, + btvalues=btvalues.to_json()) + + # submit email task to send email about breakthrough workout + if ishard: # pragma: no cover + if not w.duplicate: + w.rankingpiece = True + w.save() + if r.getemailnotifications and not r.emailbounced: + _ = myqueue(queuehigh, handle_sendemail_hard, + w.id, + r.user.email, + r.user.first_name, + r.user.last_name, + btvalues=btvalues.to_json()) + + return isbreakthrough, ishard + + +# Processes painsled CSV file to database +def save_workout_database(f2, r, dosmooth=True, workouttype='rower', + boattype='1x', + adaptiveclass='None', + weightcategory='hwt', + dosummary=True, title='Workout', + workoutsource='unknown', + notes='', totaldist=0, totaltime=0, + rankingpiece=False, + rpe=-1, + duplicate=False, + summary='', + makeprivate=False, + oarlength=2.89, inboard=0.88, + forceunit='lbs', + 
consistencychecks=False, + startdatetime='', + impeller=False): + + message = None + + powerperc = 100 * np.array([r.pw_ut2, + r.pw_ut1, + r.pw_at, + r.pw_tr, r.pw_an]) / r.ftp + + # make workout and put in database + rr = rrower(hrmax=r.max, hrut2=r.ut2, + hrut1=r.ut1, hrat=r.at, + hrtr=r.tr, hran=r.an, ftp=r.ftp, + powerperc=powerperc, powerzones=r.powerzones) + row = rdata(f2, rower=rr) + + startdatetime, startdate, starttime, timezone_str, partofday = get_startdate_time_zone( + r, row, startdatetime=startdatetime) + + if title is None or title == '': + title = 'Workout' + + if partofday is not None: + title = '{partofday} {workouttype}'.format( + partofday=partofday, + workouttype=workouttype, + ) + + if row.df.empty: # pragma: no cover + return (0, 'Error: CSV data file was empty') + + dtavg = row.df['TimeStamp (sec)'].diff().mean() + + if dtavg < 1: + newdf = df_resample(row.df) + try: + os.remove(f2) + except: + pass + return new_workout_from_df(r, newdf, + title=title, boattype=boattype, + workouttype=workouttype, + workoutsource=workoutsource, startdatetime=startdatetime) + try: + checks = row.check_consistency() + allchecks = 1 + for key, value in checks.items(): + if not value: + allchecks = 0 + except ZeroDivisionError: # pragma: no cover + pass + + if not allchecks and consistencychecks: + # row.repair() + pass + + if row == 0: # pragma: no cover + return (0, 'Error: CSV data file not found') + + try: + lat = row.df[' latitude'] + if lat.mean() != 0 and lat.std() != 0 and workouttype == 'rower': + workouttype = 'water' + except KeyError: + pass + + if dosmooth: + # auto smoothing + pace = row.df[' Stroke500mPace (sec/500m)'].values + velo = 500. / pace + + f = row.df['TimeStamp (sec)'].diff().mean() + if f != 0 and not np.isnan(f): + windowsize = 2 * (int(10. 
/ (f))) + 1 + else: # pragma: no cover + windowsize = 1 + if 'originalvelo' not in row.df: + row.df['originalvelo'] = velo + + if windowsize > 3 and windowsize < len(velo): + velo2 = savgol_filter(velo, windowsize, 3) + else: # pragma: no cover + velo2 = velo + + velo3 = pd.Series(velo2, dtype='float') + velo3 = velo3.replace([-np.inf, np.inf], np.nan) + velo3 = velo3.fillna(method='ffill') + + pace2 = 500. / abs(velo3) + + row.df[' Stroke500mPace (sec/500m)'] = pace2 + + row.df = row.df.fillna(0) + + row.write_csv(f2, gzip=True) + try: + os.remove(f2) + except: + pass + + # recalculate power data + if workouttype == 'rower' or workouttype == 'dynamic' or workouttype == 'slides': + try: + if r.erg_recalculatepower: + row.erg_recalculatepower() + row.write_csv(f2, gzip=True) + except: + pass + + averagehr = row.df[' HRCur (bpm)'].mean() + maxhr = row.df[' HRCur (bpm)'].max() + + if totaldist == 0: + totaldist = row.df['cum_dist'].max() + if totaltime == 0: + totaltime = row.df['TimeStamp (sec)'].max( + ) - row.df['TimeStamp (sec)'].min() + try: + totaltime = totaltime + row.df.loc[:, ' ElapsedTime (sec)'].iloc[0] + except KeyError: # pragma: no cover + pass + + if np.isnan(totaltime): # pragma: no cover + totaltime = 0 + + if dosummary: + summary = row.allstats() + + workoutstartdatetime = startdatetime + + dologging('debuglog.log', 'Dataprep line 1721, Workout Startdatetime {workoutstartdatetime}'.format( + workoutstartdatetime=workoutstartdatetime, + )) + + duration = totaltime_sec_to_string(totaltime) + + workoutdate = startdate + workoutstarttime = starttime + + s = 'Dataprep line 1730 workoutdate and time set to {workoutdate} and {workoutstarttime}'.format( + workoutdate=workoutdate, + workoutstarttime=workoutstarttime, + ) + dologging('debuglog.log', s) + + if makeprivate: # pragma: no cover + privacy = 'hidden' + else: + privacy = 'visible' + + # checking for inf values + + totaldist = np.nan_to_num(totaldist) + maxhr = np.nan_to_num(maxhr) + averagehr = 
np.nan_to_num(averagehr) + + dragfactor = 0 + if workouttype in otetypes: + dragfactor = row.dragfactor + + t = datetime.datetime.strptime(duration, "%H:%M:%S.%f") + delta = datetime.timedelta( + hours=t.hour, minutes=t.minute, seconds=t.second) + + workoutenddatetime = workoutstartdatetime+delta + + # check for duplicate start times and duration + duplicate = checkduplicates( + r, workoutdate, workoutstartdatetime, workoutenddatetime) + if duplicate: + rankingpiece = False + + # test title length + if title is not None and len(title) > 140: # pragma: no cover + title = title[0:140] + + timezone_str = str(workoutstartdatetime.tzinfo) + + w = Workout(user=r, name=title, date=workoutdate, + workouttype=workouttype, + boattype=boattype, + dragfactor=dragfactor, + duration=duration, distance=totaldist, + weightcategory=weightcategory, + adaptiveclass=adaptiveclass, + starttime=workoutstarttime, + duplicate=duplicate, + workoutsource=workoutsource, + rankingpiece=rankingpiece, + forceunit=forceunit, + rpe=rpe, + csvfilename=f2, notes=notes, summary=summary, + maxhr=maxhr, averagehr=averagehr, + startdatetime=workoutstartdatetime, + inboard=inboard, oarlength=oarlength, + timezone=timezone_str, + privacy=privacy, + impeller=impeller) + try: + w.save() + except ValidationError: # pragma: no cover + try: + w.startdatetime = timezone.now() + w.save() + except ValidationError: + return (0, 'Unable to create your workout') + + if privacy == 'visible': + ts = Team.objects.filter(rower=r) + for t in ts: + w.team.add(t) + + # put stroke data in database + _ = dataprep(row.df, id=w.id, bands=True, + barchart=True, otwpower=True, empower=True, inboard=inboard) + + isbreakthrough, ishard = checkbreakthrough(w, r) + _ = check_marker(w) + _ = update_wps(r, mytypes.otwtypes) + _ = update_wps(r, mytypes.otetypes) + + _ = myqueue(queuehigh, handle_calctrimp, w.id, f2, + r.ftp, r.sex, r.hrftp, r.max, r.rest) + + return (w.id, message) + + + +def new_workout_from_file(r, f2, + 
workouttype='rower', + workoutsource=None, + title='Workout', + boattype='1x', + rpe=-1, + makeprivate=False, + startdatetime='', + notes='', + oarlockfirmware='', + inboard=None, + oarlength=None, + impeller=False, + uploadoptions={'boattype': '1x', 'workouttype': 'rower'}): + message = "" + + try: + fileformat = get_file_type(f2) + except (IOError, UnicodeDecodeError): # pragma: no cover + os.remove(f2) + message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect." + return (0, message, f2) + + summary = '' + oarlength = 2.89 + inboard = 0.88 + + # Save zip files to email box for further processing + if len(fileformat) == 3 and fileformat[0] == 'zip': # pragma: no cover + uploadoptions['secret'] = settings.UPLOAD_SERVICE_SECRET + uploadoptions['user'] = r.user.id + uploadoptions['title'] = title + try: + zip_file = zipfile.ZipFile(f2) + for id, filename in enumerate(zip_file.namelist()): + datafile = zip_file.extract(filename, path='media/') + if id > 0: + uploadoptions['title'] = title+' ('+str(id+1)+')' + else: + uploadoptions['title'] = title + + uploadoptions['file'] = datafile + url = settings.UPLOAD_SERVICE_URL + + _ = myqueue(queuehigh, + handle_request_post, + url, + uploadoptions) + + except BadZipFile: # pragma: no cover + pass + + return -1, message, f2 + + # Some people try to upload Concept2 logbook summaries + if fileformat == 'imageformat': # pragma: no cover + os.remove(f2) + message = "You cannot upload image files here" + return (0, message, f2) + + if fileformat == 'json': # pragma: no cover + os.remove(f2) + message = "JSON format not supported in direct upload" + return (0, message, f2) + + if fileformat == 'c2log': + os.remove(f2) + message = "This summary does not contain stroke data. Use the files containing stroke by stroke data." 
+ return (0, message, f2) + + if fileformat == 'nostrokes': # pragma: no cover + os.remove(f2) + message = "It looks like this file doesn't contain stroke data." + return (0, message, f2) + + if fileformat == 'kml': # pragma: no cover + os.remove(f2) + message = "KML files are not supported" + return (0, message, f2) + + # Some people upload corrupted zip files + if fileformat == 'notgzip': # pragma: no cover + os.remove(f2) + message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect." + return (0, message, f2) + + # Some people try to upload RowPro summary logs + if fileformat == 'rowprolog': # pragma: no cover + os.remove(f2) + message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log." + return (0, message, f2) + + # Sometimes people try an unsupported file type. + # Send an email to info@rowsandall.com with the file attached + # for me to check if it is a bug, or a new file type + # worth supporting + if fileformat == 'gpx': # pragma: no cover + + os.remove(f2) + message = "GPX files support is on our roadmap. Check back soon." 
+ return (0, message, f2) + + if fileformat == 'unknown': # pragma: no cover + message = "We couldn't recognize the file type" + extension = os.path.splitext(f2)[1] + filename = os.path.splitext(f2)[0] + if extension == '.gz': + filename = os.path.splitext(filename)[0] + extension2 = os.path.splitext(filename)[1]+extension + extension = extension2 + f4 = filename+'a'+extension + copyfile(f2, f4) + _ = myqueue(queuehigh, + handle_sendemail_unrecognized, + f4, + r.user.email) + + return (0, message, f2) + + if fileformat == 'att': # pragma: no cover + # email attachment which can safely be ignored + return (0, '', f2) + + # Get workout type from fit & tcx + if (fileformat == 'fit'): # pragma: no cover + workouttype = get_workouttype_from_fit(f2, workouttype=workouttype) + # if (fileformat == 'tcx'): + # workouttype_from_tcx = get_workouttype_from_tcx(f2,workouttype=workouttype) + # if workouttype != 'rower' and workouttype_from_tcx not in mytypes.otwtypes: + # workouttype = workouttype_from_tcx + + # handle non-Painsled by converting it to painsled compatible CSV + if (fileformat != 'csv'): + f2, summary, oarlength, inboard, fileformat, impeller = handle_nonpainsled( + f2, + fileformat, + startdatetime=startdatetime, + summary=summary, + empowerfirmware=oarlockfirmware, + impeller=impeller, + ) + if not f2: # pragma: no cover + message = 'Something went wrong' + return (0, message, '') + + dosummary = (fileformat != 'fit' and 'speedcoach2' not in fileformat) + dosummary = dosummary or summary == '' + + if 'speedcoach2' in fileformat and workouttype == 'rower': + workouttype = 'water' + + if workoutsource is None: + workoutsource = fileformat + + dologging('debuglog.log', 'Saving to database with start date time {startdatetime}'.format( + startdatetime=startdatetime, + )) + + id, message = save_workout_database( + f2, r, + notes=notes, + workouttype=workouttype, + weightcategory=r.weightcategory, + adaptiveclass=r.adaptiveclass, + boattype=boattype, + 
makeprivate=makeprivate, + dosummary=dosummary, + workoutsource=workoutsource, + summary=summary, + startdatetime=startdatetime, + rpe=rpe, + inboard=inboard, oarlength=oarlength, + title=title, + forceunit='N', + impeller=impeller, + ) + + return (id, message, f2) + + +def new_workout_from_df(r, df, + title='New Workout', + workoutsource='unknown', + boattype='1x', + workouttype='rower', + parent=None, + startdatetime='', + setprivate=False, + forceunit='lbs', + dt=datetime.timedelta()): + + message = None + + summary = '' + if parent: + oarlength = parent.oarlength + inboard = parent.inboard + + workoutsource = parent.workoutsource + workouttype = parent.workouttype + boattype = parent.boattype + notes = parent.notes + summary = parent.summary + rpe = parent.rpe + if parent.privacy == 'hidden': # pragma: no cover + makeprivate = True + else: + makeprivate = False + + startdatetime = parent.startdatetime + dt + else: + oarlength = 2.89 + inboard = 0.88 + notes = '' + summary = '' + makeprivate = False + rpe = 0 + if startdatetime == '': # pragma: no cover + startdatetime = timezone.now() + + if setprivate: # pragma: no cover + makeprivate = True + + timestr = strftime("%Y%m%d-%H%M%S") + + csvfilename = 'media/df_' + timestr + '.csv' + if forceunit == 'N': + # change to lbs for now + df['peakforce'] /= lbstoN + df['averageforce'] /= lbstoN + + df.rename(columns=columndict, inplace=True) + + starttimeunix = arrow.get(startdatetime).timestamp() + df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] + + df['TimeStamp (sec)'] = df['TimeStamp (sec)'] + starttimeunix + + row = rrdata(df=df) + + row.write_csv(csvfilename, gzip=True) + + id, message = save_workout_database(csvfilename, r, + workouttype=workouttype, + boattype=boattype, + title=title, + workoutsource=workoutsource, + notes=notes, + summary=summary, + oarlength=oarlength, + inboard=inboard, + makeprivate=makeprivate, + dosmooth=False, + rpe=rpe, + consistencychecks=False) + + _ = myqueue(queuehigh, 
handle_calctrimp, id, csvfilename, + r.ftp, r.sex, r.hrftp, r.max, r.rest) + + return (id, message) + + +# A wrapper around the rowingdata class, with some error catching + + + + def workout_trimp(w, reset=False): r = w.user diff --git a/rowers/dataprepnodjango.py b/rowers/dataprepnodjango.py deleted file mode 100644 index e3713b7a..00000000 --- a/rowers/dataprepnodjango.py +++ /dev/null @@ -1,1022 +0,0 @@ -from rowers.utils import totaltime_sec_to_string -from rowers.metrics import dtypes -import datetime -from scipy.signal import savgol_filter -import os - -# This is Data prep used for testing purposes (no Django environment) -# Uses the debug SQLite database for stroke data -from rowingdata import rowingdata as rrdata -from rowingdata import make_cumvalues -from rowingdata import rower as rrower -from rowingdata import main as rmain -from rowingdata import empower_bug_correction, get_empower_rigging, get_file_type -from rowingdata.csvparsers import make_cumvalues_array -from time import strftime -from pandas import DataFrame, Series - -import shutil -from shutil import copyfile -import pyarrow as pa - -import pandas as pd -import numpy as np -import itertools -import dask.dataframe as dd -from dask.delayed import delayed - -from sqlalchemy import create_engine -import sqlalchemy as sa - -from rowsandall_app.settings import DATABASES -from rowsandall_app.settings_dev import DATABASES as DEV_DATABASES -from rowsandall_app.settings_dev import use_sqlite - -from rowers.utils import lbstoN -import pytz -from timezonefinder import TimezoneFinder - -from rowingdata import ( - RowProParser, TCXParser, MysteryParser, RowPerfectParser, - ErgDataParser, CoxMateParser, BoatCoachAdvancedParser, BoatCoachOTWParser, - BoatCoachParser, painsledDesktopParser, SpeedCoach2Parser, speedcoachParser, - ErgStickParser, FITParser, fitsummarydata -) - -try: - user = DATABASES['default']['USER'] -except KeyError: # pragma: no cover - user = '' -try: - password = 
DATABASES['default']['PASSWORD'] -except KeyError: # pragma: no cover - password = '' - -try: - database_name = DATABASES['default']['NAME'] -except KeyError: # pragma: no cover - database_name = '' -try: - host = DATABASES['default']['HOST'] -except KeyError: # pragma: no cover - host = '' -try: - port = DATABASES['default']['PORT'] -except KeyError: # pragma: no cover - port = '' - -database_url = 'mysql://{user}:{password}@{host}:{port}/{database_name}'.format( - user=user, - password=password, - database_name=database_name, - host=host, - port=port, -) - -database_name_dev = DEV_DATABASES['default']['NAME'] - -database_url_debug = database_url - -if use_sqlite: - database_url_debug = 'sqlite:///'+database_name_dev - database_url = database_url_debug - - -# mapping the DB column names to the CSV file column names -columndict = { - 'time': 'TimeStamp (sec)', - 'hr': ' HRCur (bpm)', - 'velo': ' AverageBoatSpeed (m/s)', - 'pace': ' Stroke500mPace (sec/500m)', - 'spm': ' Cadence (stokes/min)', - 'power': ' Power (watts)', - 'averageforce': ' AverageDriveForce (lbs)', - 'drivelength': ' DriveLength (meters)', - 'peakforce': ' PeakDriveForce (lbs)', - 'distance': ' Horizontal (meters)', - 'catch': 'catch', - 'finish': 'finish', - 'peakforceangle': 'peakforceangle', - 'wash': 'wash', - 'slip': 'wash', - 'workoutstate': ' WorkoutState', - 'cumdist': 'cum_dist', -} - - -def niceformat(values): - out = [] - for v in values: - formattedv = strfdelta(v) - out.append(formattedv) - - return out - - -def strfdelta(tdelta): - try: - minutes, seconds = divmod(tdelta.seconds, 60) - tenths = int(tdelta.microseconds/1e5) - except AttributeError: # pragma: no cover - minutes, seconds = divmod(tdelta.view(np.int64), 60e9) - seconds, rest = divmod(seconds, 1e9) - tenths = int(rest/1e8) - res = "{minutes:0>2}:{seconds:0>2}.{tenths:0>1}".format( - minutes=minutes, - seconds=seconds, - tenths=tenths, - ) - - return res - - -def nicepaceformat(values): - out = [] - for v in values: - 
formattedv = strfdelta(v) - out.append(formattedv) - - return out - - -def timedeltaconv(x): - if not np.isnan(x): - dt = datetime.timedelta(seconds=x) - else: # pragma: no cover - dt = datetime.timedelta(seconds=350.) - - return dt - - -def rdata(file, rower=rrower()): # pragma: no cover - try: - res = rrdata(csvfile=file, rower=rower) - except IOError: - try: - res = rrdata(csvfile=file+'.gz', rower=rower) - except IOError: - res = 0 - - return res - - -# Creates C2 stroke data -def create_c2_stroke_data_db( - distance, duration, workouttype, - workoutid, starttimeunix, csvfilename, debug=False): # pragma: no cover - - nr_strokes = int(distance/10.) - - totalseconds = duration.hour*3600. - totalseconds += duration.minute*60. - totalseconds += duration.second - totalseconds += duration.microsecond/1.e6 - - try: - spm = 60.*nr_strokes/totalseconds - except ZeroDivisionError: - spm = 20*np.zeros(nr_strokes) - - try: - _ = totalseconds/float(nr_strokes) - except ZeroDivisionError: - return 0 - - elapsed = np.arange(nr_strokes)*totalseconds/(float(nr_strokes-1)) - - d = np.arange(nr_strokes)*distance/(float(nr_strokes-1)) - - unixtime = starttimeunix + elapsed - - pace = 500.*totalseconds/distance - - if workouttype in ['rower', 'slides', 'dynamic']: - try: - velo = distance/totalseconds - except ZeroDivisionError: - velo = 0 - power = 2.8*velo**3 - else: - power = 0 - - df = pd.DataFrame({ - 'TimeStamp (sec)': unixtime, - ' Horizontal (meters)': d, - ' Cadence (stokes/min)': spm, - ' Stroke500mPace (sec/500m)': pace, - ' ElapsedTime (sec)': elapsed, - ' Power (watts)': power, - ' HRCur (bpm)': np.zeros(nr_strokes), - ' longitude': np.zeros(nr_strokes), - ' latitude': np.zeros(nr_strokes), - ' DragFactor': np.zeros(nr_strokes), - ' DriveLength (meters)': np.zeros(nr_strokes), - ' StrokeDistance (meters)': np.zeros(nr_strokes), - ' DriveTime (ms)': np.zeros(nr_strokes), - ' StrokeRecoveryTime (ms)': np.zeros(nr_strokes), - ' AverageDriveForce (lbs)': 
np.zeros(nr_strokes), - ' PeakDriveForce (lbs)': np.zeros(nr_strokes), - ' lapIdx': np.zeros(nr_strokes), - 'cum_dist': d - }) - - df[' ElapsedTime (sec)'] = df['TimeStamp (sec)'] - - _ = df.to_csv(csvfilename, index_label='index', compression='gzip') - - data = dataprep(df, id=workoutid, bands=False, debug=debug) - - return data - -# Saves C2 stroke data to CSV and database - - -def add_c2_stroke_data_db(strokedata, workoutid, starttimeunix, csvfilename, - debug=False, workouttype='rower'): - - res = make_cumvalues(0.1*strokedata['t']) - cum_time = res[0] - lapidx = res[1] - - unixtime = cum_time+starttimeunix - # unixtime[0] = starttimeunix - seconds = 0.1*strokedata.loc[:, 't'] - - nr_rows = len(unixtime) - - try: # pragma: no cover - latcoord = strokedata.loc[:, 'lat'] - loncoord = strokedata.loc[:, 'lon'] - except: - latcoord = np.zeros(nr_rows) - loncoord = np.zeros(nr_rows) - - try: - strokelength = strokedata.loc[:, 'strokelength'] - except: - strokelength = np.zeros(nr_rows) - - dist2 = 0.1*strokedata.loc[:, 'd'] - - try: - spm = strokedata.loc[:, 'spm'] - except KeyError: # pragma: no cover - spm = 0*dist2 - - try: - hr = strokedata.loc[:, 'hr'] - except KeyError: # pragma: no cover - hr = 0*spm - - pace = strokedata.loc[:, 'p']/10. 
- pace = np.clip(pace, 0, 1e4) - pace = pace.replace(0, 300) - - velo = 500./pace - power = 2.8*velo**3 - if workouttype == 'bike': # pragma: no cover - velo = 1000./pace - - # save csv - # Create data frame with all necessary data to write to csv - df = pd.DataFrame({'TimeStamp (sec)': unixtime, - ' Horizontal (meters)': dist2, - ' Cadence (stokes/min)': spm, - ' HRCur (bpm)': hr, - ' longitude': loncoord, - ' latitude': latcoord, - ' Stroke500mPace (sec/500m)': pace, - ' Power (watts)': power, - ' DragFactor': np.zeros(nr_rows), - ' DriveLength (meters)': np.zeros(nr_rows), - ' StrokeDistance (meters)': strokelength, - ' DriveTime (ms)': np.zeros(nr_rows), - ' StrokeRecoveryTime (ms)': np.zeros(nr_rows), - ' AverageDriveForce (lbs)': np.zeros(nr_rows), - ' PeakDriveForce (lbs)': np.zeros(nr_rows), - ' lapIdx': lapidx, - ' WorkoutState': 4, - ' ElapsedTime (sec)': seconds, - 'cum_dist': dist2 - }) - - df.sort_values(by='TimeStamp (sec)', ascending=True) - - # Create CSV file name and save data to CSV file - - res = df.to_csv(csvfilename, index_label='index', - compression='gzip') - - try: - data = dataprep(df, id=workoutid, bands=False, debug=debug) - except: # pragma: no cover - return 0 - - return data - - -def handle_nonpainsled(f2, fileformat, summary=''): # pragma: no cover - oarlength = 2.89 - inboard = 0.88 - # handle RowPro: - if (fileformat == 'rp'): - row = RowProParser(f2) - # handle TCX - if (fileformat == 'tcx'): - row = TCXParser(f2) - - # handle Mystery - if (fileformat == 'mystery'): - row = MysteryParser(f2) - - # handle RowPerfect - if (fileformat == 'rowperfect3'): - row = RowPerfectParser(f2) - - # handle ErgData - if (fileformat == 'ergdata'): - row = ErgDataParser(f2) - - # handle CoxMate - if (fileformat == 'coxmate'): - row = CoxMateParser(f2) - - # handle Mike - if (fileformat == 'bcmike'): - row = BoatCoachAdvancedParser(f2) - - # handle BoatCoach OTW - if (fileformat == 'boatcoachotw'): - row = BoatCoachOTWParser(f2) - - # handle 
BoatCoach - if (fileformat == 'boatcoach'): - row = BoatCoachParser(f2) - - # handle painsled desktop - if (fileformat == 'painsleddesktop'): - row = painsledDesktopParser(f2) - - # handle speed coach GPS - if (fileformat == 'speedcoach'): - row = speedcoachParser(f2) - - # handle speed coach GPS 2 - if (fileformat == 'speedcoach2'): - row = SpeedCoach2Parser(f2) - try: - oarlength, inboard = get_empower_rigging(f2) - summary = row.allstats() - except: - pass - - # handle ErgStick - if (fileformat == 'ergstick'): - row = ErgStickParser(f2) - - # handle FIT - if (fileformat == 'fit'): - row = FITParser(f2) - s = fitsummarydata(f2) - s.setsummary() - summary = s.summarytext - - f_to_be_deleted = f2 - # should delete file - f2 = f2[:-4]+'o.csv' - row.write_csv(f2, gzip=True) - - # os.remove(f2) - try: - os.remove(f_to_be_deleted) - except: - os.remove(f_to_be_deleted+'.gz') - - return (f2, summary, oarlength, inboard) - - -def delete_strokedata(id, debug=False): - dirname = 'media/strokedata_{id}.parquet.gz'.format(id=id) - try: - shutil.rmtree(dirname) - except FileNotFoundError: # pragma: no cover - pass - - -def update_strokedata(id, df, debug=False): - delete_strokedata(id, debug=debug) - if debug: # pragma: no cover # pragma: no cover - print("updating ", id) - rowdata = dataprep(df, id=id, bands=True, barchart=True, otwpower=True, - debug=debug) - - return rowdata - - -def update_empower(id, inboard, oarlength, boattype, df, f1, debug=False): # pragma: no cover - - corr_factor = 1.0 - if 'x' in boattype: - # sweep - a = 0.06 - b = 0.275 - else: - # scull - a = 0.15 - b = 0.275 - - corr_factor = empower_bug_correction(oarlength, inboard, a, b) - - success = False - - try: - df['power empower old'] = df[' Power (watts)'] - df[' Power (watts)'] = df[' Power (watts)'] * corr_factor - df['driveenergy empower old'] = df['driveenergy'] - df['driveenergy'] = df['driveenergy'] * corr_factor - success = True - except KeyError: - pass - - if success: - 
delete_strokedata(id, debug=debug) - if debug: # pragma: no cover - print("updated ", id) - print("correction ", corr_factor) - else: - if debug: # pragma: no cover - print("not updated ", id) - - _ = dataprep(df, id=id, bands=True, barchart=True, otwpower=True, debug=debug) - - row = rrdata(df=df) - row.write_csv(f1, gzip=True) - - return success - - -def testdata(time, distance, pace, spm): # pragma: no cover - t1 = np.issubdtype(time, np.number) - t2 = np.issubdtype(distance, np.number) - t3 = np.issubdtype(pace, np.number) - t4 = np.issubdtype(spm, np.number) - - return t1 and t2 and t3 and t4 - - -def getsmallrowdata_db(columns, ids=[], debug=False): - csvfilenames = [ - 'media/strokedata_{id}.parquet.gz'.format(id=id) for id in ids] - data = [] - columns = [c for c in columns if c != 'None'] - - df = pd.DataFrame() - - if len(ids) > 1: # pragma: no cover - for id, f in zip(ids, csvfilenames): - try: - df = pd.read_parquet(f, columns=columns, engine='pyarrow') - data.append(df) - except OSError: - pass - except pa.lib.ArrowInvalid: - pass - - try: - df = pd.concat(data, axis=0) - except ValueError: - df = pd.DataFrame() - elif len(ids) == 1: - try: - df = pd.read_parquet( - csvfilenames[0], columns=columns, engine='pyarrow') - except (OSError, IndexError): # pragma: no cover - df = pd.DataFrame() - else: # pragma: no cover - df = pd.DataFrame() - - return df - - -def update_workout_field_sql(workoutid, fieldname, value, debug=False): - if debug: # pragma: no cover # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - table = 'rowers_workout' - - query = "UPDATE %s SET %s = '%s' WHERE `id` = %s;" % ( - table, fieldname, value, workoutid) - - with engine.connect() as conn, conn.begin(): - _ = conn.execute(query) - - conn.close() - engine.dispose() - - return 1 - - -def update_c2id_sql(id, c2id): # pragma: no cover - engine = create_engine(database_url, echo=False) - table = 
'rowers_workout' - - query = "UPDATE %s SET uploadedtoc2 = %s WHERE `id` = %s;" % ( - table, c2id, id) - - with engine.connect() as conn, conn.begin(): - _ = conn.execute(query) - - conn.close() - engine.dispose() - - return 1 - - -def read_cols_df_sql(ids, columns, debug=False): # pragma: no cover - columns = list(columns)+['distance', 'spm'] - columns = [x for x in columns if x != 'None'] - columns = list(set(columns)) - - ids = [int(id) for id in ids] - - if len(ids) == 0: - return pd.DataFrame() - elif len(ids) == 1: - try: - filename = 'media/strokedata_{id}.parquet.gz'.format(id=ids[0]) - df = pd.read_parquet(filename, columns=columns) - except OSError: - pass - else: - data = [] - filenames = [ - 'media/strokedata_{id}.parquet.gz'.format(id=id) for id in ids] - for id, f in zip(ids, filenames): - try: - df = pd.read_parquet(f, columns=columns) - data.append(df) - except OSError: - pass - - df = pd.concat(data, axis=0) - - return df - - -def read_df_sql(id, debug=False): # pragma: no cover - try: - f = 'media/strokedata_{id}.parquet.gz'.format(id=id) - df = pd.read_parquet(f) - except OSError: - pass - - df = df.fillna(value=0) - - return df - - -def getcpdata_sql(rower_id, table='cpdata', debug=False): # pragma: no cover - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - query = sa.text('SELECT * from {table} WHERE user={rower_id};'.format( - rower_id=rower_id, - table=table, - )) - _ = engine.raw_connection() - df = pd.read_sql_query(query, engine) - - return df - - -def deletecpdata_sql(rower_id, table='cpdata', debug=False): # pragma: no cover - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - query = sa.text('DELETE from {table} WHERE user={rower_id};'.format( - rower_id=rower_id, - table=table, - )) - with engine.connect() as conn, conn.begin(): - try: - _ 
= conn.execute(query) - except: # pragma: no cover - print("Database locked") - conn.close() - engine.dispose() - - -def delete_agegroup_db(age, sex, weightcategory, debug=False): - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: # pragma: no cover - engine = create_engine(database_url, echo=False) - - query = sa.text('DELETE from {table} WHERE age={age} and weightcategory = {weightcategory} and sex={sex};'.format( - sex=sex, - age=age, - weightcategory=weightcategory, - table='calcagegrouprecords' - )) - with engine.connect() as conn, conn.begin(): - try: - _ = conn.execute(query) - except: # pragma: no cover - print("Database locked") - conn.close() - engine.dispose() - - -def update_agegroup_db(age, sex, weightcategory, wcdurations, wcpower, - debug=False): - - delete_agegroup_db(age, sex, weightcategory, debug=debug) - - wcdurations = [None if type(y) is float and np.isnan( - y) else y for y in wcdurations] - wcpower = [None if type(y) is float and np.isnan(y) - else y for y in wcpower] - - df = pd.DataFrame( - { - 'duration': wcdurations, - 'power': wcpower, - } - ) - - df['sex'] = sex - df['age'] = age - df['weightcategory'] = weightcategory - df.replace([np.inf, -np.inf], np.nan, inplace=True) - df.dropna(axis=0, inplace=True) - - if debug: # pragma: no cover # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - table = 'calcagegrouprecords' - with engine.connect() as conn, conn.begin(): - df.to_sql(table, engine, if_exists='append', index=False) - conn.close() - engine.dispose() - - -def updatecpdata_sql(rower_id, delta, cp, table='cpdata', distance=pd.Series([], dtype='float'), debug=False): - deletecpdata_sql(rower_id, table=table, debug=debug) - df = pd.DataFrame( - { - 'delta': delta, - 'cp': cp, - 'user': rower_id - } - ) - - if not distance.empty: - df['distance'] = distance - - if debug: # pragma: no cover - engine = 
create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - with engine.connect() as conn, conn.begin(): - df.to_sql(table, engine, if_exists='append', index=False) - conn.close() - engine.dispose() - - -def smalldataprep(therows, xparam, yparam1, yparam2): # pragma: no cover - df = pd.DataFrame() - if yparam2 == 'None': - yparam2 = 'power' - df[xparam] = [] - df[yparam1] = [] - df[yparam2] = [] - df['distance'] = [] - df['spm'] = [] - for workout in therows: - f1 = workout.csvfilename - - try: - rowdata = dataprep(rrdata(csvfile=f1).df) - - rowdata = pd.DataFrame({xparam: rowdata[xparam], - yparam1: rowdata[yparam1], - yparam2: rowdata[yparam2], - 'distance': rowdata['distance'], - 'spm': rowdata['spm'], - } - ) - df = pd.concat([df, rowdata], ignore_index=True) - except IOError: - try: - rowdata = dataprep(rrdata(csvfile=f1+'.gz').df) - rowdata = pd.DataFrame({xparam: rowdata[xparam], - yparam1: rowdata[yparam1], - yparam2: rowdata[yparam2], - 'distance': rowdata['distance'], - 'spm': rowdata['spm'], - } - ) - df = pd.concat([df, rowdata], ignore_index=True) - except IOError: - pass - - return df - - -def dataprep(rowdatadf, id=0, bands=True, barchart=True, otwpower=True, - empower=True, debug=False, inboard=0.88, forceunit='lbs'): - - if rowdatadf.empty: # pragma: no cover - if debug: # pragma: no cover - print("empty") - return 0 - - # rowdatadf.set_index([range(len(rowdatadf))],inplace=True) - t = rowdatadf.loc[:, 'TimeStamp (sec)'] - t = pd.Series(t-rowdatadf.loc[:, 'TimeStamp (sec)'].iloc[0]) - - row_index = rowdatadf.loc[:, ' Stroke500mPace (sec/500m)'] > 3000 - rowdatadf.loc[row_index, ' Stroke500mPace (sec/500m)'] = 3000. 
- - p = rowdatadf.loc[:, ' Stroke500mPace (sec/500m)'] - try: - velo = rowdatadf.loc[:, ' AverageBoatSpeed (m/s)'] - except KeyError: - velo = 500./p - - hr = rowdatadf.loc[:, ' HRCur (bpm)'] - spm = rowdatadf.loc[:, ' Cadence (stokes/min)'] - cumdist = rowdatadf.loc[:, 'cum_dist'] - - power = rowdatadf.loc[:, ' Power (watts)'] - averageforce = rowdatadf.loc[:, ' AverageDriveForce (lbs)'] - drivelength = rowdatadf.loc[:, ' DriveLength (meters)'] - try: - workoutstate = rowdatadf.loc[:, ' WorkoutState'] - except KeyError: # pragma: no cover - workoutstate = 0*hr - - peakforce = rowdatadf.loc[:, ' PeakDriveForce (lbs)'] - - forceratio = averageforce/peakforce - forceratio = forceratio.fillna(value=0) - - try: - drivetime = rowdatadf.loc[:, ' DriveTime (ms)'] - recoverytime = rowdatadf.loc[:, ' StrokeRecoveryTime (ms)'] - rhythm = 100.*drivetime/(recoverytime+drivetime) - rhythm = rhythm.fillna(value=0) - except: # pragma: no cover - rhythm = 0.0*forceratio - - f = rowdatadf['TimeStamp (sec)'].diff().mean() - if f != 0: - try: - windowsize = 2*(int(10./(f)))+1 - except ValueError: # pragma: no cover - windowsize = 1 - else: # pragma: no cover - windowsize = 1 - if windowsize <= 3: # pragma: no cover - windowsize = 5 - - if windowsize > 3 and windowsize < len(hr): - spm = savgol_filter(spm, windowsize, 3) - hr = savgol_filter(hr, windowsize, 3) - drivelength = savgol_filter(drivelength, windowsize, 3) - forceratio = savgol_filter(forceratio, windowsize, 3) - - try: - t2 = t.fillna(method='ffill').apply(lambda x: timedeltaconv(x)) - except TypeError: # pragma: no cover - t2 = 0*t - - p2 = p.fillna(method='ffill').apply(lambda x: timedeltaconv(x)) - - try: - drivespeed = drivelength/rowdatadf[' DriveTime (ms)']*1.0e3 - except KeyError: # pragma: no cover - drivespeed = 0.0*rowdatadf['TimeStamp (sec)'] - except TypeError: # pragma: no cover - drivespeed = 0.0*rowdatadf['TimeStamp (sec)'] - - drivespeed = drivespeed.fillna(value=0) - - try: - driveenergy = 
rowdatadf['driveenergy'] - except KeyError: # pragma: no cover - if forceunit == 'lbs': - driveenergy = drivelength*averageforce*lbstoN - else: # pragma: no cover - driveenergy = drivelength*averageforce - - distance = rowdatadf.loc[:, 'cum_dist'] - - velo = 500./p - - distanceperstroke = 60.*velo/spm - - if forceunit == 'lbs': - averageforce *= lbstoN - peakforce *= lbstoN - - data = DataFrame( - dict( - time=t * 1e3, - hr=hr, - pace=p * 1e3, - spm=spm, - velo=velo, - cumdist=cumdist, - ftime=niceformat(t2), - fpace=nicepaceformat(p2), - driveenergy=driveenergy, - power=power, - workoutstate=workoutstate, - averageforce=averageforce, - drivelength=drivelength, - peakforce=peakforce, - forceratio=forceratio, - distance=distance, - drivespeed=drivespeed, - rhythm=rhythm, - distanceperstroke=distanceperstroke, - ) - ) - - if bands: - # HR bands - data['hr_ut2'] = rowdatadf.loc[:, 'hr_ut2'] - data['hr_ut1'] = rowdatadf.loc[:, 'hr_ut1'] - data['hr_at'] = rowdatadf.loc[:, 'hr_at'] - data['hr_tr'] = rowdatadf.loc[:, 'hr_tr'] - data['hr_an'] = rowdatadf.loc[:, 'hr_an'] - data['hr_max'] = rowdatadf.loc[:, 'hr_max'] - data['hr_bottom'] = 0.0*data['hr'] - - try: - _ = rowdatadf.loc[:, ' ElapsedTime (sec)'] - except KeyError: # pragma: no cover - rowdatadf[' ElapsedTime (sec)'] = rowdatadf['TimeStamp (sec)'] - - if empower: - try: - wash = rowdatadf.loc[:, 'wash'] - except KeyError: - wash = 0*t - - try: - catch = rowdatadf.loc[:, 'catch'] - except KeyError: - catch = 0*t - - try: - finish = rowdatadf.loc[:, 'finish'] - except KeyError: - finish = 0*t - - try: - peakforceangle = rowdatadf.loc[:, 'peakforceangle'] - except KeyError: - peakforceangle = 0*t - - if data['driveenergy'].mean() == 0: - try: - driveenergy = rowdatadf.loc[:, 'driveenergy'] - except KeyError: - driveenergy = power*60/spm - else: - driveenergy = data['driveenergy'] - - arclength = (inboard-0.05)*(np.radians(finish)-np.radians(catch)) - if arclength.mean() > 0: # pragma: no cover - drivelength = 
arclength - elif drivelength.mean() == 0: - drivelength = driveenergy/(averageforce*4.44822) - - try: - slip = rowdatadf.loc[:, 'slip'] - except KeyError: - slip = 0*t - - try: - totalangle = finish-catch - effectiveangle = finish-wash-catch-slip - except ValueError: # pragma: no cover - totalangle = 0*t - effectiveangle = 0*t - - if windowsize > 3 and windowsize < len(slip): - try: - wash = savgol_filter(wash, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - slip = savgol_filter(slip, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - catch = savgol_filter(catch, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - finish = savgol_filter(finish, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - peakforceangle = savgol_filter(peakforceangle, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - driveenergy = savgol_filter(driveenergy, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - drivelength = savgol_filter(drivelength, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - totalangle = savgol_filter(totalangle, windowsize, 3) - except TypeError: # pragma: no cover - pass - try: - effectiveangle = savgol_filter(effectiveangle, windowsize, 3) - except TypeError: # pragma: no cover - pass - - velo = 500./p - - ergpw = 2.8*velo**3 - efficiency = 100.*ergpw/power - - efficiency = efficiency.replace([-np.inf, np.inf], np.nan) - efficiency.fillna(method='ffill') - - try: - data['wash'] = wash - data['catch'] = catch - data['slip'] = slip - data['finish'] = finish - data['peakforceangle'] = peakforceangle - data['driveenergy'] = driveenergy - data['drivelength'] = drivelength - data['totalangle'] = totalangle - data['effectiveangle'] = effectiveangle - data['efficiency'] = efficiency - except ValueError: # pragma: no cover - pass - - if otwpower: - try: - nowindpace = rowdatadf.loc[:, 'nowindpace'] - except KeyError: - nowindpace = p - try: - 
equivergpower = rowdatadf.loc[:, 'equivergpower'] - except KeyError: - equivergpower = 0*p+50. - - nowindpace2 = nowindpace.apply(lambda x: timedeltaconv(x)) - ergvelo = (equivergpower/2.8)**(1./3.) - - ergpace = 500./ergvelo - ergpace[ergpace == np.inf] = 240. - ergpace2 = ergpace.apply(lambda x: timedeltaconv(x)) - - data['ergpace'] = ergpace*1.e3 - data['nowindpace'] = nowindpace*1.e3 - data['equivergpower'] = equivergpower - data['fergpace'] = nicepaceformat(ergpace2) - data['fnowindpace'] = nicepaceformat(nowindpace2) - data['efficiency'] = efficiency - - data = data.replace([-np.inf, np.inf], np.nan) - data = data.fillna(method='ffill') - - data.dropna(axis=0, inplace=True, how='all') - data.dropna(axis=1, inplace=True, how='any') - - # write data if id given - if id != 0: - data['workoutid'] = id - data.fillna(0, inplace=True) - for k, v in dtypes.items(): - try: - data[k] = data[k].astype(v) - except KeyError: - pass - - filename = 'media/strokedata_{id}.parquet.gz'.format(id=id) - df = dd.from_pandas(data, npartitions=1) - df.to_parquet(filename, engine='fastparquet', compression='GZIP') - - return data diff --git a/rowers/middleware.py b/rowers/middleware.py index 7dc0d2d1..5f7d9c43 100644 --- a/rowers/middleware.py +++ b/rowers/middleware.py @@ -2,7 +2,7 @@ from django.shortcuts import redirect from django.http import HttpResponse from django.contrib import messages from rowers.mytypes import otwtypes -from rowers.tasks import handle_sendemail_expired +#from rowers.tasks import handle_sendemail_expired from django.utils import timezone from rowers.models import Workout, PowerTimeFitnessMetric, Rower, PaidPlan import datetime @@ -99,6 +99,7 @@ class RowerPlanMiddleWare(object): # remove from Free Coach groups # send email + from rowers.tasks import handle_sendemail_expired _ = myqueue(queue, handle_sendemail_expired, r.user.email, diff --git a/rowers/models.py b/rowers/models.py index 8225aa16..1da7fc3f 100644 --- a/rowers/models.py +++ b/rowers/models.py 
@@ -553,8 +553,9 @@ def polygon_coord_center(polygon): return latitudes.mean(), longitudes.mean() -def polygon_to_path(polygon): +def polygon_to_path(polygon, debug=False): points = GeoPoint.objects.filter(polygon=polygon).order_by("order_in_poly") + s = [] for point in points: s.append([point.latitude, point.longitude]) diff --git a/rowers/tasks.py b/rowers/tasks.py index 45f806ea..cad3b9da 100644 --- a/rowers/tasks.py +++ b/rowers/tasks.py @@ -1,3 +1,22 @@ +import os + +os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true" +from YamJam import yamjam +CFG = yamjam()['rowsandallapp'] + +try: + os.environ.setdefault("DJANGO_SETTINGS_MODULE",CFG['settings_name']) +except KeyError: # pragma: no cover + os.environ.setdefault("DJANGO_SETTINGS_MODULE","rowsandall_app.settings") + +from django.core.wsgi import get_wsgi_application + +application = get_wsgi_application() +from rowers.models import ( + Workout, GeoPolygon, GeoPoint, GeoCourse, + VirtualRaceResult, CourseTestResult, Rower + ) + import math from rowers.courseutils import ( coursetime_paths, coursetime_first, time_in_path, @@ -16,8 +35,7 @@ import rowers.longtask as longtask import requests import rowers.datautils as datautils -""" Background tasks done by Celery (develop) or QR (production) """ -import os +""" Background tasks done by QR (production) """ import time import gc import gzip @@ -36,8 +54,8 @@ import rowingdata from rowingdata import make_cumvalues from uuid import uuid4 from rowingdata import rowingdata as rdata + from datetime import timedelta -from sqlalchemy import create_engine from rowers.celery import app from celery import shared_task @@ -61,13 +79,15 @@ import rowers.otw_power_calculator_pb2_grpc as calculator_pb2_grpc import rowers.rowing_workout_metrics_pb2 as metrics_pb2 import rowers.rowing_workout_metrics_pb2_grpc as metrics_pb2_grpc -from rowsandall_app.settings import SITE_URL -from rowsandall_app.settings_dev import SITE_URL as SITE_URL_DEV -from rowsandall_app.settings import 
PROGRESS_CACHE_SECRET -from rowsandall_app.settings import SETTINGS_NAME -from rowsandall_app.settings import workoutemailbox -from rowsandall_app.settings import UPLOAD_SERVICE_SECRET, UPLOAD_SERVICE_URL -from rowsandall_app.settings import NK_API_LOCATION +from django.conf import settings +SITE_URL = settings.SITE_URL +SITE_URL_DEV = settings.SITE_URL +PROGRESS_CACHE_SECRET = settings.PROGRESS_CACHE_SECRET +SETTINGS_NAME = settings.SETTINGS_NAME + +UPLOAD_SERVICE_URL = settings.UPLOAD_SERVICE_URL +UPLOAD_SERVICE_SECRET = settings.UPLOAD_SERVICE_SECRET +NK_API_LOCATION = settings.NK_API_LOCATION from requests_oauthlib import OAuth1, OAuth1Session @@ -83,16 +103,19 @@ from rowers.emails import htmlstrip from rowers import mytypes -from rowers.dataprepnodjango import ( +from rowers.dataprep import ( getsmallrowdata_db, updatecpdata_sql, update_c2id_sql, - update_workout_field_sql, + #update_workout_field_sql, update_agegroup_db, update_strokedata, add_c2_stroke_data_db, totaltime_sec_to_string, create_c2_stroke_data_db, update_empower, - database_url_debug, database_url, dataprep, + # database_url_debug, + database_url, dataprep, # create_strava_stroke_data_db ) +database_url_debug = database_url + from rowers.opaque import encoder @@ -277,7 +300,9 @@ def summaryfromsplitdata(splitdata, data, filename, sep='|', workouttype='rower' @app.task def handle_request_post(url, data, debug=False, **kwargs): # pragma: no cover - response = requests.post(url, data) + if 'localhost' in url: + url = 'http'+url[5:] + response = requests.post(url, data, verify=False) dologging('upload_api.log', data) dologging('upload_api.log', response.status_code) return response.status_code @@ -297,10 +322,11 @@ def handle_c2_sync(workoutid, url, headers, data, debug=False, **kwargs): s = response.json() c2id = s['data']['id'] - res = update_workout_field_sql( - workoutid, 'uploadedtoc2', c2id, debug=debug) + workout = Workout.objects.get(id=workoutid) + workout.uploadedtoc2 = c2id + 
workout.save() - return res + return 1 @app.task @@ -316,8 +342,10 @@ def handle_sporttracks_sync(workoutid, url, headers, data, debug=False, **kwargs id = int(m) - _ = update_workout_field_sql( - workoutid, 'uploadedtosporttracks', id, debug=debug) + workout = Workout.objects.get(id=workoutid) + workout.uploadedtosporttracks = id + workout.save() + return 1 @@ -338,7 +366,7 @@ def handle_strava_sync(stravatoken, workoutid, filename, name, activity_type, de tb = traceback.format_exc() dologging('strava_fail.log', tb) failed = True - except stravalib.exc.TimeoutExceeded: + except stravalib.exc.TimeoutExceeded: # pragma: no cover dologging('strava_fail.log', 'Strava upload failed for Workout {id} TimeOutExceeded'.format( id=workoutid)) tb = traceback.format_exc() @@ -378,8 +406,9 @@ def handle_strava_sync(stravatoken, workoutid, filename, name, activity_type, de failed = True if not failed: - _ = update_workout_field_sql( - workoutid, 'uploadedtostrava', res.id, debug=debug) + workout = Workout.objects.get(id=workoutid) + workout.uploadedtostrava = res.id + workout.save() try: act = client.update_activity(res.id, activity_type=activity_type, description=description, device_name='Rowsandall.com') @@ -507,37 +536,14 @@ def getagegrouprecord(age, sex='male', weightcategory='hwt', return power -def polygon_to_path(polygon, debug=True): - pid = polygon[0] - query = "SELECT id, latitude, longitude FROM rowers_geopoint WHERE polygon_id = {pid}"\ - " ORDER BY order_in_poly ASC".format( - pid=pid) - if debug: - engine = create_engine(database_url_debug, echo=False) - else: # pragma: no cover - engine = create_engine(database_url, echo=False) - - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - points = result.fetchall() - - conn.close() - engine.dispose() - s = [] - - for point in points: - s.append([point[1], point[2]]) - - p = path.Path(s[:-1]) - - return p +from rowers.models import polygon_to_path @app.task(bind=True) def 
handle_check_race_course(self, f1, workoutid, courseid, recordid, useremail, userfirstname, - **kwargs): # pragma: no cover + **kwargs): logfile = 'courselog_{workoutid}_{courseid}.log'.format( workoutid=workoutid, courseid=courseid) @@ -567,11 +573,11 @@ def handle_check_race_course(self, mode = kwargs['mode'] summary = False - if 'summary' in kwargs: + if 'summary' in kwargs: # pragma: no cover summary = kwargs['summary'] successemail = False - if 'successemail' in kwargs: + if 'successemail' in kwargs: # pragma: no cover successemail = kwargs['successemail'] try: @@ -606,35 +612,21 @@ def handle_check_race_course(self, rowdata.fillna(method='backfill', inplace=True) - rowdata['time'] = rowdata['time']-rowdata.loc[0, 'time'] - rowdata = rowdata[rowdata['time'] > splitsecond] + rowdata.loc[:, 'time'] = rowdata.loc[:, 'time'].copy()-rowdata.loc[0, 'time'] + rowdata = rowdata.copy()[rowdata['time'] > splitsecond] # we may want to expand the time (interpolate) - rowdata['dt'] = rowdata['time'].apply( + + rowdata.loc[:,'dt'] = rowdata['time'].apply( lambda x: safetimedelta(x) - ) + ).values + rowdata = rowdata.resample('100ms', on='dt').mean() rowdata = rowdata.interpolate() - # initiate database engine + course = GeoCourse.objects.get(id=courseid) + polygons = course.polygons.all() - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - # get polygons - query = "SELECT id,name FROM rowers_geopolygon WHERE course_id = {courseid} ORDER BY order_in_course ASC".format( - courseid=courseid - ) - - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - polygons = result.fetchall() - - conn.close() - - engine.dispose() paths = [] for polygon in polygons: @@ -649,7 +641,7 @@ def handle_check_race_course(self, try: entrytimes, entrydistances = time_in_path(rowdata, paths[0], maxmin='max', getall=True, name=polygons[0].name, logfile=logfile) - except 
AttributeError: # for testing + except AttributeError: # pragma: no cover entrytimes, entrydistances = time_in_path(rowdata, paths[0], maxmin='max', getall=True, name='Start', logfile=logfile) with open(logfile, 'ab') as f: @@ -718,7 +710,8 @@ def handle_check_race_course(self, 'endsecond': endseconds, }) - records = records[records['coursecompleted'] is True] + #records = records[records['coursecompleted'] is True] + records = records.loc[records['coursecompleted'], : ] if len(records): coursecompleted = True @@ -736,35 +729,29 @@ def handle_check_race_course(self, coursedistance = coursemeters velo = coursedistance/coursetimeseconds points = 100*(2.-referencespeed/velo) - query = 'UPDATE rowers_virtualraceresult SET coursecompleted = 1,'\ - ' duration = "{duration}", distance = {distance},'\ - ' workoutid = {workoutid}, startsecond = {startsecond},'\ - ' endsecond = {endsecond}, points={points} WHERE id={recordid}'.format( - recordid=recordid, - duration=totaltime_sec_to_string(coursetimeseconds), - distance=int(coursemeters), - points=points, - workoutid=workoutid, - startsecond=startsecond, - endsecond=endsecond,) - if mode == 'coursetest': - query = 'UPDATE rowers_coursetestresult SET coursecompleted = 1,'\ - ' duration = "{duration}", distance = {distance},'\ - ' workoutid = {workoutid}, startsecond = {startsecond},'\ - ' endsecond = {endsecond}, points={points} WHERE id={recordid}'.format( - recordid=recordid, - duration=totaltime_sec_to_string(coursetimeseconds), - distance=int(coursemeters), - points=points, - workoutid=workoutid, - startsecond=startsecond, - endsecond=endsecond,) + if mode != 'coursetest': + record = VirtualRaceResult.objects.get(id=recordid) + record.duration = totaltime_sec_to_string(coursetimeseconds) + record.distance=int(coursemeters) + record.points = points + record.startsecond = startsecond + record.endsecond = endsecond + record.workoutid = workoutid + record.coursecompleted = 1 + record.save() - with engine.connect() as conn, 
conn.begin(): - result = conn.execute(query) + else: # pragma: no cover + record = CourseTestResult.objects.get(id=recordid) + record.duration = totaltime_sec_to_string(coursetimeseconds) + record.distance = int(coursemeters) + record.workoutid = workoutid + record.startsecond = startsecond + record.endsecond = endsecond + record.points = points + record.save() - if summary: + if summary: # pragma: no cover try: row = rdata(csvfile=f1) except IOError: # pragma: no cover @@ -784,17 +771,12 @@ def handle_check_race_course(self, summary = row.allstats() row.write_csv(f1, gzip=True) + workout = Workout.objects.get(id=workoutid) + workout.summary = summary + workout.save() - query = "UPDATE `rowers_workout` SET `summary` = '%s' WHERE `id` = %s" % ( - summary, workoutid) - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - - conn.close() - engine.dispose() - - if successemail: + if successemail: # pragma: no cover handle_sendemail_coursesucceed( useremail, userfirstname, logfile, workoutid ) @@ -804,36 +786,26 @@ def handle_check_race_course(self, return 1 else: # pragma: no cover - query = 'UPDATE rowers_virtualraceresult SET coursecompleted = 0,'\ - ' duration = "{duration}", distance = {distance},'\ - ' workoutid = {workoutid}, startsecond = {startsecond},'\ - ' endsecond = {endsecond}, points={points} WHERE id={recordid}'.format( - recordid=recordid, - duration=totaltime_sec_to_string(0), - distance=0, - points=0.0, - workoutid=workoutid, - startsecond=startsecond, - endsecond=endsecond,) + record = VirtualRaceResult.object.get(id=recordid) + record.duration = totaltime_sec_to_string(0) + record.distance = 0 + record.workoutid = workoutid + record.startsecond = startsecond + record.endsecond = endsecond + record.points = 0 + record.save() if mode == 'coursetest': - query = 'UPDATE rowers_coursetestresult SET coursecompleted = 0,'\ - ' duration = "{duration}", distance = {distance}, workoutid = {workoutid}'\ - ', startsecond = {startsecond}, 
endsecond = {endsecond}'\ - ', points={points} WHERE id={recordid}'.format( - recordid=recordid, - duration=totaltime_sec_to_string(0), - distance=0, - points=0, - workoutid=workoutid, - startsecond=startsecond, - endsecond=endsecond,) + record = CourseTestResult.objects.get(id=recordid) + record.duration = totaltime_sec_to_string(0) + record.distance = 0 + record.workoutid = workoutid + record.startsecond = startsecond + record.endsecond = endsecond + record.points = 0 + record.save() - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - conn.close() - engine.dispose() # add times for all gates to log file with open(logfile, 'ab') as f: @@ -1086,10 +1058,7 @@ def handle_calctrimp(id, hrmax, hrmin, debug=False, **kwargs): - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) + tss = 0 normp = 0 @@ -1178,21 +1147,14 @@ def handle_calctrimp(id, if hrtss > 1000: # pragma: no cover hrtss = 0 - query = 'UPDATE rowers_workout SET rscore = {tss},'\ - ' normp = {normp}, trimp={trimp}, hrtss={hrtss},'\ - ' normv={normv}, normw={normw} WHERE id={id}'.format( - tss=int(tss), - normp=int(normp), - trimp=int(trimp), - hrtss=int(hrtss), - normv=normv, - normw=normw, - id=id,) - - with engine.connect() as conn, conn.begin(): - _ = conn.execute(query) - conn.close() - engine.dispose() + workout = Workout.objects.get(id=id) + workout.tss = int(tss) + workout.normp = int(normp) + workout.trimp = int(trimp) + workout.hrtss = int(hrtss) + workout.normv = normv + workout.normw = normw + workout.save() return 1 @@ -1974,7 +1936,7 @@ def handle_sendemail_ical(first_name, last_name, email, url, icsfile, **kwargs): try: os.remove(icsfile) - except: + except: # pragma: no cover pass return 1 @@ -2834,23 +2796,13 @@ def handle_update_wps(rid, types, ids, mode, debug=False, **kwargs): except ValueError: # pragma: no cover return 0 + rower = Rower.objects.get(id=rid) if mode 
== 'water': - query = "UPDATE `rowers_rower` SET `median_wps` = '%s' WHERE `id` = '%s'" % ( - wps_median, rid) + rower.median_wps = wps_median else: - query = "UPDATE `rowers_rower` SET `median_wps_erg` = '%s' WHERE `id` = '%s'" % ( - wps_median, rid) + rower.median_wps_erg = wps_median - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - with engine.connect() as conn, conn.begin(): - _ = conn.execute(query) - - conn.close() - engine.dispose() + rower.save() return wps_median @@ -2999,23 +2951,8 @@ def handle_nk_async_workout(alldata, userid, nktoken, nkid, delaysec, defaulttim workoutid, error = add_workout_from_data(userid, nkid, data, df) # dologging('nklog.log','NK Workout ID {id}'.format(id=workoutid)) - - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - query = 'SELECT uploadedtonk from rowers_workout WHERE id ={workoutid}'.format( - workoutid=workoutid) - - newnkid = 0 - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - tdata = result.fetchall() - if tdata: - newnkid = tdata[0][0] - - conn.close() + workout = Workout.objects.get(id=workoutid) + newnkid = workout.uploadedtonk parkedids = [] try: @@ -3303,22 +3240,8 @@ def handle_c2_async_workout(alldata, userid, c2token, c2id, delaysec, defaulttim workoutid = response.json()['id'] - if debug: # pragma: no cover - engine = create_engine(database_url_debug, echo=False) - else: - engine = create_engine(database_url, echo=False) - - query = 'SELECT uploadedtoc2 from rowers_workout WHERE id ={workoutid}'.format( - workoutid=workoutid) - - newc2id = 0 - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - tdata = result.fetchall() - if tdata: # pragma: no cover - newc2id = tdata[0][0] - - conn.close() + workout = Workout.objects.get(id=workoutid) + newc2id = 
workout.uploadedtoc2 parkedids = [] with open('c2blocked.json', 'r') as c2blocked: @@ -3335,14 +3258,10 @@ def handle_c2_async_workout(alldata, userid, c2token, c2id, delaysec, defaulttim json.dump(tdata, c2blocked) # set distance, time - query = "UPDATE `rowers_workout` SET `distance` = '%s', `duration` = '%s' WHERE `id` = '%s'" % ( - distance, duration, workoutid) - - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - - conn.close() - engine.dispose() + workout = Workout.objects.get(id=workoutid) + workout.distance = distance + workout.duration = duration + workout.save() # summary if 'workout' in data: @@ -3359,14 +3278,9 @@ def handle_c2_async_workout(alldata, userid, c2token, c2id, delaysec, defaulttim summary, sa, results = summaryfromsplitdata( splitdata, data, csvfilename, workouttype=workouttype) - query = "UPDATE `rowers_workout` SET `summary` = '%s' WHERE `id` = %s" % ( - summary, workoutid) - - with engine.connect() as conn, conn.begin(): - result = conn.execute(query) - - conn.close() - engine.dispose() + workout = Workout.objects.get(id=workoutid) + workout.summary = summary + workout.save() from rowingdata.trainingparser import getlist if sa: diff --git a/rowers/tests/test_async_tasks.py b/rowers/tests/test_async_tasks.py index 7c629fe2..9721356a 100644 --- a/rowers/tests/test_async_tasks.py +++ b/rowers/tests/test_async_tasks.py @@ -81,10 +81,13 @@ class AsyncTaskTests(TestCase): def test_polygons(self): polygons = GeoPolygon.objects.all() + polygon = polygons[0] - obj = (polygon.id,polygon.name) - path = tasks.polygon_to_path(obj) - self.assertEqual(len(path),4) + + #obj = (polygon.id,polygon.name) + path = tasks.polygon_to_path(polygon) + + self.assertEqual(len(path),6) def test_summaryfromsplitdata(self): splitdata = [ @@ -495,21 +498,21 @@ class AsyncTaskTests(TestCase): self.assertEqual(res,1) - @patch('rowers.dataprepnodjango.create_engine') + @patch('rowers.dataprep.create_engine') def 
test_handle_updateergcp(self,mocked_sqlalchemy): f1 = get_random_file()['filename'] res = tasks.handle_updateergcp(1,[f1]) self.assertEqual(res,1) - @patch('rowers.dataprepnodjango.getsmallrowdata_db') + @patch('rowers.dataprep.getsmallrowdata_db') def test_handle_updatecp(self,mocked_getsmallrowdata_db_updatecp): rower_id = 1 workoutids = [1] res = tasks.handle_updatecp(rower_id,workoutids) self.assertEqual(res,1) - @patch('rowers.dataprepnodjango.getsmallrowdata_db') + @patch('rowers.dataprep.getsmallrowdata_db') def test_handle_setcp(self,mocked_getsmallrowdata_db_setcp): strokesdf = pd.read_csv('rowers/tests/testdata/uhfull.csv') filename = 'rowers/tests/testdata/temp/pq.gz' @@ -521,7 +524,7 @@ class AsyncTaskTests(TestCase): except FileNotFoundError: pass - @patch('rowers.dataprepnodjango.getsmallrowdata_db') + @patch('rowers.dataprep.getsmallrowdata_db') def test_handle_update_wps(self,mocked_getsmallrowdata_db_wps): ids = [1,2,3] diff --git a/rowers/tests/test_cpchart.py b/rowers/tests/test_cpchart.py index e4059c32..b7895702 100644 --- a/rowers/tests/test_cpchart.py +++ b/rowers/tests/test_cpchart.py @@ -130,7 +130,7 @@ class CPChartTest(TestCase): # add some tests of complex form data (no hr, no spm, zero spm, etc) - @patch('rowers.dataprepnodjango.create_engine') + @patch('rowers.dataprep.create_engine') def test_agerecords(self, mock_sqlalchemy): # update_records(url='rowers/tests/c2worldrecords.html',verbose=False) diff --git a/rowers/tests/test_plans.py b/rowers/tests/test_plans.py index cc5de299..3c538d54 100644 --- a/rowers/tests/test_plans.py +++ b/rowers/tests/test_plans.py @@ -1554,6 +1554,36 @@ description: "" self.instantplan.save() + self.startdate = (datetime.datetime.now()-datetime.timedelta(days=1)).date() + self.enddate = (datetime.datetime.now()+datetime.timedelta(days=1)).date() + self.preferreddate = datetime.datetime.now().date() + + + self.ps = SessionFactory(startdate=self.startdate,enddate=self.enddate, + sessiontype='session', + 
sessionmode = 'time', + criterium = 'none', + sessionvalue = 60, + sessionunit='min', + preferreddate=self.preferreddate, + manager=self.u, + interval_string = '5x(800m/5min)' + ) + + self.ps.save() + + + result = plannedsessions.add_rower_session(self.r,self.ps) + + self.step = PlannedSessionStep( + manager = self.u, + name = 'cd', + durationvalue = '50000', + durationtype = 'Distance', + ) + + self.step.save() + def tearDown(self): @@ -1562,6 +1592,72 @@ description: "" except (IOError, FileNotFoundError,OSError): pass + def test_stepadder(self): + login = self.c.login(username=self.u.username, password=self.password) + self.assertTrue(login) + + url = '/rowers/plans/stepeditor/{id}/'.format(id=self.ps.id) + + response = self.c.get(url,follow=True) + self.assertEqual(response.status_code,200) + + url = '/rowers/plans/stepadder/{id}/'.format(id=self.ps.id) + + bdy = json.dumps([self.step.id]) + + response = self.c.post(url, bdy, content_type='application/json', + **{'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}) + self.assertEqual(response.status_code, 200) + + def test_stepdelete(self): + login = self.c.login(username=self.u.username, password=self.password) + self.assertTrue(login) + + url = '/rowers/plans/step/{id}/delete'.format(id=self.step.id) + + response = self.c.get(url,follow=True) + self.assertEqual(response.status_code,200) + + def test_stepedit(self): + login = self.c.login(username=self.u.username, password=self.password) + self.assertTrue(login) + + url = '/rowers/plans/step/{id}/edit/{psid}/'.format( + id=self.step.id, psid=self.ps.id + ) + + response = self.c.get(url) + self.assertEqual(response.status_code,200) + + data = { + 'durationtype': 'RepeatUntilStepsCmplt', + 'durationvalue': '0.0', + #'targettype': None, + 'targetvalue': '8', + 'targetvaluelow': '0', + 'targetvaluehigh': '0', + 'intensity': 'Active', + 'description': 'aap' + } + + form = StepEditorForm(data) + self.assertTrue(form.is_valid()) + + response = self.c.post(url, data) + 
self.assertEqual(response.status_code,200) + + def test_save_plan_yaml_view(self): + login = self.c.login(username=self.u.username, password=self.password) + self.assertTrue(login) + + url = '/rowers/sessions/saveasplan/?when={s}/{e}'.format( + s = self.startdate.strftime("%Y-%m-%d"), + e = self.enddate.strftime("%Y-%m-%d") + ) + + response = self.c.get(url,follow=True) + self.assertEqual(response.status_code,200) + def test_clone_view(self): login = self.c.login(username=self.u.username, password=self.password) self.assertTrue(login) diff --git a/rowers/tests/test_races.py b/rowers/tests/test_races.py index 43e452a4..5e1ad403 100644 --- a/rowers/tests/test_races.py +++ b/rowers/tests/test_races.py @@ -888,8 +888,7 @@ class ChallengesTest(TestCase): self.assertEqual(response.status_code, 200) - @patch('rowers.tasks.create_engine', side_effect=mocked_sqlalchemy_courses) - def notest_virtualevent_check_view(self,mocked_sqlalchemy_courses): + def test_virtualevent_check_view(self): res = tasks.handle_check_race_course( self.wthyro.csvfilename, @@ -898,7 +897,6 @@ class ChallengesTest(TestCase): self.result.id, self.wthyro.user.user.email, self.wthyro.user.user.first_name, - mode='coursetest', ) self.assertEqual(res,1) diff --git a/rowers/views/importviews.py b/rowers/views/importviews.py index 35191735..82e13b48 100644 --- a/rowers/views/importviews.py +++ b/rowers/views/importviews.py @@ -515,7 +515,7 @@ def rower_process_garmincallback(request): # pragma: no cover # Process Rojabo callback @login_required() -def rower_process_rojabocallback(request): # prgrma: no cover +def rower_process_rojabocallback(request): # pragma: no cover # do stuff try: code = request.GET.get('code', None) diff --git a/rowers/views/planviews.py b/rowers/views/planviews.py index fa5f1ac1..709c04bf 100644 --- a/rowers/views/planviews.py +++ b/rowers/views/planviews.py @@ -1409,7 +1409,7 @@ def save_plan_yaml(request, userid=0): steps = ps.steps steps['filename'] = "" workouts.append(steps) - 
else: + else: # pragma: no cover if ps.sessionmode == 'distance': ps.interval_string = '{d}m'.format(d=ps.sessionvalue) elif ps.sessionmode == 'time': @@ -2994,7 +2994,8 @@ def rower_create_trainingplan(request, id=0): redirect_field_name=None) def stepadder(request, id=0): is_ajax = request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest' - if not is_ajax: + + if not is_ajax: # pragma: no cover return JSONResponse( status=403, data={ 'status': 'false', @@ -3005,7 +3006,7 @@ def stepadder(request, id=0): is_save = request.GET.get('save',0) - if request.method != 'POST': + if request.method != 'POST': # pragma: no cover message = {'status': 'false', 'message': 'this view cannot be accessed through GET'} return JSONResponse(status=403, data=message) @@ -3013,14 +3014,16 @@ def stepadder(request, id=0): try: json_data = json.loads(request.body) post_data = json_data - except (KeyError, JSONDecodeError): + except (KeyError, JSONDecodeError): # pragma: no cover q = request.POST post_data = {k: q.getlist(k) if len( q.getlist(k)) > 1 else v for k, v in q.items()} # only allow local host hostt = request.get_host().split(':') - if hostt[0] not in ['localhost', '127.0.0.1', 'dev.rowsandall.com', 'rowsandall.com']: + + if hostt[0] not in ['localhost', '127.0.0.1', 'dev.rowsandall.com', 'rowsandall.com', + 'testserver']: # pragma: no cover message = {'status': 'false', 'message': 'permission denied for host '+hostt[0]} return JSONResponse(status=403, data=message) @@ -3028,7 +3031,7 @@ def stepadder(request, id=0): if ps.steps: filename = ps.steps.get('filename','') sport = ps.steps.get('sport','rowing') - else: + else: # pragma: no cover filename = '' sport = 'rowing' @@ -3045,10 +3048,10 @@ def stepadder(request, id=0): d = step.asdict() d['stepId'] = nr steps['steps'].append(d) - except PlannedSessionStep.DoesNotExist: + except PlannedSessionStep.DoesNotExist: # pragma: no cover pass - if is_save: + if is_save: # pragma: no cover # save the darn thing ps.steps = steps 
@@ -3066,9 +3069,12 @@ def stepdelete(request, id=0): step.delete() - backid = request.GET.get('id') + backid = request.GET.get('id',0) - url = reverse(stepeditor,kwargs={'id':backid}) + if backid: # pragma: no cover + url = reverse(stepeditor,kwargs={'id':backid}) + else: + url = reverse('plannedsessions_view') return HttpResponseRedirect(url) @@ -3079,7 +3085,7 @@ def stepedit(request, id=0, psid=0): step = get_object_or_404(PlannedSessionStep, pk=id) try: ps = PlannedSession.objects.get(id=psid) - except PlannedSession.DoesNotExist: + except PlannedSession.DoesNotExist: # pragma: no cover ps = None form = StepEditorForm(instance=step) @@ -3094,7 +3100,7 @@ def stepedit(request, id=0, psid=0): ee = ss.copy() ee.pop('stepId') - if (dd == ee): + if (dd == ee): # pragma: no cover ss['durationType'] = form.cleaned_data['durationtype'] ss['durationValue'] = form.cleaned_data['durationvalue'] ss['targetType'] = form.cleaned_data['targettype'] @@ -3126,15 +3132,15 @@ def stepedit(request, id=0, psid=0): step.name = form.cleaned_data['name'] step.description = form.cleaned_data['description'] - if step.durationtype == 'Time': + if step.durationtype == 'Time': # pragma: no cover step.durationvalue *= 60000 - elif step.durationtype == 'Distance': + elif step.durationtype == 'Distance': # pragma: no cover step.durationvalue *= 100 step.save() - if step.durationtype == 'Time': + if step.durationtype == 'Time': # pragma: no cover form.fields['durationvalue'].initial = step.durationvalue / 60000 elif step.durationtype == 'Distance': form.fields['durationvalue'].initial = step.durationvalue / 100 @@ -3143,7 +3149,7 @@ def stepedit(request, id=0, psid=0): stepdescription = step_to_string(step.asdict(), short=False)[0] if request.method == 'POST': - if 'stepsave_and_return' in request.POST: + if 'stepsave_and_return' in request.POST: # pragma: no cover url = reverse('stepeditor',kwargs = {'id': ps.id}) return HttpResponseRedirect(url) @@ -3204,7 +3210,7 @@ def stepeditor(request, 
id=0): targetvaluehigh = targetvaluehigh, intensity = intensity, ) - if not archived_steps.count() and durationvalue != 0: + if not archived_steps.count(): s = PlannedSessionStep( manager = request.user, durationtype = durationtype, @@ -3217,14 +3223,14 @@ def stepeditor(request, id=0): name = step.get('wkt_step_name','Step') ) s.save() - else: + else: # pragma: no cover s = archived_steps[0] currentsteps.append(s) form = StepEditorForm() - if request.method == 'POST': + if request.method == 'POST': # pragma: no cover form = StepEditorForm(request.POST) if form.is_valid(): step = form.save(commit=False)