Private
Public Access
1
0
Files
rowsandall/rowers/upload_tasks.py

1247 lines
37 KiB
Python

import os
import time
from uuid import uuid4
import shutil
import requests
from rowingdata import FITParser as FP
from rowingdata.otherparsers import FitSummaryData
import rowingdata
import pandas as pd
import iso8601
import arrow
import numpy as np
import json
from polars.exceptions import (
ColumnNotFoundError, ComputeError, ShapeError
)
import polars as pl
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
from YamJam import yamjam
CFG = yamjam()['rowsandallapp']
try:
os.environ.setdefault("DJANGO_SETTINGS_MODULE",CFG['settings_name'])
except KeyError: # pragma: no cover
os.environ.setdefault("DJANGO_SETTINGS_MODULE","rowsandall_app.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from rowers.models import (
Workout, GeoPolygon, GeoPoint, GeoCourse,
VirtualRaceResult, CourseTestResult, Rower,
GraphImage, Team, PlannedSession
)
from rowers.session_utils import is_session_complete
from rowers.nkimportutils import (
get_nk_summary, get_nk_allstats, get_nk_intervalstats, getdict, strokeDataToDf,
add_workout_from_data
)
from rowers.mytypes import intervalsmappinginv
from rowers.dataroutines import (
totaltime_sec_to_string,
update_strokedata,
)
from rowers.utils import ewmovingaverage, dologging
from rowers.models import User
import rowers.utils as utils
from rowers.models import create_or_update_syncrecord
from rowers.utils import get_strava_stream
import rowers.mytypes as mytypes
from rowers.celery import app
from celery import shared_task
from django.utils import timezone
from rowingdata import make_cumvalues, make_cumvalues_array
from rowingdata import rowingdata as rdata
# Site / external-API configuration pulled from the YamJam config loaded above.
SITE_URL = CFG['site_url']
SITE_URL_DEV = CFG['site_url']
PROGRESS_CACHE_SECRET = CFG['progress_cache_secret']
try:
    SETTINGS_NAME = CFG['settings_name']
except KeyError:  # pragma: no cover
    # BUG FIX: default was misspelled 'rowsandall_ap.settings'; use the same
    # settings module as the DJANGO_SETTINGS_MODULE fallback at the top of
    # this file.
    SETTINGS_NAME = 'rowsandall_app.settings'
NK_API_LOCATION = CFG["nk_api_location"]
TP_CLIENT_ID = CFG["tp_client_id"]
TP_CLIENT_SECRET = CFG["tp_client_secret"]
TP_API_LOCATION = CFG["tp_api_location"]
tpapilocation = TP_API_LOCATION
from rowers.dataflow import upload_handler
@app.task
def handle_assignworkouts(workouts, rowers, remove_workout, debug=False, **kwargs):
    """Copy each workout's data file once per rower and re-upload it
    under that rower's account.

    Tries the plain CSV first, then falls back to the gzipped variant.
    When remove_workout is true the source workout is deleted after all
    rowers have been processed. Always returns 1.
    """
    for workout in workouts:
        uploadoptions = {
            'title': workout.name,
            'boattype': workout.boattype,
            'workouttype': workout.workouttype,
            'inboard': workout.inboard,
            'oarlength': workout.oarlength,
            'summary': workout.summary,
            'elapsedTime': 3600.*workout.duration.hour + 60*workout.duration.minute + workout.duration.second,
            'totalDistance': workout.distance,
            'useImpeller': workout.impeller,
            'seatNumber': workout.seatnumber,
            'boatName': workout.boatname,
            'portStarboard': workout.empowerside,
        }
        for rower in rowers:
            failed = False
            csvfilename = 'media/{code}.csv'.format(code=uuid4().hex[:16])
            # BUG FIX: shutil.copy creates the destination itself; the old
            # code opened the destination with 'wb' first, which in the
            # fallback branch left a stray empty .csv behind (the name was
            # changed to .gz only after the open).
            try:
                shutil.copy(workout.csvfilename, csvfilename)
            except FileNotFoundError:
                # Plain CSV missing - try the gzipped variant.
                try:
                    csvfilename = csvfilename + '.gz'
                    shutil.copy(workout.csvfilename + '.gz', csvfilename)
                except OSError:
                    failed = True
            if not failed:
                uploadoptions['user'] = rower.user.id
                uploadoptions['file'] = csvfilename
                upload_handler(uploadoptions, csvfilename)
        if remove_workout:
            workout.delete()
    return 1
@app.task
def handle_post_workout_api(uploadoptions, debug=False, **kwargs):  # pragma: no cover
    """Hand an already-assembled upload-options dict straight to the upload pipeline."""
    return upload_handler(uploadoptions, uploadoptions['file'])
@app.task
def handle_intervals_getworkout(rower, intervalstoken, workoutid, debug=False, **kwargs):
    """Download one intervals.icu activity and import it as a Workout.

    Fetches the activity metadata, then its FIT file, parses the FIT data,
    creates a Workout for ``rower`` and hands the file to the upload
    pipeline. Afterwards tags the imported workout(s) with sub-type /
    commute / race flags and links them to a matching PlannedSession when
    intervals.icu reports a paired event.

    Returns the new workout id, or 0 on any failure.
    """
    authorizationstring = str('Bearer '+intervalstoken)
    headers = {
        'authorization': authorizationstring,
    }
    url = "https://intervals.icu/api/v1/activity/{}".format(workoutid)
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return 0
    data = response.json()
    try:
        workoutsource = data['device_name']
    except KeyError:
        workoutsource = 'intervals.icu'
    try:
        title = data['name']
    except KeyError:
        title = 'Intervals workout'
    # BUG FIX: this prefix used to be applied *before* title was assigned,
    # raising UnboundLocalError for any Garmin-sourced activity.
    if 'garmin' in workoutsource.lower():
        title = 'Garmin: ' + title
    try:
        workouttype = intervalsmappinginv[data['type']]
    except KeyError:
        workouttype = 'water'
    try:
        rpe = data['icu_rpe']
    except KeyError:
        rpe = 0
    try:
        is_commute = data['commute']
        if is_commute is None:
            is_commute = False
    except KeyError:
        is_commute = False
    try:
        subtype = data['sub_type']
        if subtype is not None:
            subtype = subtype.capitalize()
    except KeyError:
        subtype = None
    try:
        is_race = data['race']
        if is_race is None:
            is_race = False
    except KeyError:
        is_race = False
    # Download the raw FIT file for this activity.
    url = "https://intervals.icu/api/v1/activity/{workoutid}/fit-file".format(workoutid=workoutid)
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return 0
    try:
        fit_data = response.content
        fit_filename = 'media/'+f'{uuid4().hex[:16]}.fit'
        with open(fit_filename, 'wb') as fit_file:
            fit_file.write(fit_data)
    except Exception:
        return 0
    try:
        row = FP(fit_filename)
        rowdata = rowingdata.rowingdata(df=row.df)
        rowsummary = FitSummaryData(fit_filename)
        duration = totaltime_sec_to_string(rowdata.duration)
        # Reading the distance column doubles as a sanity check that the FIT
        # file parsed into usable stroke data; failure aborts the import.
        distance = rowdata.df[" Horizontal (meters)"].iloc[-1]
    except Exception:
        return 0
    w = Workout(
        user=rower,
        duration=duration,
        uploadedtointervals=workoutid,
    )
    w.save()
    uploadoptions = {
        'user': rower.user.id,
        'boattype': '1x',
        'workouttype': workouttype,
        'workoutsource': workoutsource,
        'file': fit_filename,
        'intervalsid': workoutid,
        'title': title,
        'rpe': rpe,
        'notes': '',
        'offline': False,
        'id': w.id,
    }
    response = upload_handler(uploadoptions, fit_filename)
    if response['status'] != 'processing':
        return 0
    # Tag every workout imported from this activity; link to a planned
    # session when intervals.icu reports a paired event. Best effort - any
    # missing key / record is ignored.
    try:
        paired_event_id = data['paired_event_id']
        ws = Workout.objects.filter(uploadedtointervals=workoutid)
        for w in ws:
            w.sub_type = subtype
            w.save()
        if is_commute:
            for w in ws:
                w.is_commute = True
                w.sub_type = "Commute"
                w.save()
        if is_race:
            for w in ws:
                w.is_race = True
                w.save()
        if ws.count() > 0:
            pss = PlannedSession.objects.filter(rower=rower, intervals_icu_id=paired_event_id)
            if pss.count() > 0:
                for ps in pss:
                    for w in ws:
                        w.plannedsession = ps
                        w.save()
    except KeyError:
        pass
    except Workout.DoesNotExist:
        pass
    except PlannedSession.DoesNotExist:
        pass
    return w.id
def splitstdata(lijst):  # pragma: no cover
    """Split a flat [t0, v0, t1, v1, ...] stream list into two numpy arrays.

    Returns [times, values]; a trailing unpaired element is ignored, as in
    the original implementation. The old loop re-sliced the list on every
    iteration (O(n^2)); stride slicing does the same job in one pass.
    """
    paired = lijst[:2 * (len(lijst) // 2)]
    return [np.array(paired[0::2]), np.array(paired[1::2])]
@app.task
def handle_sporttracks_workout_from_data(user, importid, source,
                                         workoutsource, debug=False, **kwargs):  # pragma: no cover
    """Fetch one SportTracks activity and import it as a Workout.

    SportTracks streams arrive as flat [time, value, ...] lists; each is
    split with splitstdata, de-duplicated, aligned on one time index and
    written to a gzipped CSV that is handed to the upload pipeline.

    Returns 1 on success, 0 when the upload pipeline rejects the file, or a
    (0, message) tuple when the activity has neither distance nor
    heart-rate data.
    """
    r = user.rower
    authorizationstring = str('Bearer ' + r.sporttrackstoken)
    headers = {'Authorization': authorizationstring,
               'user-agent': 'sanderroosendaal',
               'Content-Type': 'application/json'}
    url = "https://api.sporttracks.mobi/api/v2/fitnessActivities/" + \
        str(importid)
    s = requests.get(url, headers=headers)
    data = s.json()
    try:
        workouttype = data['type']
    except KeyError:  # pragma: no cover
        workouttype = 'other'
    if workouttype not in [x[0] for x in Workout.workouttypes]:
        workouttype = 'other'
    r = Rower.objects.get(user=user)
    rowdatetime = iso8601.parse_date(data['start_time'])
    starttimeunix = arrow.get(rowdatetime).timestamp()
    try:
        title = data['name']
    except KeyError:  # pragma: no cover
        title = "Imported data"
    # Distance stream; fall back to a zeroed distance on the HR timeline.
    try:
        res = splitstdata(data['distance'])
        distance = res[1]
        times_distance = res[0]
    except KeyError:  # pragma: no cover
        try:
            res = splitstdata(data['heartrate'])
            times_distance = res[0]
            distance = 0*times_distance
        except KeyError:
            return (0, "No distance or heart rate data in the workout")
    # GPS stream (optional); best effort, zeros when absent or malformed.
    try:
        locs = data['location']
        res = splitstdata(locs)
        times_location = res[0]
        latlong = res[1]
        latcoord = []
        loncoord = []
        for coord in latlong:
            latcoord.append(coord[0])
            loncoord.append(coord[1])
    except Exception:
        times_location = times_distance
        latcoord = np.zeros(len(times_distance))
        loncoord = np.zeros(len(times_distance))
    if workouttype in mytypes.otwtypes:  # pragma: no cover
        workouttype = 'rower'
    try:
        res = splitstdata(data['cadence'])
        times_spm = res[0]
        spm = res[1]
    except KeyError:  # pragma: no cover
        times_spm = times_distance
        spm = 0*times_distance
    try:
        res = splitstdata(data['heartrate'])
        hr = res[1]
        times_hr = res[0]
    except KeyError:
        times_hr = times_distance
        hr = 0*times_distance
    # create data series and remove duplicates
    distseries = pd.Series(distance, index=times_distance)
    distseries = distseries.groupby(distseries.index).first()
    latseries = pd.Series(latcoord, index=times_location)
    latseries = latseries.groupby(latseries.index).first()
    lonseries = pd.Series(loncoord, index=times_location)
    lonseries = lonseries.groupby(lonseries.index).first()
    spmseries = pd.Series(spm, index=times_spm)
    spmseries = spmseries.groupby(spmseries.index).first()
    hrseries = pd.Series(hr, index=times_hr)
    hrseries = hrseries.groupby(hrseries.index).first()
    # Combine all streams on one shared time index.
    d = {
        ' Horizontal (meters)': distseries,
        ' latitude': latseries,
        ' longitude': lonseries,
        ' Cadence (stokes/min)': spmseries,
        ' HRCur (bpm)': hrseries,
    }
    df = pd.DataFrame(d)
    df = df.groupby(level=0).last()
    cum_time = df.index.values
    df[' ElapsedTime (sec)'] = cum_time
    velo = df[' Horizontal (meters)'].diff()/df[' ElapsedTime (sec)'].diff()
    df[' Power (watts)'] = 0.0*velo
    nr_rows = len(velo.values)
    df[' DriveLength (meters)'] = np.zeros(nr_rows)
    df[' StrokeDistance (meters)'] = np.zeros(nr_rows)
    df[' DriveTime (ms)'] = np.zeros(nr_rows)
    df[' StrokeRecoveryTime (ms)'] = np.zeros(nr_rows)
    df[' AverageDriveForce (lbs)'] = np.zeros(nr_rows)
    df[' PeakDriveForce (lbs)'] = np.zeros(nr_rows)
    df[' lapIdx'] = np.zeros(nr_rows)
    unixtime = cum_time+starttimeunix
    unixtime[0] = starttimeunix
    df['TimeStamp (sec)'] = unixtime
    # Smooth velocity over ~5 seconds before converting to pace.
    dt = np.diff(cum_time).mean()
    wsize = round(5./dt)
    velo2 = ewmovingaverage(velo, wsize)
    df[' Stroke500mPace (sec/500m)'] = 500./velo2
    df = df.fillna(0)
    # BUG FIX: sort_values returns a new frame; the result used to be
    # discarded, leaving the CSV potentially unsorted.
    df = df.sort_values(by='TimeStamp (sec)', ascending=True)
    csvfilename = 'media/{code}_{importid}.csv'.format(
        importid=importid,
        code=uuid4().hex[:16]
    )
    df.to_csv(csvfilename+'.gz', index_label='index',
              compression='gzip')
    w = Workout(
        user=r,
        duration=totaltime_sec_to_string(cum_time[-1]),
        uploadedtosporttracks=importid,
    )
    w.save()
    uploadoptions = {
        'user': user.id,
        'file': csvfilename+'.gz',
        # BUG FIX: the dict used to define 'title' twice ('' then title);
        # only the real title is kept.
        'title': title,
        'workouttype': workouttype,
        'boattype': '1x',
        'sporttracksid': importid,
        'id': w.id,
    }
    response = upload_handler(uploadoptions, csvfilename+'.gz')
    if response['status'] != 'processing':
        return 0
    return 1
@app.task
def handle_rp3_async_workout(userid, rp3token, rp3id, startdatetime, max_attempts, debug=False, **kwargs):
    """Poll the RP3 GraphQL API for a CSV export of a workout and import it.

    Requests a download link for ``rp3id``, polling every few seconds until
    the export is ready (at most max_attempts tries), downloads the CSV and
    hands it to the upload pipeline. Returns 1 on success, 0 on failure.
    """
    graphql_url = "https://rp3rowing-app.com/graphql"
    # BUG FIX: this local used to be named `timezone`, shadowing the
    # django.utils.timezone import and making timezone.now() below crash
    # (str has no attribute 'now').
    user_timezone = kwargs.get('timezone', 'UTC')
    headers = {'Authorization': 'Bearer ' + rp3token}
    get_download_link = """{
    download(workout_id: """ + str(rp3id) + """, type:csv){
        id
        status
        link
    }
}"""
    have_link = False
    download_url = ''
    counter = 0
    waittime = 3
    while not have_link:
        try:
            response = requests.post(
                url=graphql_url,
                headers=headers,
                json={'query': get_download_link}
            )
            dologging('rp3_import.log', response.status_code)
            if response.status_code != 200:  # pragma: no cover
                # Give up immediately instead of trying to parse an error
                # response (which could raise NameError further down).
                break
            workout_download_details = pd.json_normalize(
                response.json()['data']['download'])
            dologging('rp3_import.log', response.json())
        except Exception:  # pragma: no cover
            return 0
        if workout_download_details.iat[0, 1] == 'ready':
            download_url = workout_download_details.iat[0, 2]
            have_link = True
            dologging('rp3_import.log', download_url)
        counter += 1
        dologging('rp3_import.log', counter)
        if counter > max_attempts:  # pragma: no cover
            break
        time.sleep(waittime)
    if download_url == '':  # pragma: no cover
        return 0
    filename = 'media/RP3Import_'+str(rp3id)+'.csv'
    res = requests.get(download_url, headers=headers)
    dologging('rp3_import.log', 'tasks.py '+str(rp3id))
    dologging('rp3_import.log', startdatetime)
    if not startdatetime:  # pragma: no cover
        # Works now that django.utils.timezone is no longer shadowed.
        startdatetime = str(timezone.now())
    try:
        startdatetime = str(startdatetime)
    except Exception:  # pragma: no cover
        pass
    if res.status_code != 200:  # pragma: no cover
        return 0
    with open(filename, 'wb') as f:
        dologging('rp3_import.log', 'Rp3 ID = {id}'.format(id=rp3id))
        f.write(res.content)
    w = Workout(
        user=User.objects.get(id=userid).rower,
        duration='00:00:01',
        uploadedtosporttracks=int(rp3id)
    )
    w.save()
    uploadoptions = {
        'user': userid,
        'file': filename,
        'workouttype': 'rower',
        'boattype': 'rp3',
        'rp3id': int(rp3id),
        'startdatetime': startdatetime,
        'timezone': user_timezone,
    }
    response = upload_handler(uploadoptions, filename)
    if response['status'] != 'processing':
        return 0
    return 1
@app.task
def handle_c2_getworkout(userid, c2token, c2id, defaulttimezone, debug=False, **kwargs):
    """Fetch a single Concept2 logbook result and import it immediately."""
    headers = {
        'Authorization': str('Bearer ' + c2token),
        'user-agent': 'sanderroosendaal',
        'Content-Type': 'application/json',
    }
    response = requests.get(
        "https://log.concept2.com/api/users/me/results/" + str(c2id),
        headers=headers,
    )
    if response.status_code != 200:  # pragma: no cover
        return 0
    # Delegate with zero delay; the async handler does the real work.
    payload = response.json()['data']
    return handle_c2_async_workout({c2id: payload}, userid, c2token, c2id, 0, defaulttimezone)
# Concept2 logbook sends over split data for each interval
# We use it here to generate a custom summary
# Some users complained about small differences
def summaryfromsplitdata(splitdata, data, filename, sep='|', workouttype='rower'):
    """Build a textual workout summary from Concept2 split/interval data.

    Returns (sums, sa, results): the formatted summary string, a flat
    interval-definition list of (value, unit, work/rest) triples, and the
    matching measured results - both in the format rowingdata's interval
    parser expects.
    """
    workouttype = workouttype.lower()
    totaldist = data['distance']
    # All C2 times arrive in tenths of a second.
    totaltime = data['time']/10.
    try:
        spm = data['stroke_rate']
    except KeyError:
        spm = 0
    try:
        resttime = data['rest_time']/10.
    except KeyError:  # pragma: no cover
        resttime = 0
    try:
        restdistance = data['rest_distance']
    except KeyError:  # pragma: no cover
        restdistance = 0
    try:
        avghr = data['heart_rate']['average']
    except KeyError:  # pragma: no cover
        avghr = 0
    try:
        maxhr = data['heart_rate']['max']
    except KeyError:  # pragma: no cover
        maxhr = 0
    try:
        avgpace = 500.*totaltime/totaldist
    except (ZeroDivisionError, OverflowError):  # pragma: no cover
        avgpace = 0.
    try:
        restpace = 500.*resttime/restdistance
    except (ZeroDivisionError, OverflowError):  # pragma: no cover
        restpace = 0.
    try:
        velo = totaldist/totaltime
        # Concept2's standard power model: P = 2.8 * v^3.
        avgpower = 2.8*velo**(3.0)
    except (ZeroDivisionError, OverflowError):  # pragma: no cover
        velo = 0
        avgpower = 0
    if workouttype in ['bike', 'bikeerg']:  # pragma: no cover
        # BikeErg speed is per 1000m, so halve before the power formula.
        velo = velo/2.
        avgpower = 2.8*velo**(3.0)
        velo = velo*2
    try:
        restvelo = restdistance/resttime
    except (ZeroDivisionError, OverflowError):  # pragma: no cover
        restvelo = 0
    restpower = 2.8*restvelo**(3.0)
    if workouttype in ['bike', 'bikeerg']:  # pragma: no cover
        restvelo = restvelo/2.
        restpower = 2.8*restvelo**(3.0)
        restvelo = restvelo*2
    try:
        avgdps = totaldist/data['stroke_count']
    except (ZeroDivisionError, OverflowError, KeyError):
        avgdps = 0
    from rowingdata import summarystring, workstring, interval_string
    sums = summarystring(totaldist, totaltime, avgpace, spm, avghr, maxhr,
                         avgdps, avgpower, readFile=filename,
                         separator=sep)
    sums += workstring(totaldist, totaltime, avgpace, spm, avghr, maxhr,
                       avgdps, avgpower, separator=sep, symbol='W')
    sums += workstring(restdistance, resttime, restpace, 0, 0, 0, 0, restpower,
                       separator=sep,
                       symbol='R')
    sums += '\nWorkout Details\n'
    sums += '#-{sep}SDist{sep}-Split-{sep}-SPace-{sep}-Pwr-{sep}SPM-{sep}AvgHR{sep}MaxHR{sep}DPS-\n'.format(
        sep=sep
    )
    intervalnr = 0
    sa = []
    results = []
    try:
        timebased = data['workout_type'] in [
            'FixedTimeSplits', 'FixedTimeInterval']
    except KeyError:  # pragma: no cover
        timebased = False
    for interval in splitdata:
        try:
            idist = interval['distance']
        except KeyError:  # pragma: no cover
            idist = 0
        try:
            itime = interval['time']/10.
        except KeyError:  # pragma: no cover
            itime = 0
        try:
            ipace = 500.*itime/idist
        except (ZeroDivisionError, OverflowError):  # pragma: no cover
            ipace = 180.
        try:
            ispm = interval['stroke_rate']
        except KeyError:  # pragma: no cover
            ispm = 0
        try:
            irest_time = interval['rest_time']/10.
        except KeyError:  # pragma: no cover
            irest_time = 0
        try:
            iavghr = interval['heart_rate']['average']
        except KeyError:  # pragma: no cover
            iavghr = 0
        try:
            # BUG FIX: this used to read ['average'] again (copy-paste),
            # so the per-interval max HR was always the average HR.
            imaxhr = interval['heart_rate']['max']
        except KeyError:  # pragma: no cover
            imaxhr = 0
        # create interval values
        iarr = [idist, 'meters', 'work']
        resarr = [itime]
        if timebased:  # pragma: no cover
            iarr = [itime, 'seconds', 'work']
            resarr = [idist]
        if irest_time > 0:
            iarr += [irest_time, 'seconds', 'rest']
            try:
                resarr += [interval['rest_distance']]
            except KeyError:
                resarr += [np.nan]
        sa += iarr
        results += resarr
        if itime != 0:
            ivelo = idist/itime
            ipower = 2.8*ivelo**(3.0)
            if workouttype in ['bike', 'bikeerg']:  # pragma: no cover
                ipower = 2.8*(ivelo/2.)**(3.0)
        else:  # pragma: no cover
            ivelo = 0
            ipower = 0
        sums += interval_string(intervalnr, idist, itime, ipace, ispm,
                                iavghr, imaxhr, 0, ipower, separator=sep)
        intervalnr += 1
    return sums, sa, results
@app.task
def handle_c2_async_workout(alldata, userid, c2token, c2id, delaysec,
                            defaulttimezone, debug=False, **kwargs):
    """Import one Concept2 logbook result (given its summary JSON) as a Workout.

    alldata maps c2id -> the logbook 'data' dict for that result. Sleeps
    delaysec first (used to stagger bursts of webhook calls), downloads the
    per-stroke data when available, writes a gzipped CSV, creates the
    Workout and hands it to the upload pipeline. When the summary contains
    split/interval data, a custom summary is attached afterwards.
    Returns 1 on success, 0 on failure.
    """
    time.sleep(delaysec)
    dologging('c2_import.log', str(c2id)+' for userid '+str(userid))
    data = alldata[c2id]
    splitdata = None
    distance = data['distance']
    try:  # pragma: no cover
        rest_distance = data['rest_distance']
        # rest_time = data['rest_time']/10.
    except KeyError:
        rest_distance = 0
        # rest_time = 0
    # Total distance includes rest meters so it matches the logbook total.
    distance = distance+rest_distance
    c2id = data['id']
    dologging('c2_import.log', data['type'])
    # Map the C2 machine type onto our workouttype/boattype pair.
    if data['type'] in ['rower', 'dynamic', 'slides']:
        workouttype = 'rower'
        boattype = data['type']
        if data['type'] == 'rower':
            boattype = 'static'
    else:
        workouttype = data['type']
        boattype = 'static'
    # verified = data['verified']
    # weightclass = data['weight_class']
    try:
        has_strokedata = data['stroke_data']
    except KeyError:  # pragma: no cover
        # Optimistically assume strokes exist; the strokes request below
        # flips this flag when the API has none.
        has_strokedata = True
    s = 'User {userid}, C2 ID {c2id}'.format(userid=userid, c2id=c2id)
    dologging('c2_import.log', s)
    dologging('c2_import.log', json.dumps(data))
    try:
        title = data['name']
    except KeyError:
        title = ""
    try:
        # Append the first line of the comments (max 40 chars) to the title.
        t = data['comments'].split('\n', 1)[0]
        title += t[:40]
    except:  # pragma: no cover
        title = ''
    # Create CSV file name and save data to CSV file
    csvfilename = 'media/{code}_{c2id}.csv.gz'.format(
        code=uuid4().hex[:16], c2id=c2id)
    startdatetime, starttime, workoutdate, duration, starttimeunix, timezone = utils.get_startdatetime_from_c2data(
        data
    )
    s = 'Time zone {timezone}, startdatetime {startdatetime}, duration {duration}'.format(
        timezone=timezone, startdatetime=startdatetime,
        duration=duration)
    dologging('c2_import.log', s)
    authorizationstring = str('Bearer ' + c2token)
    headers = {'Authorization': authorizationstring,
               'user-agent': 'sanderroosendaal',
               'Content-Type': 'application/json'}
    url = "https://log.concept2.com/api/users/me/results/"+str(c2id)+"/strokes"
    try:
        s = requests.get(url, headers=headers)
    except ConnectionError:  # pragma: no cover
        return 0
    if s.status_code != 200:  # pragma: no cover
        dologging('c2_import.log', 'No Stroke Data. Status Code {code}'.format(
            code=s.status_code))
        dologging('c2_import.log', s.text)
        has_strokedata = False
    if not has_strokedata:  # pragma: no cover
        # No per-stroke samples - build a coarse frame from the summary only.
        # NOTE(review): df_from_summary is not defined in this part of the
        # file - presumably defined elsewhere in the module; verify.
        df = df_from_summary(data)
    else:
        # dologging('debuglog.log',json.dumps(s.json()))
        try:
            strokedata = pd.DataFrame.from_dict(s.json()['data'])
        except AttributeError:  # pragma: no cover
            dologging('c2_import.log', 'No stroke data in stroke data')
            return 0
        try:
            # 't' arrives in tenths of a second; make_cumvalues also detects
            # counter resets and returns a per-sample lap index.
            res = make_cumvalues(0.1*strokedata['t'])
            cum_time = res[0]
            lapidx = res[1]
        except KeyError:  # pragma: no cover
            dologging('c2_import.log', 'No time values in stroke data')
            return 0
        unixtime = cum_time+starttimeunix
        # unixtime[0] = starttimeunix
        seconds = 0.1*strokedata.loc[:, 't']
        nr_rows = len(unixtime)
        try:  # pragma: no cover
            latcoord = strokedata.loc[:, 'lat']
            loncoord = strokedata.loc[:, 'lon']
        except:
            latcoord = np.zeros(nr_rows)
            loncoord = np.zeros(nr_rows)
        try:
            strokelength = strokedata.loc[:, 'strokelength']
        except:  # pragma: no cover
            strokelength = np.zeros(nr_rows)
        # 'd' is distance in decimeters.
        dist2 = 0.1*strokedata.loc[:, 'd']
        cumdist, intervals = make_cumvalues(dist2)
        try:
            spm = strokedata.loc[:, 'spm']
        except KeyError:  # pragma: no cover
            spm = 0*dist2
        try:
            hr = strokedata.loc[:, 'hr']
        except KeyError:  # pragma: no cover
            hr = 0*spm
        # 'p' is pace in tenths of sec/500m; clamp outliers and replace 0
        # (paused samples) with a slow 300 s/500m so velocity stays finite.
        pace = strokedata.loc[:, 'p']/10.
        pace = np.clip(pace, 0, 1e4)
        pace = pace.replace(0, 300)
        velo = 500./pace
        power = 2.8*velo**3
        if workouttype == 'bike':  # pragma: no cover
            # BikeErg pace is per 1000m.
            velo = 1000./pace
        dologging('c2_import.log', 'Unix Time Stamp {s}'.format(s=unixtime[0]))
        # dologging('debuglog.log',json.dumps(s.json()))
        df = pd.DataFrame({'TimeStamp (sec)': unixtime,
                           ' Horizontal (meters)': dist2,
                           ' Cadence (stokes/min)': spm,
                           ' HRCur (bpm)': hr,
                           ' longitude': loncoord,
                           ' latitude': latcoord,
                           ' Stroke500mPace (sec/500m)': pace,
                           ' Power (watts)': power,
                           ' DragFactor': np.zeros(nr_rows),
                           ' DriveLength (meters)': np.zeros(nr_rows),
                           ' StrokeDistance (meters)': strokelength,
                           ' DriveTime (ms)': np.zeros(nr_rows),
                           ' StrokeRecoveryTime (ms)': np.zeros(nr_rows),
                           ' AverageDriveForce (lbs)': np.zeros(nr_rows),
                           ' PeakDriveForce (lbs)': np.zeros(nr_rows),
                           ' lapIdx': lapidx,
                           ' WorkoutState': 4,
                           ' ElapsedTime (sec)': seconds,
                           'cum_dist': cumdist
                           })
        # NOTE(review): sort_values returns a new frame; this result is
        # discarded - likely meant df = df.sort_values(...). Confirm intent.
        df.sort_values(by='TimeStamp (sec)', ascending=True)
    _ = df.to_csv(csvfilename, index_label='index', compression='gzip')
    w = Workout(
        user=User.objects.get(id=userid).rower,
        duration=duration,
        distance=distance,
        uploadedtoc2=c2id,
    )
    w.save()
    uploadoptions = {
        'user': userid,
        'file': csvfilename,
        'title': title,
        'workouttype': workouttype,
        'boattype': boattype,
        'c2id': c2id,
        'startdatetime': startdatetime.isoformat(),
        'timezone': str(timezone)
    }
    response = upload_handler(uploadoptions, csvfilename)
    if response['status'] != 'processing':
        return 0
    dologging('c2_import.log', 'workout id {id}'.format(id=w.id))
    record = create_or_update_syncrecord(w.user, w, c2id=c2id)
    # summary
    if 'workout' in data:
        if 'splits' in data['workout']:  # pragma: no cover
            splitdata = data['workout']['splits']
        elif 'intervals' in data['workout']:  # pragma: no cover
            splitdata = data['workout']['intervals']
        else:  # pragma: no cover
            splitdata = False
    else:
        splitdata = False
    if splitdata:  # pragma: no cover
        # Attach a summary built from the logbook's split/interval data and
        # feed the interval structure back into the stored stroke file.
        summary, sa, results = summaryfromsplitdata(
            splitdata, data, csvfilename, workouttype=workouttype)
        w.summary = summary
        w.save()
        from rowingdata.trainingparser import getlist
        if sa:
            values = getlist(sa)
            units = getlist(sa, sel='unit')
            types = getlist(sa, sel='type')
            rowdata = rdata(csvfile=csvfilename)
            if rowdata:
                rowdata.updateintervaldata(values, units, types, results)
                rowdata.write_csv(csvfilename, gzip=True)
                update_strokedata(w.id, rowdata.df)
    return 1
@app.task
def handle_split_workout_by_intervals(id, debug=False, **kwargs):
    """Split one workout into per-interval workouts and upload each part.

    Returns 1 on success, 0 when the data file is missing or the data has
    no interval information.
    """
    row = Workout.objects.get(id=id)
    r = row.user
    rowdata = rdata(csvfile=row.csvfilename)
    if rowdata == 0:
        # BUG FIX: this branch referenced undefined view-layer names
        # (messages, request, HttpResponseRedirect, url) and would have
        # raised NameError; a Celery task can only log and bail out.
        dologging('split_workout.log',
                  'No data file found for workout {id}'.format(id=id))
        return 0
    try:
        new_rowdata = rowdata.split_by_intervals()
    except KeyError:
        # No interval markers in the data - nothing to split.
        return 0
    interval_i = 1
    for data in new_rowdata:
        filename = 'media/{code}.csv'.format(
            code=uuid4().hex[:16]
        )
        data.write_csv(filename)
        uploadoptions = {
            'user': r.user.id,
            'title': '{title} - interval {i}'.format(title=row.name, i=interval_i),
            'file': filename,
            'boattype': row.boattype,
            'workouttype': row.workouttype,
        }
        upload_handler(uploadoptions, filename)
        interval_i = interval_i + 1
    return 1
@app.task
def fetch_strava_workout(stravatoken, oauth_data, stravaid, csvfilename, userid, debug=False, **kwargs):
    """Download a Strava activity's streams and import them as a Workout.

    Pulls the activity summary plus the cadence / heart-rate / time /
    velocity / distance / GPS / power streams, rebuilds a stroke-level
    dataframe in rowingdata's column layout, writes it to ``csvfilename``
    and hands the file to the upload pipeline. Returns 1 on success, 0 on
    any failure.
    """
    authorizationstring = str('Bearer '+stravatoken)
    headers = {'Authorization': authorizationstring,
               'user-agent': 'sanderroosendaal',
               'Content-Type': 'application/json',
               'resolution': 'medium', }
    url = "https://www.strava.com/api/v3/activities/"+str(stravaid)
    response = requests.get(url, headers=headers)
    if response.status_code != 200:  # pragma: no cover
        dologging('stravalog.log', 'handle_get_strava_file response code {code}\n'.format(
            code=response.status_code))
        try:
            dologging('stravalog.log', 'Response json {json}\n'.format(json=response.json()))
        except Exception:
            pass
        return 0
    try:
        # BUG FIX: the summary used to be fetched with a second, redundant
        # HTTP request; reuse the response already in hand.
        workoutsummary = response.json()
    except Exception:  # pragma: no cover
        return 0
    spm = get_strava_stream(None, 'cadence', stravaid,
                            authorizationstring=authorizationstring)
    hr = get_strava_stream(None, 'heartrate', stravaid,
                           authorizationstring=authorizationstring)
    t = get_strava_stream(None, 'time', stravaid,
                          authorizationstring=authorizationstring)
    velo = get_strava_stream(None, 'velocity_smooth',
                             stravaid, authorizationstring=authorizationstring)
    d = get_strava_stream(None, 'distance', stravaid,
                          authorizationstring=authorizationstring)
    coords = get_strava_stream(
        None, 'latlng', stravaid, authorizationstring=authorizationstring)
    power = get_strava_stream(None, 'watts', stravaid,
                              authorizationstring=authorizationstring)
    if t is not None:
        nr_rows = len(t)
    else:  # pragma: no cover
        # No time stream - synthesize a 1 Hz timeline from the elapsed time.
        try:
            duration = int(workoutsummary['elapsed_time'])
        except KeyError:
            duration = 0
        t = pd.Series(range(duration+1))
        nr_rows = len(t)
    if nr_rows == 0:  # pragma: no cover
        return 0
    # Zero-fill any stream Strava did not provide.
    if d is None:  # pragma: no cover
        d = 0*t
    if spm is None:  # pragma: no cover
        spm = np.zeros(nr_rows)
    if power is None:  # pragma: no cover
        power = np.zeros(nr_rows)
    if hr is None:  # pragma: no cover
        hr = np.zeros(nr_rows)
    if velo is None:  # pragma: no cover
        velo = np.zeros(nr_rows)
    try:
        # Smooth velocity over ~5 seconds before converting to pace.
        dt = np.diff(t).mean()
        wsize = round(5./dt)
        velo2 = ewmovingaverage(velo, wsize)
    except ValueError:  # pragma: no cover
        velo2 = velo
    if coords is not None:
        try:
            lat = coords[:, 0]
            lon = coords[:, 1]
        except IndexError:  # pragma: no cover
            lat = np.zeros(len(t))
            lon = np.zeros(len(t))
    else:  # pragma: no cover
        lat = np.zeros(len(t))
        lon = np.zeros(len(t))
    try:
        strokelength = velo*60./(spm)
        # Zero cadence yields inf stroke length; zero those samples out.
        strokelength[np.isinf(strokelength)] = 0.0
    except ValueError:
        strokelength = np.zeros(len(t))
    pace = 500./(1.0*velo2)
    pace[np.isinf(pace)] = 0.0
    try:
        # Stroke-level frame in Concept2-style units
        # (tenths of a second / decimeters / tenths of sec per 500m).
        strokedata = pl.DataFrame({'t': 10*t,
                                   'd': 10*d,
                                   'p': 10*pace,
                                   'spm': spm,
                                   'hr': hr,
                                   'lat': lat,
                                   'lon': lon,
                                   'power': power,
                                   'strokelength': strokelength,
                                   })
    except ValueError:  # pragma: no cover
        return 0
    except ShapeError:
        return 0
    try:
        workouttype = mytypes.stravamappinginv[workoutsummary['type']]
    except KeyError:  # pragma: no cover
        workouttype = 'other'
    if workouttype.lower() == 'rowing':  # pragma: no cover
        workouttype = 'rower'
    try:
        if 'summary_polyline' in workoutsummary['map'] and workouttype == 'rower':  # pragma: no cover
            workouttype = 'water'
    except (KeyError, TypeError):  # pragma: no cover
        pass
    try:
        rowdatetime = iso8601.parse_date(workoutsummary['date_utc'])
    except KeyError:
        try:
            rowdatetime = iso8601.parse_date(workoutsummary['start_date'])
        except KeyError:
            rowdatetime = iso8601.parse_date(workoutsummary['date'])
    except iso8601.ParseError:  # pragma: no cover
        # BUG FIX: was a bare `ParseError`, which is not a defined name and
        # would itself raise NameError when the date failed to parse.
        rowdatetime = iso8601.parse_date(workoutsummary['date'])
    try:
        title = workoutsummary['name']
    except KeyError:  # pragma: no cover
        title = ""
    try:
        # BUG FIX: the first comment line used to be stored in `t`,
        # clobbering the time stream; use a dedicated name.
        firstline = workoutsummary['comments'].split('\n', 1)[0]
        title += firstline[:20]
    except Exception:
        title = ''
    starttimeunix = arrow.get(rowdatetime).timestamp()
    res = make_cumvalues_array(0.1*strokedata['t'].to_numpy())
    cum_time = pl.Series(res[0])
    lapidx = pl.Series(res[1])
    unixtime = cum_time+starttimeunix
    seconds = 0.1*strokedata['t']
    nr_rows = len(unixtime)
    try:
        latcoord = strokedata['lat']
        loncoord = strokedata['lon']
        if latcoord.std() == 0 and loncoord.std() == 0 and workouttype == 'water':  # pragma: no cover
            # Constant coordinates mean no real GPS track - treat as erg.
            workouttype = 'rower'
    except Exception:  # pragma: no cover
        latcoord = np.zeros(nr_rows)
        loncoord = np.zeros(nr_rows)
        if workouttype == 'water':
            workouttype = 'rower'
    try:
        strokelength = strokedata['strokelength']
    except Exception:  # pragma: no cover
        strokelength = np.zeros(nr_rows)
    dist2 = 0.1*strokedata['d']
    try:
        spm = strokedata['spm']
    except (KeyError, ColumnNotFoundError):  # pragma: no cover
        spm = 0*dist2
    try:
        hr = strokedata['hr']
    except (KeyError, ColumnNotFoundError):  # pragma: no cover
        hr = 0*spm
    # Clamp pace and replace 0 (paused samples) with a slow 300 s/500m so
    # derived velocity/power stay finite.
    pace = strokedata['p']/10.
    pace = np.clip(pace, 0, 1e4)
    pace = pl.Series(pace).replace(0, 300)
    velo = 500./pace
    try:
        power = strokedata['power']
    except KeyError:  # pragma: no cover
        power = 2.8*velo**3
    # Assemble the CSV frame in rowingdata's column layout.
    df = pl.DataFrame({'TimeStamp (sec)': unixtime,
                       ' Horizontal (meters)': dist2,
                       ' Cadence (stokes/min)': spm,
                       ' HRCur (bpm)': hr,
                       ' longitude': loncoord,
                       ' latitude': latcoord,
                       ' Stroke500mPace (sec/500m)': pace,
                       ' Power (watts)': power,
                       ' DragFactor': np.zeros(nr_rows),
                       ' DriveLength (meters)': np.zeros(nr_rows),
                       ' StrokeDistance (meters)': strokelength,
                       ' DriveTime (ms)': np.zeros(nr_rows),
                       ' StrokeRecoveryTime (ms)': np.zeros(nr_rows),
                       ' AverageDriveForce (lbs)': np.zeros(nr_rows),
                       ' PeakDriveForce (lbs)': np.zeros(nr_rows),
                       ' lapIdx': lapidx,
                       ' ElapsedTime (sec)': seconds,
                       'cum_dist': dist2,
                       })
    # BUG FIX: polars sort() returns a new frame; the result used to be
    # discarded, leaving the output unsorted.
    df = df.sort('TimeStamp (sec)')
    row = rowingdata.rowingdata_pl(df=df)
    try:
        row.write_csv(csvfilename, compressed=False)
    except ComputeError:
        # Fall back to the pandas-based writer when polars chokes.
        dologging('stravalog.log', 'polars not working')
        row = rowingdata.rowingdata(df=df.to_pandas())
        row.write_csv(csvfilename)
    uploadoptions = {
        'user': userid,
        'file': csvfilename,
        'title': title,
        'workouttype': workouttype,
        'boattype': '1x',
        'stravaid': stravaid,
    }
    response = upload_handler(uploadoptions, csvfilename)
    if response['status'] != 'processing':
        return 0
    dologging('strava_webhooks.log', 'fetch_strava_workout posted file with strava id {stravaid} user id {userid}\n'.format(
        stravaid=stravaid, userid=userid))
    return 1