From f0bfb810a45b00a9aac6dac7faed2fd580fd09d3 Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Sun, 22 Oct 2017 11:11:30 +0200
Subject: [PATCH 1/2] made a landing page option
---
rowers/forms.py | 4 ++++
rowers/models.py | 9 ++++++---
rowers/templates/list_workouts.html | 4 ++--
rowers/urls.py | 6 ++++--
rowers/utils.py | 6 ++++++
rowers/views.py | 15 ++++++++++++---
6 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/rowers/forms.py b/rowers/forms.py
index 7b7f2cc4..83de2878 100644
--- a/rowers/forms.py
+++ b/rowers/forms.py
@@ -12,6 +12,7 @@ import dataprep
import types
import datetime
from django.forms import formset_factory
+from utils import landingpages
# login form
class LoginForm(forms.Form):
@@ -180,6 +181,9 @@ class UploadOptionsForm(forms.Form):
makeprivate = forms.BooleanField(initial=False,required=False,
label='Make Workout Private')
+ landingpage = forms.ChoiceField(choices=landingpages,
+ initial='workout_edit_view')
+
class Meta:
fields = ['make_plot','plottype','upload_toc2','makeprivate']
diff --git a/rowers/models.py b/rowers/models.py
index 5b7a89ac..fc40651a 100644
--- a/rowers/models.py
+++ b/rowers/models.py
@@ -198,7 +198,7 @@ class TeamRequest(models.Model):
from utils import (
workflowleftpanel,workflowmiddlepanel,
- defaultleft,defaultmiddle
+ defaultleft,defaultmiddle,landingpages
)
# Extension of User with rowing specific data
@@ -281,8 +281,10 @@ class Rower(models.Model):
# Site Settings
workflowleftpanel = TemplateListField(default=defaultleft)
-
workflowmiddlepanel = TemplateListField(default=defaultmiddle)
+ defaultlandingpage = models.CharField(default='workout_edit_view',
+ max_length=200,
+ choices=landingpages)
# Access tokens
c2token = models.CharField(default='',max_length=200,blank=True,null=True)
@@ -889,7 +891,8 @@ class AccountRowerForm(ModelForm):
class Meta:
model = Rower
fields = ['weightcategory','getemailnotifications',
- 'defaulttimezone','showfavoritechartnotes']
+ 'defaulttimezone','showfavoritechartnotes',
+ 'defaultlandingpage']
class UserForm(ModelForm):
class Meta:
diff --git a/rowers/templates/list_workouts.html b/rowers/templates/list_workouts.html
index 9b61d44c..c04b7727 100644
--- a/rowers/templates/list_workouts.html
+++ b/rowers/templates/list_workouts.html
@@ -84,9 +84,9 @@
[RANKING PIECE]
{% endif %}
{% if workout.name != '' %}
- {{ workout.name }}
+ {{ workout.name }}
{% else %}
- No Name
+ No Name
{% endif %}
{% else %}
{% if workout.name != '' %}
diff --git a/rowers/urls.py b/rowers/urls.py
index 1691e9ae..a218c9b3 100644
--- a/rowers/urls.py
+++ b/rowers/urls.py
@@ -191,7 +191,8 @@ urlpatterns = [
url(r'^workout/compare/(?P\d+)/$',views.workout_comparison_list),
url(r'^workout/compare2/(?P\d+)/(?P\d+)/(?P\w+.*)/(?P\w+.*)/$',views.workout_comparison_view),
url(r'^workout/compare/(?P\d+)/(?P\d+-\d+-\d+)/(?P\w+.*)$',views.workout_comparison_list),
- url(r'^workout/(?P<id>\d+)/edit$',views.workout_edit_view),
+ url(r'^workout/(?P<id>\d+)/edit$',views.workout_edit_view,
+ name='workout_edit_view'),
url(r'^workout/(?P\d+)/navionics$',views.workout_edit_view_navionics),
url(r'^workout/(?P\d+)/map$',views.workout_map_view),
url(r'^workout/(?P\d+)/setprivate$',views.workout_setprivate_view),
@@ -329,7 +330,8 @@ urlpatterns = [
url(r'^legal', TemplateView.as_view(template_name='legal.html'),name='legal'),
url(r'^register$',views.rower_register_view),
url(r'^register/thankyou/$', TemplateView.as_view(template_name='registerthankyou.html'), name='registerthankyou'),
- url(r'^workout/(?P<id>\d+)/workflow$',views.workout_workflow_view),
+ url(r'^workout/(?P<id>\d+)/workflow$',views.workout_workflow_view,
+ name='workout_workflow_view'),
url(r'^workout/(?P\d+)/flexchart/(?P\w+.*)/(?P\w+.*)/(?P\w+.*)/(?P\w+)/$',views.workout_flexchart3_view),
url(r'^workout/(?P\d+)/flexchart/(?P\w+.*)/(?P\w+.*)/(?P\w+.*)/(?P\w+.*)$',views.workout_flexchart3_view),
url(r'^workout/(?P\d+)/flexchart/(?P\w+.*)/(?P\w+.*)/(?P\w+.*)$',views.workout_flexchart3_view),
diff --git a/rowers/utils.py b/rowers/utils.py
index 23a04080..27133bf5 100644
--- a/rowers/utils.py
+++ b/rowers/utils.py
@@ -5,6 +5,12 @@ import colorsys
lbstoN = 4.44822
+landingpages = (
+ ('workout_edit_view','Edit View'),
+ ('workout_workflow_view','Workflow View'),
+)
+
+
workflowmiddlepanel = (
('panel_statcharts.html','Static Charts'),
('flexthumbnails.html','Flex Charts'),
diff --git a/rowers/views.py b/rowers/views.py
index 09238cbc..ec138f07 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -4454,6 +4454,7 @@ def workouts_view(request,message='',successmessage='',
return render(request, 'list_workouts.html',
{'workouts': workouts,
+ 'rower':r,
'dateform':dateform,
'startdate':startdate,
'enddate':enddate,
@@ -7587,13 +7588,17 @@ def workout_upload_view(request,
'make_plot':False,
'upload_to_C2':False,
'plottype':'timeplot',
+ 'landingpage':'workout_edit_view',
},
docformoptions={
'workouttype':'rower',
}):
+ r = getrower(request.user)
+
if 'uploadoptions' in request.session:
uploadoptions = request.session['uploadoptions']
+ uploadoptions['landingpage'] = r.defaultlandingpage
else:
request.session['uploadoptions'] = uploadoptions
@@ -7651,7 +7656,6 @@ def workout_upload_view(request,
except KeyError:
upload_totp = False
- r = getrower(request.user)
if request.method == 'POST':
form = DocumentsForm(request.POST,request.FILES)
optionsform = UploadOptionsForm(request.POST)
@@ -7676,6 +7680,7 @@ def workout_upload_view(request,
upload_to_ua = optionsform.cleaned_data['upload_to_MapMyFitness']
upload_to_tp = optionsform.cleaned_data['upload_to_TrainingPeaks']
makeprivate = optionsform.cleaned_data['makeprivate']
+ landingpage = optionsform.cleaned_data['landingpage']
uploadoptions = {
'makeprivate':makeprivate,
@@ -7687,6 +7692,7 @@ def workout_upload_view(request,
'upload_to_RunKeeper':upload_to_rk,
'upload_to_MapMyFitness':upload_to_ua,
'upload_to_TrainingPeaks':upload_to_tp,
+ 'landingpage':r.defaultlandingpage,
}
@@ -7809,8 +7815,8 @@ def workout_upload_view(request,
messages.info(request,message)
else:
messages.error(request,message)
-
- url = reverse(workout_edit_view,
+
+ url = reverse(landingpage,
kwargs = {
'id':w.id,
})
@@ -7826,6 +7832,7 @@ def workout_upload_view(request,
return response
else:
form = DocumentsForm(initial=docformoptions)
+ print uploadoptions
optionsform = UploadOptionsForm(initial=uploadoptions)
return render(request, 'document_form.html',
{'form':form,
@@ -8778,6 +8785,7 @@ def rower_edit_view(request,message=""):
first_name = ucd['first_name']
last_name = ucd['last_name']
email = ucd['email']
+ defaultlandingpage = cd['defaultlandingpage']
weightcategory = cd['weightcategory']
getemailnotifications = cd['getemailnotifications']
defaulttimezone=cd['defaulttimezone']
@@ -8794,6 +8802,7 @@ def rower_edit_view(request,message=""):
r.defaulttimezone=defaulttimezone
r.weightcategory = weightcategory
r.getemailnotifications = getemailnotifications
+ r.defaultlandingpage = defaultlandingpage
r.save()
form = RowerForm(instance=r)
powerform = RowerPowerForm(instance=r)
From dd05cf2d1ba6ab0a77eece753c25bf708150ca50 Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Sun, 22 Oct 2017 11:33:18 +0200
Subject: [PATCH 2/2] cleaning up
---
rowers/dataprep.py | 1315 +++++++++++++--------------
rowers/forms.py | 3 +-
rowers/templates/document_form.html | 17 +-
rowers/views.py | 30 +-
4 files changed, 678 insertions(+), 687 deletions(-)
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 92c5c37b..72d7cb91 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -1,6 +1,6 @@
# All the data preparation, data cleaning and data mangling should
# be defined here
-from rowers.models import Workout, User, Rower,StrokeData
+from rowers.models import Workout, User, Rower, StrokeData
from rowingdata import rowingdata as rrdata
from rowers.tasks import handle_sendemail_unrecognized
@@ -9,25 +9,25 @@ import pytz
from rowingdata import rower as rrower
from rowingdata import main as rmain
-from rowingdata import get_file_type,get_empower_rigging
+from rowingdata import get_file_type, get_empower_rigging
-from pandas import DataFrame,Series
-from pytz import timezone as tz,utc
+from pandas import DataFrame, Series
+from pytz import timezone as tz, utc
from django.utils import timezone
-from time import strftime,strptime,mktime,time,daylight
+from time import strftime, strptime, mktime, time, daylight
import arrow
from django.utils.timezone import get_current_timezone
thetimezone = get_current_timezone()
from rowingdata import (
- TCXParser,RowProParser,ErgDataParser,
+ TCXParser, RowProParser, ErgDataParser,
CoxMateParser,
- BoatCoachParser,RowPerfectParser,BoatCoachAdvancedParser,
- MysteryParser,BoatCoachOTWParser,
- painsledDesktopParser,speedcoachParser,ErgStickParser,
- SpeedCoach2Parser,FITParser,fitsummarydata,
+ BoatCoachParser, RowPerfectParser, BoatCoachAdvancedParser,
+ MysteryParser, BoatCoachOTWParser,
+ painsledDesktopParser, speedcoachParser, ErgStickParser,
+ SpeedCoach2Parser, FITParser, fitsummarydata,
make_cumvalues,
- summarydata,get_file_type,
- )
+ summarydata, get_file_type,
+)
from rowers.models import Team
from rowers.metrics import axes
@@ -39,9 +39,9 @@ import numpy as np
import itertools
import math
from tasks import (
- handle_sendemail_unrecognized,handle_sendemail_breakthrough,
+ handle_sendemail_unrecognized, handle_sendemail_breakthrough,
handle_sendemail_hard
- )
+)
from django.conf import settings
from sqlalchemy import create_engine
@@ -61,7 +61,6 @@ queuelow = django_rq.get_queue('low')
queuehigh = django_rq.get_queue('default')
-
user = settings.DATABASES['default']['USER']
password = settings.DATABASES['default']['PASSWORD']
database_name = settings.DATABASES['default']['NAME']
@@ -74,59 +73,61 @@ database_url = 'mysql://{user}:{password}@{host}:{port}/{database_name}'.format(
database_name=database_name,
host=host,
port=port,
- )
+)
# Use SQLite local database when we're in debug mode
-if settings.DEBUG or user=='':
+if settings.DEBUG or user == '':
# database_url = 'sqlite:///db.sqlite3'
- database_url = 'sqlite:///'+database_name
+ database_url = 'sqlite:///' + database_name
# mapping the DB column names to the CSV file column names
columndict = {
- 'time':'TimeStamp (sec)',
- 'hr':' HRCur (bpm)',
- 'pace':' Stroke500mPace (sec/500m)',
- 'spm':' Cadence (stokes/min)',
- 'power':' Power (watts)',
- 'averageforce':' AverageDriveForce (lbs)',
- 'drivelength':' DriveLength (meters)',
- 'peakforce':' PeakDriveForce (lbs)',
- 'distance':' Horizontal (meters)',
- 'catch':'catch',
- 'finish':'finish',
- 'peakforceangle':'peakforceangle',
- 'wash':'wash',
- 'slip':'slip',
- 'workoutstate':' WorkoutState',
- 'cumdist':'cum_dist',
- }
-
+ 'time': 'TimeStamp (sec)',
+ 'hr': ' HRCur (bpm)',
+ 'pace': ' Stroke500mPace (sec/500m)',
+ 'spm': ' Cadence (stokes/min)',
+ 'power': ' Power (watts)',
+ 'averageforce': ' AverageDriveForce (lbs)',
+ 'drivelength': ' DriveLength (meters)',
+ 'peakforce': ' PeakDriveForce (lbs)',
+ 'distance': ' Horizontal (meters)',
+ 'catch': 'catch',
+ 'finish': 'finish',
+ 'peakforceangle': 'peakforceangle',
+ 'wash': 'wash',
+ 'slip': 'slip',
+ 'workoutstate': ' WorkoutState',
+ 'cumdist': 'cum_dist',
+}
+
from scipy.signal import savgol_filter
import datetime
+
def get_latlon(id):
try:
w = Workout.objects.get(id=id)
except Workout.DoesNotExist:
return False
-
+
rowdata = rdata(w.csvfilename)
try:
try:
- latitude = rowdata.df.ix[:,' latitude']
- longitude = rowdata.df.ix[:,' longitude']
+ latitude = rowdata.df.ix[:, ' latitude']
+ longitude = rowdata.df.ix[:, ' longitude']
except KeyError:
- latitude = 0*rowdata.df.ix[:,'TimeStamp (sec)']
- longitude = 0*rowdata.df.ix[:,'TimeStamp (sec)']
- return [latitude,longitude]
+ latitude = 0 * rowdata.df.ix[:, 'TimeStamp (sec)']
+ longitude = 0 * rowdata.df.ix[:, 'TimeStamp (sec)']
+ return [latitude, longitude]
except AttributeError:
- return [pd.Series([]),pd.Series([])]
-
- return [pd.Series([]),pd.Series([])]
+ return [pd.Series([]), pd.Series([])]
-def get_workouts(ids,userid):
+ return [pd.Series([]), pd.Series([])]
+
+
+def get_workouts(ids, userid):
goodids = []
for id in ids:
w = Workout.objects.get(id=id)
@@ -135,21 +136,20 @@ def get_workouts(ids,userid):
return [Workout.objects.get(id=id) for id in goodids]
-def filter_df(datadf,fieldname,value,largerthan=True):
+
+def filter_df(datadf, fieldname, value, largerthan=True):
try:
x = datadf[fieldname]
except KeyError:
return datadf
-
if largerthan:
mask = datadf[fieldname] < value
else:
mask = datadf[fieldname] >= value
- datadf.loc[mask,fieldname] = np.nan
-
+ datadf.loc[mask, fieldname] = np.nan
return datadf
@@ -160,9 +160,9 @@ def df_resample(datadf):
datadf['timestamps'] = timestamps
newdf = datadf.groupby(['timestamps']).mean()
return newdf
-
-def clean_df_stats(datadf,workstrokesonly=True,ignorehr=True,
+
+def clean_df_stats(datadf, workstrokesonly=True, ignorehr=True,
ignoreadvanced=False):
# clean data remove zeros and negative values
@@ -173,18 +173,17 @@ def clean_df_stats(datadf,workstrokesonly=True,ignorehr=True,
pass
try:
- datadf['peakforceangle'] = datadf['peakforceangle']+1000
+ datadf['peakforceangle'] = datadf['peakforceangle'] + 1000
except KeyError:
pass
-
+
try:
- datadf['hr'] = datadf['hr']+10
+ datadf['hr'] = datadf['hr'] + 10
except KeyError:
pass
- datadf=datadf.clip(lower=0)
- datadf.replace(to_replace=0,value=np.nan,inplace=True)
-
+ datadf = datadf.clip(lower=0)
+ datadf.replace(to_replace=0, value=np.nan, inplace=True)
# return from positive domain to negative
try:
@@ -193,147 +192,131 @@ def clean_df_stats(datadf,workstrokesonly=True,ignorehr=True,
pass
try:
- datadf['peakforceangle'] = datadf['peakforceangle']-1000
+ datadf['peakforceangle'] = datadf['peakforceangle'] - 1000
except KeyError:
pass
try:
- datadf['hr'] = datadf['hr']-10
+ datadf['hr'] = datadf['hr'] - 10
except KeyError:
pass
# clean data for useful ranges per column
if not ignorehr:
try:
- mask = datadf['hr'] < 30
- datadf.loc[mask,'hr'] = np.nan
+ mask = datadf['hr'] < 30
+ datadf.loc[mask, 'hr'] = np.nan
except KeyError:
pass
try:
mask = datadf['spm'] < 10
- datadf.loc[mask,'spm'] = np.nan
+ datadf.loc[mask, 'spm'] = np.nan
except KeyError:
pass
-
try:
- mask = datadf['pace']/1000. > 300.
- datadf.loc[mask,'pace'] = np.nan
+ mask = datadf['pace'] / 1000. > 300.
+ datadf.loc[mask, 'pace'] = np.nan
except KeyError:
pass
try:
mask = datadf['efficiency'] < 0.
- datadf.loc[mask,'efficiency'] = np.nan
- except KeyError:
- pass
-
- try:
- mask = datadf['pace']/1000. < 60.
- datadf.loc[mask,'pace'] = np.nan
- except KeyError:
- pass
-
- try:
- mask = datadf['spm'] > 60
- datadf.loc[mask,'spm'] = np.nan
+ datadf.loc[mask, 'efficiency'] = np.nan
except KeyError:
pass
+ try:
+ mask = datadf['pace'] / 1000. < 60.
+ datadf.loc[mask, 'pace'] = np.nan
+ except KeyError:
+ pass
+
+ try:
+ mask = datadf['spm'] > 60
+ datadf.loc[mask, 'spm'] = np.nan
+ except KeyError:
+ pass
try:
mask = datadf['wash'] < 1
- datadf.loc[mask,'wash'] = np.nan
+ datadf.loc[mask, 'wash'] = np.nan
except KeyError:
pass
-
if not ignoreadvanced:
try:
mask = datadf['rhythm'] < 5
- datadf.loc[mask,'rhythm'] = np.nan
+ datadf.loc[mask, 'rhythm'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['rhythm'] > 70
- datadf.loc[mask,'rhythm'] = np.nan
+ datadf.loc[mask, 'rhythm'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['power'] < 20
- datadf.loc[mask,'power'] = np.nan
+ datadf.loc[mask, 'power'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['drivelength'] < 0.5
- datadf.loc[mask,'drivelength'] = np.nan
+ datadf.loc[mask, 'drivelength'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['forceratio'] < 0.2
- datadf.loc[mask,'forceratio'] = np.nan
+ datadf.loc[mask, 'forceratio'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['forceratio'] > 1.0
- datadf.loc[mask,'forceratio'] = np.nan
+ datadf.loc[mask, 'forceratio'] = np.nan
except KeyError:
pass
-
-
try:
mask = datadf['drivespeed'] < 0.5
- datadf.loc[mask,'drivespeed'] = np.nan
+ datadf.loc[mask, 'drivespeed'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['drivespeed'] > 4
- datadf.loc[mask,'drivespeed'] = np.nan
+ datadf.loc[mask, 'drivespeed'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['driveenergy'] > 2000
- datadf.loc[mask,'driveenergy'] = np.nan
+ datadf.loc[mask, 'driveenergy'] = np.nan
except KeyError:
pass
-
try:
mask = datadf['driveenergy'] < 100
- datadf.loc[mask,'driveenergy'] = np.nan
+ datadf.loc[mask, 'driveenergy'] = np.nan
except KeyError:
pass
-
-
try:
mask = datadf['catch'] > -30.
- datadf.loc[mask,'catch'] = np.nan
+ datadf.loc[mask, 'catch'] = np.nan
except KeyError:
pass
-
- workoutstateswork = [1,4,5,8,9,6,7]
+ workoutstateswork = [1, 4, 5, 8, 9, 6, 7]
workoutstatesrest = [3]
- workoutstatetransition = [0,2,10,11,12,13]
+ workoutstatetransition = [0, 2, 10, 11, 12, 13]
- if workstrokesonly=='True' or workstrokesonly==True:
+ if workstrokesonly == 'True' or workstrokesonly == True:
try:
datadf = datadf[~datadf['workoutstate'].isin(workoutstatesrest)]
except:
@@ -341,13 +324,14 @@ def clean_df_stats(datadf,workstrokesonly=True,ignorehr=True,
return datadf
+
def getstatsfields():
# Get field names and remove those that are not useful in stats
fields = StrokeData._meta.get_fields()
- fielddict = {field.name:field.verbose_name for field in fields}
-
- #fielddict.pop('workoutid')
+ fielddict = {field.name: field.verbose_name for field in fields}
+
+ # fielddict.pop('workoutid')
fielddict.pop('ergpace')
fielddict.pop('hr_an')
fielddict.pop('hr_tr')
@@ -370,57 +354,62 @@ def getstatsfields():
fielddict.pop('hr_bottom')
fielddict.pop('cumdist')
- fieldlist = [field for field,value in fielddict.iteritems()]
+ fieldlist = [field for field, value in fielddict.iteritems()]
- return fieldlist,fielddict
+ return fieldlist, fielddict
# A string representation for time deltas
def niceformat(values):
out = []
for v in values:
- formattedv = strfdelta(v)
- out.append(formattedv)
-
+ formattedv = strfdelta(v)
+ out.append(formattedv)
+
return out
# A nice printable format for time delta values
+
+
def strfdelta(tdelta):
try:
- minutes,seconds = divmod(tdelta.seconds,60)
- tenths = int(tdelta.microseconds/1e5)
+ minutes, seconds = divmod(tdelta.seconds, 60)
+ tenths = int(tdelta.microseconds / 1e5)
except AttributeError:
- minutes,seconds = divmod(tdelta.view(np.int64),60e9)
- seconds,rest = divmod(seconds,1e9)
- tenths = int(rest/1e8)
+ minutes, seconds = divmod(tdelta.view(np.int64), 60e9)
+ seconds, rest = divmod(seconds, 1e9)
+ tenths = int(rest / 1e8)
res = "{minutes:0>2}:{seconds:0>2}.{tenths:0>1}".format(
- minutes=minutes,
- seconds=seconds,
- tenths=tenths,
- )
-
+ minutes=minutes,
+ seconds=seconds,
+ tenths=tenths,
+ )
+
return res
# A nice printable format for pace values
+
+
def nicepaceformat(values):
out = []
for v in values:
- formattedv = strfdelta(v)
- out.append(formattedv)
-
+ formattedv = strfdelta(v)
+ out.append(formattedv)
return out
# Convert seconds to a Time Delta value, replacing NaN with a 5:50 pace
+
+
def timedeltaconv(x):
- if np.isfinite(x) and x != 0 and x>0 and x<175000:
+ if np.isfinite(x) and x != 0 and x > 0 and x < 175000:
dt = datetime.timedelta(seconds=x)
else:
dt = datetime.timedelta(seconds=350.)
-
-
+
return dt
+
def paceformatsecs(values):
out = []
for v in values:
@@ -431,27 +420,29 @@ def paceformatsecs(values):
return out
# Processes painsled CSV file to database
-def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
- dosummary=True,title='Workout',
+
+
+def save_workout_database(f2, r, dosmooth=True, workouttype='rower',
+ dosummary=True, title='Workout',
workoutsource='unknown',
- notes='',totaldist=0,totaltime=0,
+ notes='', totaldist=0, totaltime=0,
summary='',
makeprivate=False,
- oarlength=2.89,inboard=0.88,
+ oarlength=2.89, inboard=0.88,
forceunit='lbs',
consistencychecks=False):
message = None
- powerperc = 100*np.array([r.pw_ut2,
- r.pw_ut1,
- r.pw_at,
- r.pw_tr,r.pw_an])/r.ftp
+ powerperc = 100 * np.array([r.pw_ut2,
+ r.pw_ut1,
+ r.pw_at,
+ r.pw_tr, r.pw_an]) / r.ftp
# make workout and put in database
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp,
- powerperc=powerperc,powerzones=r.powerzones)
- row = rdata(f2,rower=rr)
+ rr = rrower(hrmax=r.max, hrut2=r.ut2,
+ hrut1=r.ut1, hrat=r.at,
+ hrtr=r.tr, hran=r.an, ftp=r.ftp,
+ powerperc=powerperc, powerzones=r.powerzones)
+ row = rdata(f2, rower=rr)
dtavg = row.df['TimeStamp (sec)'].diff().mean()
@@ -461,16 +452,17 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
os.remove(f2)
except:
pass
- return new_workout_from_df(r,newdf,
+ return new_workout_from_df(r, newdf,
title=title)
try:
checks = row.check_consistency()
allchecks = 1
- for key,value in checks.iteritems():
+ for key, value in checks.iteritems():
if not value:
allchecks = 0
if consistencychecks:
- a_messages.error(r.user,'Failed consistency check: '+key+', autocorrected')
+ a_messages.error(
+ r.user, 'Failed consistency check: ' + key + ', autocorrected')
else:
pass
# a_messages.error(r.user,'Failed consistency check: '+key+', not corrected')
@@ -480,92 +472,92 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
if not allchecks and consistencychecks:
# row.repair()
pass
-
-
+
if row == 0:
- return (0,'Error: CSV data file not found')
+ return (0, 'Error: CSV data file not found')
if dosmooth:
# auto smoothing
pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
+ velo = 500. / pace
+
f = row.df['TimeStamp (sec)'].diff().mean()
- if f !=0 and not np.isnan(f):
- windowsize = 2*(int(10./(f)))+1
+ if f != 0 and not np.isnan(f):
+ windowsize = 2 * (int(10. / (f))) + 1
else:
windowsize = 1
if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
+ row.df['originalvelo'] = velo
- if windowsize > 3 and windowsize<len(velo):
+ if windowsize > 3 and windowsize < len(velo):
+ velo2 = savgol_filter(velo, windowsize, 3)
else:
- velo2 = velo
+ velo2 = velo
velo3 = pd.Series(velo2)
- velo3 = velo3.replace([-np.inf,np.inf],np.nan)
+ velo3 = velo3.replace([-np.inf, np.inf], np.nan)
velo3 = velo3.fillna(method='ffill')
-
- pace2 = 500./abs(velo3)
-
+
+ pace2 = 500. / abs(velo3)
+
row.df[' Stroke500mPace (sec/500m)'] = pace2
row.df = row.df.fillna(0)
- row.write_csv(f2,gzip=True)
+ row.write_csv(f2, gzip=True)
try:
os.remove(f2)
except:
pass
-
+
# recalculate power data
if workouttype == 'rower' or workouttype == 'dynamic' or workouttype == 'slides':
- try:
- row.erg_recalculatepower()
- row.write_csv(f2,gzip=True)
- except:
- pass
-
+ try:
+ row.erg_recalculatepower()
+ row.write_csv(f2, gzip=True)
+ except:
+ pass
+
averagehr = row.df[' HRCur (bpm)'].mean()
maxhr = row.df[' HRCur (bpm)'].max()
-
+
if totaldist == 0:
totaldist = row.df['cum_dist'].max()
if totaltime == 0:
- totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
+ totaltime = row.df['TimeStamp (sec)'].max(
+ ) - row.df['TimeStamp (sec)'].min()
try:
- totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
+ totaltime = totaltime + row.df.ix[0, ' ElapsedTime (sec)']
except KeyError:
pass
if np.isnan(totaltime):
totaltime = 0
- hours = int(totaltime/3600.)
- if hours>23:
+ hours = int(totaltime / 3600.)
+ if hours > 23:
message = 'Warning: The workout duration was longer than 23 hours. '
hours = 23
- minutes = int((totaltime - 3600.*hours)/60.)
- if minutes>59:
+ minutes = int((totaltime - 3600. * hours) / 60.)
+ if minutes > 59:
minutes = 59
if not message:
message = 'Warning: there is something wrong with the workout duration'
-
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
+
+ seconds = int(totaltime - 3600. * hours - 60. * minutes)
if seconds > 59:
seconds = 59
if not message:
message = 'Warning: there is something wrong with the workout duration'
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
+ tenths = int(10 * (totaltime - 3600. * hours - 60. * minutes - seconds))
if tenths > 9:
tenths = 9
if not message:
message = 'Warning: there is something wrong with the workout duration'
-
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
+
+ duration = "%s:%s:%s.%s" % (hours, minutes, seconds, tenths)
if dosummary:
summary = row.allstats()
@@ -573,7 +565,6 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
#summary += '\n'
#summary += row.intervalstats()
-
#workoutstartdatetime = row.rowdatetime
timezone_str = 'UTC'
try:
@@ -581,15 +572,13 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
except ValueError:
workoutstartdatetime = row.rowdatetime
-
-
try:
latavg = row.df[' latitude'].mean()
lonavg = row.df[' longitude'].mean()
-
+
tf = TimezoneFinder()
try:
- timezone_str = tf.timezone_at(lng=lonavg,lat=latavg)
+ timezone_str = tf.timezone_at(lng=lonavg, lat=latavg)
except ValueError:
timezone_str = 'UTC'
if timezone_str == None:
@@ -604,12 +593,10 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
except ValueError:
workoutstartdatetime = workoutstartdatetime.astimezone(
pytz.timezone(timezone_str)
- )
+ )
except KeyError:
timezone_str = r.defaulttimezone
-
-
workoutdate = workoutstartdatetime.astimezone(
pytz.timezone(timezone_str)
).strftime('%Y-%m-%d')
@@ -623,7 +610,7 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
privacy = 'visible'
# checking for inf values
-
+
totaldist = np.nan_to_num(totaldist)
maxhr = np.nan_to_num(maxhr)
averagehr = np.nan_to_num(averagehr)
@@ -631,59 +618,57 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
# check for duplicate start times and duration
ws = Workout.objects.filter(startdatetime=workoutstartdatetime,
distance=totaldist,
- user=r)
+ user=r)
if (len(ws) != 0):
- message = "Warning: This workout probably already exists in the database"
+ message = "Warning: This workout probably already exists in the database"
privacy = 'hidden'
-
-
-
- w = Workout(user=r,name=title,date=workoutdate,
- workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=r.weightcategory,
- starttime=workoutstarttime,
+ w = Workout(user=r, name=title, date=workoutdate,
+ workouttype=workouttype,
+ duration=duration, distance=totaldist,
+ weightcategory=r.weightcategory,
+ starttime=workoutstarttime,
workoutsource=workoutsource,
forceunit=forceunit,
- csvfilename=f2,notes=notes,summary=summary,
- maxhr=maxhr,averagehr=averagehr,
- startdatetime=workoutstartdatetime,
- inboard=inboard,oarlength=oarlength,
+ csvfilename=f2, notes=notes, summary=summary,
+ maxhr=maxhr, averagehr=averagehr,
+ startdatetime=workoutstartdatetime,
+ inboard=inboard, oarlength=oarlength,
timezone=timezone_str,
privacy=privacy)
-
w.save()
isbreakthrough = False
ishard = False
if workouttype == 'water':
- df = getsmallrowdata_db(['power','workoutid','time'],ids=[w.id])
+ df = getsmallrowdata_db(['power', 'workoutid', 'time'], ids=[w.id])
# delta,cpvalues,avgpower = datautils.getsinglecp(row.df)
thesecs = totaltime
- maxt = 1.05*thesecs
+ maxt = 1.05 * thesecs
if maxt > 0:
logarr = datautils.getlogarr(maxt)
dfgrouped = df.groupby(['workoutid'])
- delta,cpvalues,avgpower = datautils.getcp(dfgrouped,logarr)
-
- res,btvalues,res2 = utils.isbreakthrough(delta,cpvalues,r.p0,r.p1,r.p2,r.p3,r.cpratio)
+ delta, cpvalues, avgpower = datautils.getcp(dfgrouped, logarr)
+
+ res, btvalues, res2 = utils.isbreakthrough(
+ delta, cpvalues, r.p0, r.p1, r.p2, r.p3, r.cpratio)
else:
res = 0
res2 = 0
if res:
isbreakthrough = True
- res = datautils.updatecp(delta,cpvalues,r)
+ res = datautils.updatecp(delta, cpvalues, r)
if res2 and not isbreakthrough:
ishard = True
# submit email task to send email about breakthrough workout
if isbreakthrough:
- a_messages.info(r.user,'It looks like you have a new breakthrough workout')
+ a_messages.info(
+ r.user, 'It looks like you have a new breakthrough workout')
if settings.DEBUG and r.getemailnotifications:
- res = handle_sendemail_breakthrough.delay(w.id,r.user.email,
- r.user.first_name,
+ res = handle_sendemail_breakthrough.delay(w.id, r.user.email,
+ r.user.first_name,
r.user.last_name,
btvalues=btvalues.to_json())
elif r.getemailnotifications:
@@ -700,12 +685,12 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
pass
# submit email task to send email about breakthrough workout
if ishard:
- a_messages.info(r.user,'That was a pretty hard workout')
+ a_messages.info(r.user, 'That was a pretty hard workout')
if settings.DEBUG and r.getemailnotifications:
- res = handle_sendemail_hard.delay(w.id,r.user.email,
- r.user.first_name,
- r.user.last_name,
- btvalues=btvalues.to_json())
+ res = handle_sendemail_hard.delay(w.id, r.user.email,
+ r.user.first_name,
+ r.user.last_name,
+ btvalues=btvalues.to_json())
elif r.getemailnotifications:
try:
res = queuehigh.enqueue(
@@ -725,100 +710,100 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
w.team.add(t)
# put stroke data in database
- res = dataprep(row.df,id=w.id,bands=True,
- barchart=True,otwpower=True,empower=True,inboard=inboard)
+ res = dataprep(row.df, id=w.id, bands=True,
+ barchart=True, otwpower=True, empower=True, inboard=inboard)
- return (w.id,message)
+ return (w.id, message)
-def handle_nonpainsled(f2,fileformat,summary=''):
+
+def handle_nonpainsled(f2, fileformat, summary=''):
oarlength = 2.89
inboard = 0.88
# handle RowPro:
if (fileformat == 'rp'):
- row = RowProParser(f2)
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser(f2)
-
+ row = RowProParser(f2)
+ # handle TCX
+ if (fileformat == 'tcx'):
+ row = TCXParser(f2)
+
# handle Mystery
if (fileformat == 'mystery'):
- row = MysteryParser(f2)
+ row = MysteryParser(f2)
# handle RowPerfect
if (fileformat == 'rowperfect3'):
row = RowPerfectParser(f2)
-
+
# handle ErgData
if (fileformat == 'ergdata'):
- row = ErgDataParser(f2)
+ row = ErgDataParser(f2)
-
# handle CoxMate
if (fileformat == 'coxmate'):
- row = CoxMateParser(f2)
+ row = CoxMateParser(f2)
# handle Mike
if (fileformat == 'bcmike'):
row = BoatCoachAdvancedParser(f2)
-
+
# handle BoatCoach
if (fileformat == 'boatcoach'):
- row = BoatCoachParser(f2)
+ row = BoatCoachParser(f2)
# handle BoatCoach OTW
if (fileformat == 'boatcoachotw'):
row = BoatCoachOTWParser(f2)
-
+
# handle painsled desktop
if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser(f2)
+ row = painsledDesktopParser(f2)
# handle speed coach GPS
if (fileformat == 'speedcoach'):
- row = speedcoachParser(f2)
+ row = speedcoachParser(f2)
- # handle speed coach GPS 2
+ # handle speed coach GPS 2
if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser(f2)
+ row = SpeedCoach2Parser(f2)
try:
- oarlength,inboard = get_empower_rigging(f2)
+ oarlength, inboard = get_empower_rigging(f2)
summary = row.allstats()
except:
pass
-
# handle ErgStick
if (fileformat == 'ergstick'):
- row = ErgStickParser(f2)
-
+ row = ErgStickParser(f2)
+
# handle FIT
if (fileformat == 'fit'):
- row = FITParser(f2)
+ row = FITParser(f2)
try:
- s = fitsummarydata(f2)
+ s = fitsummarydata(f2)
s.setsummary()
- summary = s.summarytext
+ summary = s.summarytext
except:
pass
-
f_to_be_deleted = f2
# should delete file
- f2 = f2[:-4]+'o.csv'
- row.write_csv(f2,gzip=True)
-
- #os.remove(f2)
- try:
- os.remove(f_to_be_deleted)
- except:
- os.remove(f_to_be_deleted+'.gz')
+ f2 = f2[:-4] + 'o.csv'
+ row.write_csv(f2, gzip=True)
- return (f2,summary,oarlength,inboard)
+ # os.remove(f2)
+ try:
+ os.remove(f_to_be_deleted)
+ except:
+ os.remove(f_to_be_deleted + '.gz')
+
+ return (f2, summary, oarlength, inboard)
# Create new workout from file and store it in the database
# This routine should be used everywhere in views.py and mailprocessing.py
# Currently there is code duplication
-def new_workout_from_file(r,f2,
+
+
+def new_workout_from_file(r, f2,
workouttype='rower',
title='Workout',
makeprivate=False,
@@ -829,132 +814,128 @@ def new_workout_from_file(r,f2,
except IOError:
os.remove(f2)
message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect."
- return (0,message,f2)
-
+ return (0, message, f2)
+
summary = ''
oarlength = 2.89
inboard = 0.88
- if len(fileformat)==3 and fileformat[0]=='zip':
+ if len(fileformat) == 3 and fileformat[0] == 'zip':
f_to_be_deleted = f2
title = os.path.basename(f2)
- if settings.DEBUG:
- res = handle_zip_file.delay(
- r.user.email,title,f2
+ if settings.DEBUG:
+ res = handle_zip_file.delay(
+ r.user.email, title, f2
)
- else:
- res = queuelow.enqueue(
+ else:
+ res = queuelow.enqueue(
handle_zip_file,
r.user.email,
title,
f2
)
- return -1,message,f2
+ return -1, message, f2
# Some people try to upload Concept2 logbook summaries
if fileformat == 'c2log':
os.remove(f2)
message = "This C2 logbook summary does not contain stroke data. Please download the Export Stroke Data file from the workout details on the C2 logbook."
- return (0,message,f2)
+ return (0, message, f2)
if fileformat == 'nostrokes':
os.remove(f2)
message = "It looks like this file doesn't contain stroke data."
- return (0,message,f2)
+ return (0, message, f2)
# Some people upload corrupted zip files
if fileformat == 'notgzip':
os.remove(f2)
message = "Rowsandall could not process this file. The extension is supported but the file seems corrupt. Contact info@rowsandall.com if you think this is incorrect."
- return (0,message,f2)
+ return (0, message, f2)
-
# Some people try to upload RowPro summary logs
if fileformat == 'rowprolog':
os.remove(f2)
message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log."
- return (0,message,f2)
+ return (0, message, f2)
# Sometimes people try an unsupported file type.
# Send an email to info@rowsandall.com with the file attached
# for me to check if it is a bug, or a new file type
# worth supporting
if fileformat == 'unknown':
- message = "We couldn't recognize the file type"
- if settings.DEBUG:
- res = handle_sendemail_unrecognized.delay(f2,
- r.user.email)
+ message = "We couldn't recognize the file type"
+ if settings.DEBUG:
+ res = handle_sendemail_unrecognized.delay(f2,
+ r.user.email)
+
+ else:
+ res = queuehigh.enqueue(handle_sendemail_unrecognized,
+ f2, r.user.email)
+ return (0, message, f2)
- else:
- res = queuehigh.enqueue(handle_sendemail_unrecognized,
- f2,r.user.email)
- return (0,message,f2)
-
# handle non-Painsled by converting it to painsled compatible CSV
if (fileformat != 'csv'):
try:
- f2,summary,oarlength,inboard = handle_nonpainsled(f2,
- fileformat,
- summary=summary)
+ f2, summary, oarlength, inboard = handle_nonpainsled(f2,
+ fileformat,
+ summary=summary)
except:
errorstring = str(sys.exc_info()[0])
- message = 'Something went wrong: '+errorstring
- return (0,message,'')
-
-
+ message = 'Something went wrong: ' + errorstring
+ return (0, message, '')
dosummary = (fileformat != 'fit')
- id,message = save_workout_database(f2,r,
- workouttype=workouttype,
- makeprivate=makeprivate,
- dosummary=dosummary,
- workoutsource=fileformat,
- summary=summary,
- inboard=inboard,oarlength=oarlength,
- title=title)
+ id, message = save_workout_database(f2, r,
+ workouttype=workouttype,
+ makeprivate=makeprivate,
+ dosummary=dosummary,
+ workoutsource=fileformat,
+ summary=summary,
+ inboard=inboard, oarlength=oarlength,
+ title=title)
- return (id,message,f2)
+ return (id, message, f2)
-def split_workout(r,parent,splitsecond,splitmode):
- data,row = getrowdata_db(id=parent.id)
- latitude,longitude = get_latlon(parent.id)
+
+def split_workout(r, parent, splitsecond, splitmode):
+ data, row = getrowdata_db(id=parent.id)
+ latitude, longitude = get_latlon(parent.id)
if not latitude.empty and not longitude.empty:
data[' latitude'] = latitude
data[' longitude'] = longitude
- data['time'] = data['time']/1000.
-
- data1 = data[data['time']<=splitsecond].copy()
- data2 = data[data['time']>splitsecond].copy()
+ data['time'] = data['time'] / 1000.
+
+ data1 = data[data['time'] <= splitsecond].copy()
+ data2 = data[data['time'] > splitsecond].copy()
data1 = data1.sort_values(['time'])
- data1 = data1.interpolate(method='linear',axis=0,limit_direction='both',
- limit=10)
- data1.fillna(method='bfill',inplace=True)
+ data1 = data1.interpolate(method='linear', axis=0, limit_direction='both',
+ limit=10)
+ data1.fillna(method='bfill', inplace=True)
- # Some new stuff to try out
- data1 = data1.groupby('time',axis=0).mean()
+ # Some new stuff to try out
+ data1 = data1.groupby('time', axis=0).mean()
data1['time'] = data1.index
- data1.reset_index(drop=True,inplace=True)
-
+ data1.reset_index(drop=True, inplace=True)
data2 = data2.sort_values(['time'])
- data2 = data2.interpolate(method='linear',axis=0,limit_direction='both',
- limit=10)
- data2.fillna(method='bfill',inplace=True)
+ data2 = data2.interpolate(method='linear', axis=0, limit_direction='both',
+ limit=10)
+ data2.fillna(method='bfill', inplace=True)
- # Some new stuff to try out
- data2 = data2.groupby('time',axis=0).mean()
+ # Some new stuff to try out
+ data2 = data2.groupby('time', axis=0).mean()
data2['time'] = data2.index
- data2.reset_index(drop=True,inplace=True)
-
- data1['pace'] = data1['pace']/1000.
- data2['pace'] = data2['pace']/1000.
-
-
- data1.drop_duplicates(subset='time',inplace=True)
- data2.drop_duplicates(subset='time',inplace=True)
+ data2.reset_index(drop=True, inplace=True)
+
+ data1['pace'] = data1['pace'] / 1000.
+ data2['pace'] = data2['pace'] / 1000.
+
+ data1.drop_duplicates(subset='time', inplace=True)
+ data2.drop_duplicates(subset='time', inplace=True)
messages = []
ids = []
@@ -965,17 +946,17 @@ def split_workout(r,parent,splitsecond,splitmode):
else:
setprivate = False
- id,message = new_workout_from_df(r,data1,
- title=parent.name+' (1)',
- parent=parent,
- setprivate=setprivate,
- forceunit='N')
+ id, message = new_workout_from_df(r, data1,
+ title=parent.name + ' (1)',
+ parent=parent,
+ setprivate=setprivate,
+ forceunit='N')
messages.append(message)
ids.append(id)
if 'keep second' in splitmode:
- data2['cumdist'] = data2['cumdist'] - data2.ix[0,'cumdist']
- data2['distance'] = data2['distance'] - data2.ix[0,'distance']
- data2['time'] = data2['time'] - data2.ix[0,'time']
+ data2['cumdist'] = data2['cumdist'] - data2.ix[0, 'cumdist']
+ data2['distance'] = data2['distance'] - data2.ix[0, 'distance']
+ data2['time'] = data2['time'] - data2.ix[0, 'time']
if 'secondprivate' in splitmode:
setprivate = True
else:
@@ -983,19 +964,18 @@ def split_workout(r,parent,splitsecond,splitmode):
dt = datetime.timedelta(seconds=splitsecond)
- id,message = new_workout_from_df(r,data2,
- title=parent.name+' (2)',
- parent=parent,
- setprivate=setprivate,
- dt=dt,forceunit='N')
+ id, message = new_workout_from_df(r, data2,
+ title=parent.name + ' (2)',
+ parent=parent,
+ setprivate=setprivate,
+ dt=dt, forceunit='N')
messages.append(message)
ids.append(id)
-
if not 'keep original' in splitmode:
if 'keep second' in splitmode or 'keep first' in splitmode:
parent.delete()
- messages.append('Deleted Workout: '+parent.name)
+ messages.append('Deleted Workout: ' + parent.name)
else:
messages.append('That would delete your workout')
ids.append(parent.id)
@@ -1003,12 +983,14 @@ def split_workout(r,parent,splitsecond,splitmode):
parent.privacy = 'hidden'
parent.save()
- return ids,messages
-
+ return ids, messages
+
# Create new workout from data frame and store it in the database
# This routine should be used everywhere in views.py and mailprocessing.py
# Currently there is code duplication
-def new_workout_from_df(r,df,
+
+
+def new_workout_from_df(r, df,
title='New Workout',
parent=None,
setprivate=False,
@@ -1022,62 +1004,59 @@ def new_workout_from_df(r,df,
oarlength = parent.oarlength
inboard = parent.inboard
workouttype = parent.workouttype
- notes=parent.notes
- summary=parent.summary
+ notes = parent.notes
+ summary = parent.summary
if parent.privacy == 'hidden':
- makeprivate=True
+ makeprivate = True
else:
- makeprivate=False
-
- startdatetime=parent.startdatetime+dt
- else:
+ makeprivate = False
+
+ startdatetime = parent.startdatetime + dt
+ else:
oarlength = 2.89
inboard = 0.88
workouttype = 'rower'
- notes=''
- summary=''
- makeprivate=False
+ notes = ''
+ summary = ''
+ makeprivate = False
startdatetime = timezone.now()
if setprivate:
- makeprivate=True
+ makeprivate = True
-
timestr = strftime("%Y%m%d-%H%M%S")
- csvfilename ='media/df_'+timestr+'.csv'
+ csvfilename = 'media/df_' + timestr + '.csv'
if forceunit == 'N':
# change to lbs for now
df['peakforce'] /= lbstoN
df['averageforce'] /= lbstoN
- df.rename(columns = columndict,inplace=True)
+ df.rename(columns=columndict, inplace=True)
#starttimeunix = mktime(startdatetime.utctimetuple())
starttimeunix = arrow.get(startdatetime).timestamp
df[' ElapsedTime (sec)'] = df['TimeStamp (sec)']
- df['TimeStamp (sec)'] = df['TimeStamp (sec)']+starttimeunix
+ df['TimeStamp (sec)'] = df['TimeStamp (sec)'] + starttimeunix
row = rrdata(df=df)
- row.write_csv(csvfilename,gzip=True)
-
- #res = df.to_csv(csvfilename+'.gz',index_label='index',
+ row.write_csv(csvfilename, gzip=True)
+
+ # res = df.to_csv(csvfilename+'.gz',index_label='index',
# compression='gzip')
- id,message = save_workout_database(csvfilename,r,
- workouttype=workouttype,
- title=title,
- notes=notes,
- oarlength=oarlength,
- inboard=inboard,
- makeprivate=makeprivate,
- dosmooth=False,
- consistencychecks=False)
-
-
- return (id,message)
+ id, message = save_workout_database(csvfilename, r,
+ workouttype=workouttype,
+ title=title,
+ notes=notes,
+ oarlength=oarlength,
+ inboard=inboard,
+ makeprivate=makeprivate,
+ dosmooth=False,
+ consistencychecks=False)
+ return (id, message)
# Compare the data from the CSV file and the database
@@ -1092,7 +1071,7 @@ def compare_data(id):
except AttributeError:
rowdata = 0
l1 = 0
-
+
engine = create_engine(database_url, echo=False)
query = sa.text('SELECT COUNT(*) FROM strokedata WHERE workoutid={id};'.format(
id=id,
@@ -1107,30 +1086,32 @@ def compare_data(id):
engine.dispose()
lfile = l1
ldb = l2
- return l1==l2 and l1 != 0,ldb,lfile
+ return l1 == l2 and l1 != 0, ldb, lfile
# Repair data for workouts where the CSV file is lost (or the DB entries
# don't exist)
+
+
def repair_data(verbose=False):
ws = Workout.objects.all()
for w in ws:
if verbose:
sys.stdout.write(".")
- test,ldb,lfile = compare_data(w.id)
+ test, ldb, lfile = compare_data(w.id)
if not test:
if verbose:
- print w.id,lfile,ldb
+ print w.id, lfile, ldb
try:
rowdata = rdata(w.csvfilename)
if rowdata and len(rowdata.df):
- update_strokedata(w.id,rowdata.df)
+ update_strokedata(w.id, rowdata.df)
except IOError, AttributeError:
pass
- if lfile==0:
+ if lfile == 0:
# if not ldb - delete workout
-
+
try:
data = read_df_sql(w.id)
try:
@@ -1139,34 +1120,38 @@ def repair_data(verbose=False):
datalength = 0
if datalength != 0:
- data.rename(columns = columndict,inplace=True)
- res = data.to_csv(w.csvfilename+'.gz',
+ data.rename(columns=columndict, inplace=True)
+ res = data.to_csv(w.csvfilename + '.gz',
index_label='index',
compression='gzip')
print 'adding csv file'
else:
- print w.id,' No stroke records anywhere'
+ print w.id, ' No stroke records anywhere'
w.delete()
except:
print 'failed'
print str(sys.exc_info()[0])
pass
-
+
# A wrapper around the rowingdata class, with some error catching
-def rdata(file,rower=rrower()):
+
+
+def rdata(file, rower=rrower()):
try:
- res = rrdata(csvfile=file,rower=rower)
- except IOError,IndexError:
+ res = rrdata(csvfile=file, rower=rower)
+ except IOError, IndexError:
try:
- res = rrdata(csvfile=file+'.gz',rower=rower)
- except IOError,IndexError:
- res = 0
+ res = rrdata(csvfile=file + '.gz', rower=rower)
+ except IOError, IndexError:
+ res = 0
except:
res = 0
return res
# Remove all stroke data for workout ID from database
+
+
def delete_strokedata(id):
engine = create_engine(database_url, echo=False)
query = sa.text('DELETE FROM strokedata WHERE workoutid={id};'.format(
@@ -1181,80 +1166,86 @@ def delete_strokedata(id):
engine.dispose()
# Replace stroke data in DB with data from CSV file
-def update_strokedata(id,df):
+
+
+def update_strokedata(id, df):
delete_strokedata(id)
- rowdata = dataprep(df,id=id,bands=True,barchart=True,otwpower=True)
+ rowdata = dataprep(df, id=id, bands=True, barchart=True, otwpower=True)
# Test that all data are of a numerical time
-def testdata(time,distance,pace,spm):
- t1 = np.issubdtype(time,np.number)
- t2 = np.issubdtype(distance,np.number)
- t3 = np.issubdtype(pace,np.number)
- t4 = np.issubdtype(spm,np.number)
+
+
+def testdata(time, distance, pace, spm):
+ t1 = np.issubdtype(time, np.number)
+ t2 = np.issubdtype(distance, np.number)
+ t3 = np.issubdtype(pace, np.number)
+ t4 = np.issubdtype(spm, np.number)
return t1 and t2 and t3 and t4
# Get data from DB for one workout (fetches all data). If data
# is not in DB, read from CSV file (and create DB entry)
-def getrowdata_db(id=0,doclean=False,convertnewtons=True):
+
+
+def getrowdata_db(id=0, doclean=False, convertnewtons=True):
data = read_df_sql(id)
- data['x_right'] = data['x_right']/1.0e6
+ data['x_right'] = data['x_right'] / 1.0e6
if data.empty:
- rowdata,row = getrowdata(id=id)
+ rowdata, row = getrowdata(id=id)
if rowdata:
- data = dataprep(rowdata.df,id=id,bands=True,barchart=True,otwpower=True)
+ data = dataprep(rowdata.df, id=id, bands=True,
+ barchart=True, otwpower=True)
else:
- data = pd.DataFrame() # returning empty dataframe
+ data = pd.DataFrame() # returning empty dataframe
else:
row = Workout.objects.get(id=id)
if data['efficiency'].mean() == 0 and data['power'].mean() != 0:
data = add_efficiency(id=id)
-
+
if doclean:
- data = clean_df_stats(data,ignorehr=True)
+ data = clean_df_stats(data, ignorehr=True)
-
- return data,row
+ return data, row
# Fetch a subset of the data from the DB
-def getsmallrowdata_db(columns,ids=[],doclean=True,workstrokesonly=True):
+
+
+def getsmallrowdata_db(columns, ids=[], doclean=True, workstrokesonly=True):
prepmultipledata(ids)
- data = read_cols_df_sql(ids,columns)
+ data = read_cols_df_sql(ids, columns)
# convert newtons
-
-
if doclean:
- data = clean_df_stats(data,ignorehr=True,
+ data = clean_df_stats(data, ignorehr=True,
workstrokesonly=workstrokesonly)
-
-
return data
# Fetch both the workout and the workout stroke data (from CSV file)
+
+
def getrowdata(id=0):
# check if valid ID exists (workout exists)
row = Workout.objects.get(id=id)
f1 = row.csvfilename
-
+
# get user
r = row.user
u = r.user
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp)
+ rr = rrower(hrmax=r.max, hrut2=r.ut2,
+ hrut1=r.ut1, hrat=r.at,
+ hrtr=r.tr, hran=r.an, ftp=r.ftp)
- rowdata = rdata(f1,rower=rr)
+ rowdata = rdata(f1, rower=rr)
- return rowdata,row
+ return rowdata, row
# Checks if all rows for a list of workout IDs have entries in the
# stroke_data table. If this is not the case, it creates the stroke
@@ -1262,7 +1253,9 @@ def getrowdata(id=0):
# In theory, this should never yield any work, but it's a good
# safety net for programming errors elsewhere in the app
# Also used heavily when I moved from CSV file only to CSV+Stroke data
-def prepmultipledata(ids,verbose=False):
+
+
+def prepmultipledata(ids, verbose=False):
query = sa.text('SELECT DISTINCT workoutid FROM strokedata')
engine = create_engine(database_url, echo=False)
@@ -1276,74 +1269,81 @@ def prepmultipledata(ids,verbose=False):
ids2 = [int(id) for id in ids]
except ValueError:
ids2 = ids
-
- res = list(set(ids2)-set(res))
+
+ res = list(set(ids2) - set(res))
for id in res:
- rowdata,row = getrowdata(id=id)
+ rowdata, row = getrowdata(id=id)
if verbose:
print id
if rowdata and len(rowdata.df):
- data = dataprep(rowdata.df,id=id,bands=True,barchart=True,otwpower=True)
+ data = dataprep(rowdata.df, id=id, bands=True,
+ barchart=True, otwpower=True)
return res
# Read a set of columns for a set of workout ids, returns data as a
# pandas dataframe
-def read_cols_df_sql(ids,columns,convertnewtons=True):
+
+
+def read_cols_df_sql(ids, columns, convertnewtons=True):
# drop columns that are not in offical list
-# axx = [ax[0] for ax in axes]
+ # axx = [ax[0] for ax in axes]
axx = [f.name for f in StrokeData._meta.get_fields()]
-
+
for c in columns:
if not c in axx:
columns.remove(c)
-
- columns = list(columns)+['distance','spm','workoutid']
+
+ columns = list(columns) + ['distance', 'spm', 'workoutid']
columns = [x for x in columns if x != 'None']
columns = list(set(columns))
cls = ''
engine = create_engine(database_url, echo=False)
for column in columns:
- cls += column+', '
+ cls += column + ', '
cls = cls[:-2]
if len(ids) == 0:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid=0'.format(
- columns = cls,
- ))
+ columns=cls,
+ ))
elif len(ids) == 1:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid={id}'.format(
- id = ids[0],
- columns = cls,
- ))
+ id=ids[0],
+ columns=cls,
+ ))
else:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid IN {ids}'.format(
- columns = cls,
- ids = tuple(ids),
+ columns=cls,
+ ids=tuple(ids),
))
-
connection = engine.raw_connection()
- df = pd.read_sql_query(query,engine)
+ df = pd.read_sql_query(query, engine)
df = df.fillna(value=0)
if 'peakforce' in columns:
- funits = ((w.id,w.forceunit) for w in Workout.objects.filter(id__in=ids))
- for id,u in funits:
- if u=='lbs':
- mask = df['workoutid']==id
- df.loc[mask,'peakforce'] = df.loc[mask,'peakforce']*lbstoN
+ funits = ((w.id, w.forceunit)
+ for w in Workout.objects.filter(id__in=ids))
+ for id, u in funits:
+ if u == 'lbs':
+ mask = df['workoutid'] == id
+ df.loc[mask, 'peakforce'] = df.loc[mask, 'peakforce'] * lbstoN
if 'averageforce' in columns:
- funits = ((w.id,w.forceunit) for w in Workout.objects.filter(id__in=ids))
- for id,u in funits:
- if u=='lbs':
- mask = df['workoutid']==id
- df.loc[mask,'averageforce'] = df.loc[mask,'averageforce']*lbstoN
+ funits = ((w.id, w.forceunit)
+ for w in Workout.objects.filter(id__in=ids))
+ for id, u in funits:
+ if u == 'lbs':
+ mask = df['workoutid'] == id
+ df.loc[mask, 'averageforce'] = df.loc[mask,
+ 'averageforce'] * lbstoN
engine.dispose()
return df
-
+
# Read stroke data from the DB for a Workout ID. Returns a pandas dataframe
+
+
def read_df_sql(id):
engine = create_engine(database_url, echo=False)
@@ -1354,15 +1354,15 @@ def read_df_sql(id):
df = df.fillna(value=0)
funit = Workout.objects.get(id=id).forceunit
-
- if funit=='lbs':
+
+ if funit == 'lbs':
try:
- df['peakforce'] = df['peakforce']*lbstoN
+ df['peakforce'] = df['peakforce'] * lbstoN
except KeyError:
pass
try:
- df['averageforce'] = df['averageforce']*lbstoN
+ df['averageforce'] = df['averageforce'] * lbstoN
except KeyError:
pass
@@ -1370,7 +1370,9 @@ def read_df_sql(id):
# Get the necessary data from the strokedata table in the DB.
# For the flex plot
-def smalldataprep(therows,xparam,yparam1,yparam2):
+
+
+def smalldataprep(therows, xparam, yparam1, yparam2):
df = pd.DataFrame()
if yparam2 == 'None':
yparam2 = 'power'
@@ -1384,14 +1386,14 @@ def smalldataprep(therows,xparam,yparam1,yparam2):
try:
rowdata = dataprep(rrdata(f1).df)
-
+
rowdata = pd.DataFrame({xparam: rowdata[xparam],
yparam1: rowdata[yparam1],
yparam2: rowdata[yparam2],
'distance': rowdata['distance'],
'spm': rowdata['spm'],
- }
- )
+ }
+ )
if workout.forceunit == 'lbs':
try:
rowdata['peakforce'] *= lbstoN
@@ -1402,18 +1404,18 @@ def smalldataprep(therows,xparam,yparam1,yparam2):
rowdata['averageforce'] *= lbstoN
except KeyError:
pass
-
- df = pd.concat([df,rowdata],ignore_index=True)
+
+ df = pd.concat([df, rowdata], ignore_index=True)
except IOError:
try:
- rowdata = dataprep(rrdata(f1+'.gz').df)
+ rowdata = dataprep(rrdata(f1 + '.gz').df)
rowdata = pd.DataFrame({xparam: rowdata[xparam],
yparam1: rowdata[yparam1],
yparam2: rowdata[yparam2],
'distance': rowdata['distance'],
'spm': rowdata['spm'],
- }
- )
+ }
+ )
if workout.forceunit == 'lbs':
try:
rowdata['peakforce'] *= lbstoN
@@ -1424,99 +1426,99 @@ def smalldataprep(therows,xparam,yparam1,yparam2):
rowdata['averageforce'] *= lbstoN
except KeyError:
pass
- df = pd.concat([df,rowdata],ignore_index=True)
+ df = pd.concat([df, rowdata], ignore_index=True)
except IOError:
pass
-
return df
# data fusion
-def datafusion(id1,id2,columns,offset):
+
+
+def datafusion(id1, id2, columns, offset):
workout1 = Workout.objects.get(id=id1)
workout2 = Workout.objects.get(id=id2)
-
- df1,w1 = getrowdata_db(id=id1)
- df1 = df1.drop([#'cumdist',
- 'hr_ut2',
- 'hr_ut1',
- 'hr_at',
- 'hr_tr',
- 'hr_an',
- 'hr_max',
- 'ftime',
- 'fpace',
- 'workoutid',
- 'id'],
- 1,errors='ignore')
-
+
+ df1, w1 = getrowdata_db(id=id1)
+ df1 = df1.drop([ # 'cumdist',
+ 'hr_ut2',
+ 'hr_ut1',
+ 'hr_at',
+ 'hr_tr',
+ 'hr_an',
+ 'hr_max',
+ 'ftime',
+ 'fpace',
+ 'workoutid',
+ 'id'],
+ 1, errors='ignore')
+
# Add coordinates to DataFrame
- latitude,longitude = get_latlon(id1)
-
+ latitude, longitude = get_latlon(id1)
+
df1[' latitude'] = latitude
df1[' longitude'] = longitude
-
- df2 = getsmallrowdata_db(['time']+columns,ids=[id2],doclean=False)
+ df2 = getsmallrowdata_db(['time'] + columns, ids=[id2], doclean=False)
forceunit = 'N'
-
- offsetmillisecs = offset.seconds*1000+offset.microseconds/1000.
- offsetmillisecs += offset.days*(3600*24*1000)
- df2['time'] = df2['time']+offsetmillisecs
-
- keep1 = {c:c for c in set(df1.columns)}
+ offsetmillisecs = offset.seconds * 1000 + offset.microseconds / 1000.
+ offsetmillisecs += offset.days * (3600 * 24 * 1000)
+ df2['time'] = df2['time'] + offsetmillisecs
+
+ keep1 = {c: c for c in set(df1.columns)}
for c in columns:
keep1.pop(c)
-
-
+
for c in df1.columns:
if not c in keep1:
- df1 = df1.drop(c,1,errors='ignore')
+ df1 = df1.drop(c, 1, errors='ignore')
- df = pd.concat([df1,df2],ignore_index=True)
+ df = pd.concat([df1, df2], ignore_index=True)
df = df.sort_values(['time'])
- df = df.interpolate(method='linear',axis=0,limit_direction='both',
+ df = df.interpolate(method='linear', axis=0, limit_direction='both',
limit=10)
- df.fillna(method='bfill',inplace=True)
+ df.fillna(method='bfill', inplace=True)
- # Some new stuff to try out
- df = df.groupby('time',axis=0).mean()
+ # Some new stuff to try out
+ df = df.groupby('time', axis=0).mean()
df['time'] = df.index
- df.reset_index(drop=True,inplace=True)
-
- df['time'] = df['time']/1000.
- df['pace'] = df['pace']/1000.
- df['cum_dist'] = df['cumdist']
-
- return df,forceunit
+ df.reset_index(drop=True, inplace=True)
-def fix_newtons(id=0,limit=3000):
+ df['time'] = df['time'] / 1000.
+ df['pace'] = df['pace'] / 1000.
+ df['cum_dist'] = df['cumdist']
+
+ return df, forceunit
+
+
+def fix_newtons(id=0, limit=3000):
# rowdata,row = getrowdata_db(id=id,doclean=False,convertnewtons=False)
- rowdata = getsmallrowdata_db(['peakforce'],ids=[id],doclean=False)
+ rowdata = getsmallrowdata_db(['peakforce'], ids=[id], doclean=False)
try:
#avgforce = rowdata['averageforce']
peakforce = rowdata['peakforce']
if peakforce.mean() > limit:
w = Workout.objects.get(id=id)
- print "fixing ",id
+ print "fixing ", id
rowdata = rdata(w.csvfilename)
if rowdata and len(rowdata.df):
- update_strokedata(w.id,rowdata.df)
+ update_strokedata(w.id, rowdata.df)
except KeyError:
pass
-def add_efficiency(id=0):
- rowdata,row = getrowdata_db(id=id,doclean=False,convertnewtons=False)
- power = rowdata['power']
- pace = rowdata['pace']/1.0e3
- velo = 500./pace
- ergpw = 2.8*velo**3
- efficiency = 100.*ergpw/power
- efficiency = efficiency.replace([-np.inf,np.inf],np.nan)
+def add_efficiency(id=0):
+ rowdata, row = getrowdata_db(id=id, doclean=False, convertnewtons=False)
+ power = rowdata['power']
+ pace = rowdata['pace'] / 1.0e3
+ velo = 500. / pace
+ ergpw = 2.8 * velo**3
+ efficiency = 100. * ergpw / power
+
+ efficiency = efficiency.replace([-np.inf, np.inf], np.nan)
efficiency.fillna(method='ffill')
rowdata['efficiency'] = efficiency
delete_strokedata(id)
@@ -1524,79 +1526,80 @@ def add_efficiency(id=0):
rowdata['workoutid'] = id
engine = create_engine(database_url, echo=False)
with engine.connect() as conn, conn.begin():
- rowdata.to_sql('strokedata',engine,if_exists='append',index=False)
+ rowdata.to_sql('strokedata', engine,
+ if_exists='append', index=False)
conn.close()
- engine.dispose()
+ engine.dispose()
return rowdata
# This is the main routine.
# it reindexes, sorts, filters, and smooths the data, then
# saves it to the stroke_data table in the database
# Takes a rowingdata object's DataFrame as input
-def dataprep(rowdatadf,id=0,bands=True,barchart=True,otwpower=True,
- empower=True,inboard=0.88,forceunit='lbs'):
+
+
+def dataprep(rowdatadf, id=0, bands=True, barchart=True, otwpower=True,
+ empower=True, inboard=0.88, forceunit='lbs'):
if rowdatadf.empty:
return 0
- rowdatadf.set_index([range(len(rowdatadf))],inplace=True)
- t = rowdatadf.ix[:,'TimeStamp (sec)']
- t = pd.Series(t-rowdatadf.ix[0,'TimeStamp (sec)'])
+ rowdatadf.set_index([range(len(rowdatadf))], inplace=True)
+ t = rowdatadf.ix[:, 'TimeStamp (sec)']
+ t = pd.Series(t - rowdatadf.ix[0, 'TimeStamp (sec)'])
- row_index = rowdatadf.ix[:,' Stroke500mPace (sec/500m)'] > 3000
- rowdatadf.loc[row_index,' Stroke500mPace (sec/500m)'] = 3000.
+ row_index = rowdatadf.ix[:, ' Stroke500mPace (sec/500m)'] > 3000
+ rowdatadf.loc[row_index, ' Stroke500mPace (sec/500m)'] = 3000.
- p = rowdatadf.ix[:,' Stroke500mPace (sec/500m)']
- hr = rowdatadf.ix[:,' HRCur (bpm)']
- spm = rowdatadf.ix[:,' Cadence (stokes/min)']
- cumdist = rowdatadf.ix[:,'cum_dist']
- power = rowdatadf.ix[:,' Power (watts)']
- averageforce = rowdatadf.ix[:,' AverageDriveForce (lbs)']
- drivelength = rowdatadf.ix[:,' DriveLength (meters)']
+ p = rowdatadf.ix[:, ' Stroke500mPace (sec/500m)']
+ hr = rowdatadf.ix[:, ' HRCur (bpm)']
+ spm = rowdatadf.ix[:, ' Cadence (stokes/min)']
+ cumdist = rowdatadf.ix[:, 'cum_dist']
+ power = rowdatadf.ix[:, ' Power (watts)']
+ averageforce = rowdatadf.ix[:, ' AverageDriveForce (lbs)']
+ drivelength = rowdatadf.ix[:, ' DriveLength (meters)']
try:
- workoutstate = rowdatadf.ix[:,' WorkoutState']
+ workoutstate = rowdatadf.ix[:, ' WorkoutState']
except KeyError:
- workoutstate = 0*hr
-
- peakforce = rowdatadf.ix[:,' PeakDriveForce (lbs)']
+ workoutstate = 0 * hr
- forceratio = averageforce/peakforce
+ peakforce = rowdatadf.ix[:, ' PeakDriveForce (lbs)']
+
+ forceratio = averageforce / peakforce
forceratio = forceratio.fillna(value=0)
try:
- drivetime = rowdatadf.ix[:,' DriveTime (ms)']
- recoverytime = rowdatadf.ix[:,' StrokeRecoveryTime (ms)']
- rhythm = 100.*drivetime/(recoverytime+drivetime)
+ drivetime = rowdatadf.ix[:, ' DriveTime (ms)']
+ recoverytime = rowdatadf.ix[:, ' StrokeRecoveryTime (ms)']
+ rhythm = 100. * drivetime / (recoverytime + drivetime)
rhythm = rhythm.fillna(value=0)
except:
- rhythm = 0.0*forceratio
-
+ rhythm = 0.0 * forceratio
+
f = rowdatadf['TimeStamp (sec)'].diff().mean()
if f != 0:
- windowsize = 2*(int(10./(f)))+1
+ windowsize = 2 * (int(10. / (f))) + 1
else:
windowsize = 1
if windowsize <= 3:
- windowsize = 5
+ windowsize = 5
- if windowsize > 3 and windowsize 3 and windowsize < len(hr):
+ spm = savgol_filter(spm, windowsize, 3)
+ hr = savgol_filter(hr, windowsize, 3)
+ drivelength = savgol_filter(drivelength, windowsize, 3)
+ forceratio = savgol_filter(forceratio, windowsize, 3)
try:
t2 = t.fillna(method='ffill').apply(lambda x: timedeltaconv(x))
except TypeError:
- t2 = 0*t
+ t2 = 0 * t
-
p2 = p.fillna(method='ffill').apply(lambda x: timedeltaconv(x))
try:
- drivespeed = drivelength/rowdatadf[' DriveTime (ms)']*1.0e3
+ drivespeed = drivelength / rowdatadf[' DriveTime (ms)'] * 1.0e3
except TypeError:
- drivespeed = 0.0*rowdatadf['TimeStamp (sec)']
-
+ drivespeed = 0.0 * rowdatadf['TimeStamp (sec)']
drivespeed = drivespeed.fillna(value=0)
@@ -1604,159 +1607,155 @@ def dataprep(rowdatadf,id=0,bands=True,barchart=True,otwpower=True,
driveenergy = rowdatadf['driveenergy']
except KeyError:
if forceunit == 'lbs':
- driveenergy = drivelength*averageforce*lbstoN
+ driveenergy = drivelength * averageforce * lbstoN
else:
- drivenergy = drivelength*averageforce
-
- distance = rowdatadf.ix[:,'cum_dist']
- velo = 500./p
+ drivenergy = drivelength * averageforce
- distanceperstroke = 60.*velo/spm
+ distance = rowdatadf.ix[:, 'cum_dist']
+ velo = 500. / p
+
+ distanceperstroke = 60. * velo / spm
-
data = DataFrame(
- dict(
- time = t*1e3,
- hr = hr,
- pace = p*1e3,
- spm = spm,
- cumdist = cumdist,
- ftime = niceformat(t2),
- fpace = nicepaceformat(p2),
- driveenergy=driveenergy,
- power=power,
+ dict(
+ time=t * 1e3,
+ hr=hr,
+ pace=p * 1e3,
+ spm=spm,
+ cumdist=cumdist,
+ ftime=niceformat(t2),
+ fpace=nicepaceformat(p2),
+ driveenergy=driveenergy,
+ power=power,
workoutstate=workoutstate,
- averageforce=averageforce,
- drivelength=drivelength,
- peakforce=peakforce,
+ averageforce=averageforce,
+ drivelength=drivelength,
+ peakforce=peakforce,
forceratio=forceratio,
- distance=distance,
- drivespeed=drivespeed,
+ distance=distance,
+ drivespeed=drivespeed,
rhythm=rhythm,
distanceperstroke=distanceperstroke,
- )
- )
+ )
+ )
if bands:
- # HR bands
- data['hr_ut2'] = rowdatadf.ix[:,'hr_ut2']
- data['hr_ut1'] = rowdatadf.ix[:,'hr_ut1']
- data['hr_at'] = rowdatadf.ix[:,'hr_at']
- data['hr_tr'] = rowdatadf.ix[:,'hr_tr']
- data['hr_an'] = rowdatadf.ix[:,'hr_an']
- data['hr_max'] = rowdatadf.ix[:,'hr_max']
- data['hr_bottom'] = 0.0*data['hr']
+ # HR bands
+ data['hr_ut2'] = rowdatadf.ix[:, 'hr_ut2']
+ data['hr_ut1'] = rowdatadf.ix[:, 'hr_ut1']
+ data['hr_at'] = rowdatadf.ix[:, 'hr_at']
+ data['hr_tr'] = rowdatadf.ix[:, 'hr_tr']
+ data['hr_an'] = rowdatadf.ix[:, 'hr_an']
+ data['hr_max'] = rowdatadf.ix[:, 'hr_max']
+ data['hr_bottom'] = 0.0 * data['hr']
try:
- tel = rowdatadf.ix[:,' ElapsedTime (sec)']
+ tel = rowdatadf.ix[:, ' ElapsedTime (sec)']
except KeyError:
rowdatadf[' ElapsedTime (sec)'] = rowdatadf['TimeStamp (sec)']
if barchart:
- # time increments for bar chart
- time_increments = rowdatadf.ix[:,' ElapsedTime (sec)'].diff()
- time_increments[0] = time_increments[1]
- time_increments = 0.5*time_increments+0.5*np.abs(time_increments)
- x_right = (t2+time_increments.apply(lambda x:timedeltaconv(x)))
+ # time increments for bar chart
+ time_increments = rowdatadf.ix[:, ' ElapsedTime (sec)'].diff()
+ time_increments[0] = time_increments[1]
+ time_increments = 0.5 * time_increments + 0.5 * np.abs(time_increments)
+ x_right = (t2 + time_increments.apply(lambda x: timedeltaconv(x)))
- data['x_right'] = x_right
+ data['x_right'] = x_right
if empower:
try:
- wash = rowdatadf.ix[:,'wash']
+ wash = rowdatadf.ix[:, 'wash']
except KeyError:
- wash = 0*power
+ wash = 0 * power
try:
- catch = rowdatadf.ix[:,'catch']
+ catch = rowdatadf.ix[:, 'catch']
except KeyError:
- catch = 0*power
+ catch = 0 * power
try:
- finish = rowdatadf.ix[:,'finish']
+ finish = rowdatadf.ix[:, 'finish']
except KeyError:
- finish = 0*power
+ finish = 0 * power
try:
- peakforceangle = rowdatadf.ix[:,'peakforceangle']
+ peakforceangle = rowdatadf.ix[:, 'peakforceangle']
except KeyError:
- peakforceangle = 0*power
-
+ peakforceangle = 0 * power
if data['driveenergy'].mean() == 0:
try:
- driveenergy = rowdatadf.ix[:,'driveenergy']
+ driveenergy = rowdatadf.ix[:, 'driveenergy']
except KeyError:
- driveenergy = power*60/spm
+ driveenergy = power * 60 / spm
else:
driveenergy = data['driveenergy']
-
- arclength = (inboard-0.05)*(np.radians(finish)-np.radians(catch))
- if arclength.mean()>0:
+ arclength = (inboard - 0.05) * (np.radians(finish) - np.radians(catch))
+ if arclength.mean() > 0:
drivelength = arclength
elif drivelength.mean() == 0:
- drivelength = driveenergy/(averageforce*4.44822)
+ drivelength = driveenergy / (averageforce * 4.44822)
try:
- slip = rowdatadf.ix[:,'slip']
+ slip = rowdatadf.ix[:, 'slip']
except KeyError:
- slip = 0*power
-
+ slip = 0 * power
+
try:
- totalangle = finish-catch
- effectiveangle = finish-wash-catch-slip
+ totalangle = finish - catch
+ effectiveangle = finish - wash - catch - slip
except ValueError:
- totalangle = 0*power
- effectiveangle = 0*power
+ totalangle = 0 * power
+ effectiveangle = 0 * power
- if windowsize > 3 and windowsize 3 and windowsize < len(slip):
try:
- wash = savgol_filter(wash,windowsize,3)
+ wash = savgol_filter(wash, windowsize, 3)
except TypeError:
pass
try:
- slip = savgol_filter(slip,windowsize,3)
+ slip = savgol_filter(slip, windowsize, 3)
except TypeError:
pass
try:
- catch = savgol_filter(catch,windowsize,3)
+ catch = savgol_filter(catch, windowsize, 3)
except TypeError:
pass
try:
- finish = savgol_filter(finish,windowsize,3)
+ finish = savgol_filter(finish, windowsize, 3)
except TypeError:
pass
try:
- peakforceangle = savgol_filter(peakforceangle,windowsize,3)
+ peakforceangle = savgol_filter(peakforceangle, windowsize, 3)
except TypeError:
pass
try:
- driveenergy = savgol_filter(driveenergy,windowsize,3)
+ driveenergy = savgol_filter(driveenergy, windowsize, 3)
except TypeError:
pass
try:
- drivelength = savgol_filter(drivelength,windowsize,3)
+ drivelength = savgol_filter(drivelength, windowsize, 3)
except TypeError:
pass
try:
- totalangle = savgol_filter(totalangle,windowsize,3)
+ totalangle = savgol_filter(totalangle, windowsize, 3)
except TypeError:
pass
try:
- effectiveangle = savgol_filter(effectiveangle,windowsize,3)
+ effectiveangle = savgol_filter(effectiveangle, windowsize, 3)
except TypeError:
pass
- velo = 500./p
+ velo = 500. / p
+ ergpw = 2.8 * velo**3
+ efficiency = 100. * ergpw / power
- ergpw = 2.8*velo**3
- efficiency = 100.*ergpw/power
-
- efficiency = efficiency.replace([-np.inf,np.inf],np.nan)
+ efficiency = efficiency.replace([-np.inf, np.inf], np.nan)
efficiency.fillna(method='ffill')
-
+
try:
data['wash'] = wash
data['catch'] = catch
@@ -1770,39 +1769,39 @@ def dataprep(rowdatadf,id=0,bands=True,barchart=True,otwpower=True,
data['efficiency'] = efficiency
except ValueError:
pass
-
+
if otwpower:
- try:
- nowindpace = rowdatadf.ix[:,'nowindpace']
- except KeyError:
- nowindpace = p
- try:
- equivergpower = rowdatadf.ix[:,'equivergpower']
- except KeyError:
- equivergpower = 0*p+50.
-
- nowindpace2 = nowindpace.apply(lambda x: timedeltaconv(x))
- ergvelo = (equivergpower/2.8)**(1./3.)
+ try:
+ nowindpace = rowdatadf.ix[:, 'nowindpace']
+ except KeyError:
+ nowindpace = p
+ try:
+ equivergpower = rowdatadf.ix[:, 'equivergpower']
+ except KeyError:
+ equivergpower = 0 * p + 50.
- ergpace = 500./ergvelo
- ergpace[ergpace == np.inf] = 240.
- ergpace2 = ergpace.apply(lambda x: timedeltaconv(x))
+ nowindpace2 = nowindpace.apply(lambda x: timedeltaconv(x))
+ ergvelo = (equivergpower / 2.8)**(1. / 3.)
- data['ergpace'] = ergpace*1e3
- data['nowindpace'] = nowindpace*1e3
- data['equivergpower'] = equivergpower
- data['fergpace'] = nicepaceformat(ergpace2)
- data['fnowindpace'] = nicepaceformat(nowindpace2)
+ ergpace = 500. / ergvelo
+ ergpace[ergpace == np.inf] = 240.
+ ergpace2 = ergpace.apply(lambda x: timedeltaconv(x))
- data = data.replace([-np.inf,np.inf],np.nan)
+ data['ergpace'] = ergpace * 1e3
+ data['nowindpace'] = nowindpace * 1e3
+ data['equivergpower'] = equivergpower
+ data['fergpace'] = nicepaceformat(ergpace2)
+ data['fnowindpace'] = nicepaceformat(nowindpace2)
+
+ data = data.replace([-np.inf, np.inf], np.nan)
data = data.fillna(method='ffill')
-
+
# write data if id given
if id != 0:
data['workoutid'] = id
engine = create_engine(database_url, echo=False)
with engine.connect() as conn, conn.begin():
- data.to_sql('strokedata',engine,if_exists='append',index=False)
+ data.to_sql('strokedata', engine, if_exists='append', index=False)
conn.close()
- engine.dispose()
+ engine.dispose()
return data
diff --git a/rowers/forms.py b/rowers/forms.py
index 83de2878..34b14e4d 100644
--- a/rowers/forms.py
+++ b/rowers/forms.py
@@ -182,7 +182,8 @@ class UploadOptionsForm(forms.Form):
label='Make Workout Private')
landingpage = forms.ChoiceField(choices=landingpages,
- initial='workout_edit_view')
+ initial='workout_edit_view',
+ label='Landing Page')
class Meta:
fields = ['make_plot','plottype','upload_toc2','makeprivate']
diff --git a/rowers/templates/document_form.html b/rowers/templates/document_form.html
index 53a2c06d..3f065765 100644
--- a/rowers/templates/document_form.html
+++ b/rowers/templates/document_form.html
@@ -38,23 +38,10 @@
You can select one static plot to be generated immediately for
this workout. You can select to export to major fitness
platforms automatically.
- If you check "make private", this workout will not be visible to your followers and will not show up in your teams' workouts list.
+ If you check "make private", this workout will not be visible to your followers and will not show up in your teams' workouts list. With the Landing Page option, you can select to which (workout-related) page you will be
+ taken after a successful upload.
-
- Valid file types are:
-
- - Painsled iOS Stroke Export (CSV)
- - Painsled desktop version Stroke Export (CSV)
- - A TCX file with location data (lat,long) - with or without Heart Rate value, for example from RiM or CrewNerd
- - RowPro CSV export
- - SpeedCoach GPS and SpeedCoach GPS 2 CSV export
- - ErgData CSV export
- - ErgStick CSV export
- - BoatCoach CSV export
- - A FIT file with location data (experimental)
-
-
diff --git a/rowers/views.py b/rowers/views.py
index ec138f07..8b2c5b75 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -7627,34 +7627,39 @@ def workout_upload_view(request,
plottype = 'timeplot'
try:
- upload_toc2 = uploadoptions['upload_to_C2']
+ landingpage = uploadoptions['landingpage']
except KeyError:
- upload_toc2 = False
+ landingpage = r.defaultlandingpage
try:
- upload_tostrava = uploadoptions['upload_to_Strava']
+ upload_to_c2 = uploadoptions['upload_to_C2']
except KeyError:
- upload_tostrava = False
+ upload_to_c2 = False
try:
- upload_tost = uploadoptions['upload_to_SportTracks']
+ upload_to_strava = uploadoptions['upload_to_Strava']
except KeyError:
- upload_tost = False
+ upload_to_strava = False
try:
- upload_tork = uploadoptions['upload_to_RunKeeper']
+ upload_to_st = uploadoptions['upload_to_SportTracks']
except KeyError:
- upload_tork = False
+ upload_to_st = False
try:
- upload_toua = uploadoptions['upload_to_MapMyFitness']
+ upload_to_rk = uploadoptions['upload_to_RunKeeper']
except KeyError:
- upload_toua = False
+ upload_to_rk = False
try:
- upload_totp = uploadoptions['upload_to_TrainingPeaks']
+ upload_to_ua = uploadoptions['upload_to_MapMyFitness']
except KeyError:
- upload_totp = False
+ upload_to_ua = False
+
+ try:
+ upload_to_tp = uploadoptions['upload_to_TrainingPeaks']
+ except KeyError:
+ upload_to_tp = False
if request.method == 'POST':
form = DocumentsForm(request.POST,request.FILES)
@@ -7832,7 +7837,6 @@ def workout_upload_view(request,
return response
else:
form = DocumentsForm(initial=docformoptions)
- print uploadoptions
optionsform = UploadOptionsForm(initial=uploadoptions)
return render(request, 'document_form.html',
{'form':form,