From f287f9af2199eec2e61aac08200537161995cbc2 Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Sat, 11 Feb 2017 15:03:28 +0100
Subject: [PATCH 1/6] moved workout creation to dataprep for workout_upload
---
rowers/dataprep.py | 102 +++++++++++--
rowers/teams.py | 2 +
rowers/urls.py | 2 +-
rowers/views.py | 358 ++++++++++-----------------------------------
4 files changed, 168 insertions(+), 296 deletions(-)
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 12744d7f..906b66fe 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -15,11 +15,23 @@ from pytz import timezone as tz,utc
from django.utils.timezone import get_current_timezone
thetimezone = get_current_timezone()
+from rowingdata import (
+ TCXParser,RowProParser,ErgDataParser,TCXParserNoHR,
+ BoatCoachParser,RowPerfectParser,BoatCoachAdvancedParser,
+ MysteryParser,
+ painsledDesktopParser,speedcoachParser,ErgStickParser,
+ SpeedCoach2Parser,FITParser,fitsummarydata,
+ make_cumvalues,
+ summarydata,get_file_type,
+ )
+import os
import pandas as pd
import numpy as np
import itertools
+from tasks import handle_sendemail_unrecognized
+
from django.conf import settings
from sqlalchemy import create_engine
import sqlalchemy as sa
@@ -212,10 +224,46 @@ def new_workout_from_file(r,f2,
workouttype='rower',
title='Workout',
notes=''):
-
+ message = None
fileformat = get_file_type(f2)
summary = ''
- # handle non-Painsled
+ if len(fileformat)==3 and fileformat[0]=='zip':
+ f_to_be_deleted = f2
+ with zipfile.ZipFile(f2) as z:
+ # for now, we're getting only the first file
+ # from the NK zip file (issue #69 on bitbucket)
+ f2 = z.extract(z.namelist()[0],path='media/')
+ fileformat = fileformat[2]
+ os.remove(f_to_be_deleted)
+
+ # Some people try to upload Concept2 logbook summaries
+ if fileformat == 'c2log':
+ os.remove(f2)
+ message = "This C2 logbook summary does not contain stroke data. Please download the Export Stroke Data file from the workout details on the C2 logbook."
+ return (0,message)
+
+ # Some people try to upload RowPro summary logs
+ if fileformat == 'rowprolog':
+ os.remove(f2)
+ message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log."
+ return (0,message)
+
+ # Sometimes people try an unsupported file type.
+ # Send an email to info@rowsandall.com with the file attached
+ # for me to check if it is a bug, or a new file type
+ # worth supporting
+ if fileformat == 'unknown':
+ message = "We couldn't recognize the file type"
+ if settings.DEBUG:
+ res = handle_sendemail_unrecognized.delay(f2,
+ r.user.email)
+
+ else:
+ res = queuehigh.enqueue(handle_sendemail_unrecognized,
+ f2,r.user.email)
+ return (0,message)
+
+ # handle non-Painsled by converting it to painsled compatible CSV
if (fileformat != 'csv'):
# handle RowPro:
if (fileformat == 'rp'):
@@ -232,10 +280,18 @@ def new_workout_from_file(r,f2,
if (fileformat == 'tcxnohr'):
row = TCXParserNoHR(f2)
+ # handle RowPerfect
+ if (fileformat == 'rowperfect3'):
+ row = RowPerfectParser(f2)
+
# handle ErgData
if (fileformat == 'ergdata'):
row = ErgDataParser(f2)
+ # handle Mike
+ if (fileformat == 'bcmike'):
+ row = BoatCoachAdvancedParser(f2)
+
# handle BoatCoach
if (fileformat == 'boatcoach'):
row = BoatCoachParser(f2)
@@ -251,7 +307,10 @@ def new_workout_from_file(r,f2,
# handle speed coach GPS 2
if (fileformat == 'speedcoach2'):
row = SpeedCoach2Parser(f2)
- summary = row.allstats()
+ try:
+ summary = row.allstats()
+ except:
+ pass
# handle ErgStick
@@ -266,24 +325,30 @@ def new_workout_from_file(r,f2,
summary = s.summarytext
- f_to_be_deleted = f2
- # should delete file
- f2 = f2[:-4]+'o.csv'
- row.write_csv(f2,gzip=True)
+ f_to_be_deleted = f2
+ # should delete file
+ f2 = f2[:-4]+'o.csv'
+ row.write_csv(f2,gzip=True)
- #os.remove(f2)
- try:
- os.remove(f_to_be_deleted)
- except:
- os.remove(f_to_be_deleted+'.gz')
+ #os.remove(f2)
+ try:
+ os.remove(f_to_be_deleted)
+ except:
+ os.remove(f_to_be_deleted+'.gz')
+
+ powerperc = 100*np.array([r.pw_ut2,
+ r.pw_ut1,
+ r.pw_at,
+ r.pw_tr,r.pw_an])/r.ftp
# make workout and put in database
rr = rrower(hrmax=r.max,hrut2=r.ut2,
hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp)
+ hrtr=r.tr,hran=r.an,ftp=r.ftp,
+ powerperc=powerperc,powerzones=r.powerzones)
row = rdata(f2,rower=rr)
if row == 0:
- return HttpResponse("Error: CSV Data File Not Found")
+ return (0,'Error: CSV data file not found')
# auto smoothing
pace = row.df[' Stroke500mPace (sec/500m)'].values
@@ -336,6 +401,10 @@ def new_workout_from_file(r,f2,
totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
hours = int(totaltime/3600.)
+ if hours>23:
+ message = 'Warning: The workout duration was longer than 23 hours'
+ hours = 23
+
minutes = int((totaltime - 3600.*hours)/60.)
seconds = int(totaltime - 3600.*hours - 60.*minutes)
tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
@@ -362,9 +431,10 @@ def new_workout_from_file(r,f2,
w.save()
# put stroke data in database
- res = dataprep(row.df,id=w.id,bands=True,barchart=True,otwpower=True,empower=True)
+ res = dataprep(row.df,id=w.id,bands=True,
+ barchart=True,otwpower=True,empower=True)
- return True
+ return (w.id,message)
# Compare the data from the CSV file and the database
# Currently only calculates number of strokes. To be expanded with
diff --git a/rowers/teams.py b/rowers/teams.py
index 4ebc24e1..0e896db5 100644
--- a/rowers/teams.py
+++ b/rowers/teams.py
@@ -31,6 +31,8 @@ from rowers.tasks import (
inviteduration = 14 # days
def update_team(t,name,manager,private,notes):
+ if t.manager != manager:
+ return (0,'You are not the manager of this team')
try:
t.name = name
t.manager = manager
diff --git a/rowers/urls.py b/rowers/urls.py
index 2ee4b855..0d88ebe3 100644
--- a/rowers/urls.py
+++ b/rowers/urls.py
@@ -154,7 +154,7 @@ urlpatterns = [
url(r'^workout/(?P\d+)/export/c/(?P\w+.*)/s/(?P\w+.*)$',views.workout_edit_view),
url(r'^workout/(?P\d+)/edit/c/(?P.+.*)$',views.workout_edit_view),
url(r'^workout/(?P\d+)/edit/s/(?P.+.*)$',views.workout_edit_view),
- url(r'^workout/(\d+)/edit$',views.workout_edit_view),
+ url(r'^workout/(?P\d+)/edit$',views.workout_edit_view),
url(r'^workout/(?P\d+)/advanced/c/(?P.+.*)$',views.workout_advanced_view),
url(r'^workout/(?P\d+)/advanced/s/(?P.+.*)$',views.workout_advanced_view),
url(r'^workout/(?P\d+)/geeky$',views.workout_geeky_view),
diff --git a/rowers/views.py b/rowers/views.py
index 7cee538d..89ad45d6 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -4053,6 +4053,7 @@ def workout_getc2workout_view(request,c2id):
# This is the main view for processing uploaded files
@login_required()
def workout_upload_view(request,message=""):
+ r = Rower.objects.get(user=request.user)
if request.method == 'POST':
form = DocumentsForm(request.POST,request.FILES)
optionsform = UploadOptionsForm(request.POST)
@@ -4070,283 +4071,82 @@ def workout_upload_view(request,message=""):
f1 = res[0] # file name
f2 = res[1] # file name incl media directory
-
- # get file type (ErgData, NK, BoatCoach, etc
- fileformat = get_file_type(f2)
- if len(fileformat)==3 and fileformat[0]=='zip':
- f_to_be_deleted = f2
- with zipfile.ZipFile(f2) as z:
- # for now, we're getting only the first file
- # from the NK zip file (issue #69 on bitbucket)
- f2 = z.extract(z.namelist()[0],path='media/')
- fileformat = fileformat[2]
- os.remove(f_to_be_deleted)
- # Some people try to upload Concept2 logbook summaries
- if fileformat == 'c2log':
- os.remove(f2)
- message = "This C2 logbook summary does not contain stroke data. Please download the Export Stroke Data file from the workout details on the C2 logbook."
+
+ id,message = dataprep.new_workout_from_file(r,f2,
+ workouttype=workouttype,
+ title = t,
+ notes='')
+ if not id:
url = reverse(workout_upload_view,
args=[str(message)])
response = HttpResponseRedirect(url)
return response
- # Some people try to upload RowPro summary logs
- if fileformat == 'rowprolog':
- os.remove(f2)
- message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log."
- url = reverse(workout_upload_view,
- args=[str(message)])
- response = HttpResponseRedirect(url)
- return response
+ else:
+ if message:
+ url = reverse(workout_edit_view,
+ kwargs = {
+ 'id':id,
+ 'message':message,
+ })
+ else:
+ url = reverse(workout_edit_view,
+ kwargs = {
+ 'id':id,
+ })
+
+ response = HttpResponseRedirect(url)
+ w = Workout.objects.get(id=id)
- # Sometimes people try an unsupported file type.
- # Send an email to info@rowsandall.com with the file attached
- # for me to check if it is a bug, or a new file type
- # worth supporting
- if fileformat == 'unknown':
- message = "We couldn't recognize the file type"
- url = reverse(workout_upload_view,
- args=[str(message)])
- response = HttpResponseRedirect(url)
- if settings.DEBUG:
- res = handle_sendemail_unrecognized.delay(f2,
- request.user.email)
-
- else:
- res = queuehigh.enqueue(handle_sendemail_unrecognized,
- f2,request.user.email)
-
- return response
-
- summary = ''
- # handle non-Painsled by converting it to painsled
- # compatible CSV
- try:
- if (fileformat != 'csv'):
- # handle RowPro:
- if (fileformat == 'rp'):
- row = RowProParser(f2)
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser(f2)
-
- # handle Mystery
- if (fileformat == 'mystery'):
- row = MysteryParser(f2)
-
- # handle RowPerfect
- if (fileformat == 'rowperfect3'):
- row = RowPerfectParser(f2)
-
- # handle TCX no HR
- if (fileformat == 'tcxnohr'):
- row = TCXParserNoHR(f2)
-
- # handle ErgData
- if (fileformat == 'ergdata'):
- row = ErgDataParser(f2)
-
- # handle Mike
- if (fileformat == 'bcmike'):
- row = BoatCoachAdvancedParser(f2)
-
- # handle BoatCoach
- if (fileformat == 'boatcoach'):
- row = BoatCoachParser(f2)
-
- # handle painsled desktop
- if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser(f2)
-
- # handle speed coach GPS
- if (fileformat == 'speedcoach'):
- row = speedcoachParser(f2)
-
- # handle speed coach GPS 2
- if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser(f2)
- try:
- summary = row.allstats()
- except:
- pass
-
- # handle ErgStick
- if (fileformat == 'ergstick'):
- row = ErgStickParser(f2)
-
- # handle FIT
- if (fileformat == 'fit'):
- row = FITParser(f2)
- # The FIT files have nice lap/split summaries
- # so we make use of it
- s = fitsummarydata(f2)
- s.setsummary()
- summary = s.summarytext
-
- # Save the Painsled compatible CSV file and delete
- # the uploaded file
- f_to_be_deleted = f2
- # should delete file
- f2 = f2[:-4]+'o.csv'
- row.write_csv(f2,gzip=True)
-
- try:
- os.remove(f_to_be_deleted)
- except:
- os.remove(f_to_be_deleted+'.gz')
-
- # make Workout object and put in database
+ if (make_plot):
+ imagename = f1[:-4]+'.png'
+ fullpathimagename = 'static/plots/'+imagename
+ u = request.user
r = Rower.objects.get(user=request.user)
- powerperc = 100*np.array([r.pw_ut2,
+ powerperc = 100*np.array([r.pw_ut2,
r.pw_ut1,
r.pw_at,
r.pw_tr,r.pw_an])/r.ftp
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp,
- powerperc=powerperc,powerzones=r.powerzones)
- row = rdata(f2,rower=rr)
- if row == 0:
- return HttpResponse("Error: CSV Data File Not Found")
+ hrpwrdata = {
+ 'hrmax':r.max,
+ 'hrut2':r.ut2,
+ 'hrut1':r.ut1,
+ 'hrat':r.at,
+ 'hrtr':r.tr,
+ 'hran':r.an,
+ 'ftp':r.ftp,
+ 'powerperc':serialize_list(powerperc),
+ 'powerzones':serialize_list(r.powerzones),
+ }
- # auto smoothing
- pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = row.df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
- if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
+ # make plot - asynchronous task
+ plotnrs = {
+ 'timeplot':1,
+ 'distanceplot':2,
+ 'pieplot':3,
+ }
- if windowsize > 3 and windowsize
Date: Sat, 11 Feb 2017 15:51:42 +0100
Subject: [PATCH 2/6] Web Import now uses dataprep
---
rowers/dataprep.py | 213 ++++++++++++++++++++++++---------------------
rowers/views.py | 208 +++++++++++++------------------------------
2 files changed, 176 insertions(+), 245 deletions(-)
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 906b66fe..9b52d54c 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -217,6 +217,116 @@ def timedeltaconv(x):
return dt
+# Processes painsled CSV file to database
+def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
+ dosummary=True,title='Workout',
+ notes='',totaldist=0,totaltime=0):
+ message = None
+ powerperc = 100*np.array([r.pw_ut2,
+ r.pw_ut1,
+ r.pw_at,
+ r.pw_tr,r.pw_an])/r.ftp
+
+ # make workout and put in database
+ rr = rrower(hrmax=r.max,hrut2=r.ut2,
+ hrut1=r.ut1,hrat=r.at,
+ hrtr=r.tr,hran=r.an,ftp=r.ftp,
+ powerperc=powerperc,powerzones=r.powerzones)
+ row = rdata(f2,rower=rr)
+ if row == 0:
+ return (0,'Error: CSV data file not found')
+
+ if dosmooth:
+ # auto smoothing
+ pace = row.df[' Stroke500mPace (sec/500m)'].values
+ velo = 500./pace
+
+ f = row.df['TimeStamp (sec)'].diff().mean()
+ windowsize = 2*(int(10./(f)))+1
+ if not 'originalvelo' in row.df:
+ row.df['originalvelo'] = velo
+
+ if windowsize > 3 and windowsize23:
+ message = 'Warning: The workout duration was longer than 23 hours'
+ hours = 23
+
+ minutes = int((totaltime - 3600.*hours)/60.)
+ seconds = int(totaltime - 3600.*hours - 60.*minutes)
+ tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
+ duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
+
+ if dosummary:
+ summary = row.summary()
+ summary += '\n'
+ summary += row.intervalstats()
+
+ workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
+ workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
+ workoutstartdatetime = thetimezone.localize(row.rowdatetime).astimezone(utc)
+
+ # check for duplicate start times
+ ws = Workout.objects.filter(starttime=workoutstarttime,
+ user=r)
+ if (len(ws) != 0):
+ message = "Warning: This workout probably already exists in the database"
+
+ w = Workout(user=r,name=title,date=workoutdate,
+ workouttype=workouttype,
+ duration=duration,distance=totaldist,
+ weightcategory=r.weightcategory,
+ starttime=workoutstarttime,
+ csvfilename=f2,notes=notes,summary=summary,
+ maxhr=maxhr,averagehr=averagehr,
+ startdatetime=workoutstartdatetime)
+
+ w.save()
+ # put stroke data in database
+ res = dataprep(row.df,id=w.id,bands=True,
+ barchart=True,otwpower=True,empower=True)
+
+ return (w.id,message)
+
# Create new workout from file and store it in the database
# This routine should be used everywhere in views.py and mailprocessing.py
# Currently there is code duplication
@@ -336,105 +446,14 @@ def new_workout_from_file(r,f2,
except:
os.remove(f_to_be_deleted+'.gz')
- powerperc = 100*np.array([r.pw_ut2,
- r.pw_ut1,
- r.pw_at,
- r.pw_tr,r.pw_an])/r.ftp
- # make workout and put in database
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp,
- powerperc=powerperc,powerzones=r.powerzones)
- row = rdata(f2,rower=rr)
- if row == 0:
- return (0,'Error: CSV data file not found')
+ dosummary = (fileformat != 'fit')
+ id,message = save_workout_database(f2,r,
+ workouttype=workouttype,
+ dosummary=dosummary,
+ title=title)
- # auto smoothing
- pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = row.df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
- if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
-
- if windowsize > 3 and windowsize23:
- message = 'Warning: The workout duration was longer than 23 hours'
- hours = 23
-
- minutes = int((totaltime - 3600.*hours)/60.)
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
-
- workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
- workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
- workoutstartdatetime = thetimezone.localize(row.rowdatetime).astimezone(utc)
-
- # check for duplicate start times
- ws = Workout.objects.filter(starttime=workoutstarttime,
- user=r)
- if (len(ws) != 0):
- message = "Warning: This workout probably already exists in the database"
-
- w = Workout(user=r,name=title,date=workoutdate,
- workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=r.weightcategory,
- starttime=workoutstarttime,
- csvfilename=f2,notes=notes,summary=summary,
- maxhr=maxhr,averagehr=averagehr,
- startdatetime=workoutstartdatetime)
-
- w.save()
- # put stroke data in database
- res = dataprep(row.df,id=w.id,bands=True,
- barchart=True,otwpower=True,empower=True)
-
- return (w.id,message)
+ return (id,message)
# Compare the data from the CSV file and the database
# Currently only calculates number of strokes. To be expanded with
diff --git a/rowers/views.py b/rowers/views.py
index 89ad45d6..112cd2ce 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -67,13 +67,8 @@ from shutil import copyfile
from rowingdata import rower as rrower
from rowingdata import main as rmain
from rowingdata import rowingdata as rrdata
-from rowingdata import TCXParser,RowProParser,ErgDataParser,TCXParserNoHR
-from rowingdata import BoatCoachParser,RowPerfectParser,BoatCoachAdvancedParser
-from rowingdata import MysteryParser
-from rowingdata import painsledDesktopParser,speedcoachParser,ErgStickParser
-from rowingdata import SpeedCoach2Parser,FITParser,fitsummarydata
from rowingdata import make_cumvalues
-from rowingdata import summarydata,get_file_type
+from rowingdata import summarydata
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
@@ -451,69 +446,27 @@ def add_workout_from_strokedata(user,importid,data,strokedata,
compression='gzip')
- # make workout
- powerperc = 100*np.array([r.pw_ut2,
- r.pw_ut1,
- r.pw_at,
- r.pw_tr,r.pw_an])/r.ftp
-
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp,
- powerperc=powerperc,
- powerzones=r.powerzones,
- )
- row = rdata(csvfilename,rower=rr)
-
- averagehr = row.df[' HRCur (bpm)'].mean()
- maxhr = row.df[' HRCur (bpm)'].max()
- totaldist = row.df['cum_dist'].max()
- totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
- totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
-
# with Concept2
if source=='c2':
try:
totaldist = data['distance']
totaltime = data['time']/10.
except KeyError:
- pass
+ totaldist = 0
+ totaltime = 0
+ else:
+ totaldist = 0
+ totaltime = 0
+
+ id,message = dataprep.save_workout_database(csvfilename,r,
+ workouttype=workouttype,
+ title=title,notes=comments,
+ totaldist=totaldist,
+ totaltime=totaltime)
+
- hours = int(totaltime/3600.)
- minutes = int((totaltime - 3600.*hours)/60.)
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
-
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
-
-
- summary = row.summary()
- summary += '\n'
- summary += row.intervalstats()
-
- workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
- workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
-
- # check for duplicate start times
- ws = Workout.objects.filter(starttime=workoutstarttime,
- user=r)
- if (len(ws) != 0):
- warnings.warn("Probably a duplicate workout",UserWarning)
-
- # Create the Workout object
- w = Workout(user=r,name=title,
- date=workoutdate,workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=r.weightcategory,
- starttime=workoutstarttime,
- csvfilename=csvfilename,notes=comments,
- uploadedtoc2=0,summary=summary,
- averagehr=averagehr,maxhr=maxhr,
- startdatetime=rowdatetime)
- w.save()
-
- return w.id
+ return id,message
# Create workout from SportTracks Data, which are slightly different
# than Strava or Concept2 data
@@ -582,7 +535,8 @@ def add_workout_from_stdata(user,importid,data):
times_location = times_distance
latcoord = np.zeros(len(times_distance))
loncoord = np.zeros(len(times_distance))
-
+ if workouttype == 'water':
+ workouttype = 'rower'
try:
res = splitstdata(data['cadence'])
@@ -668,89 +622,17 @@ def add_workout_from_stdata(user,importid,data):
timestr = strftime("%Y%m%d-%H%M%S")
- # auto smoothing
- pace = df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
-
- df['originalvelo'] = velo
-
- if windowsize > 3 and windowsize
Date: Sat, 11 Feb 2017 17:46:15 +0100
Subject: [PATCH 3/6] mailprocessing using dataprep (not tested)
---
rowers/dataprep.py | 145 +++++++++++----------
rowers/mailprocessing.py | 141 +++-----------------
rowers/management/commands/processemail.py | 5 +-
3 files changed, 92 insertions(+), 199 deletions(-)
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 9b52d54c..43af9582 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -327,6 +327,80 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
return (w.id,message)
+def handle_nonpainsled(f2,fileformat,summary=''):
+ # handle RowPro:
+ if (fileformat == 'rp'):
+ row = RowProParser(f2)
+ # handle TCX
+ if (fileformat == 'tcx'):
+ row = TCXParser(f2)
+
+ # handle Mystery
+ if (fileformat == 'mystery'):
+ row = MysteryParser(f2)
+
+ # handle TCX no HR
+ if (fileformat == 'tcxnohr'):
+ row = TCXParserNoHR(f2)
+
+ # handle RowPerfect
+ if (fileformat == 'rowperfect3'):
+ row = RowPerfectParser(f2)
+
+ # handle ErgData
+ if (fileformat == 'ergdata'):
+ row = ErgDataParser(f2)
+
+ # handle Mike
+ if (fileformat == 'bcmike'):
+ row = BoatCoachAdvancedParser(f2)
+
+ # handle BoatCoach
+ if (fileformat == 'boatcoach'):
+ row = BoatCoachParser(f2)
+
+ # handle painsled desktop
+ if (fileformat == 'painsleddesktop'):
+ row = painsledDesktopParser(f2)
+
+ # handle speed coach GPS
+ if (fileformat == 'speedcoach'):
+ row = speedcoachParser(f2)
+
+ # handle speed coach GPS 2
+ if (fileformat == 'speedcoach2'):
+ row = SpeedCoach2Parser(f2)
+ try:
+ summary = row.allstats()
+ except:
+ pass
+
+
+ # handle ErgStick
+ if (fileformat == 'ergstick'):
+ row = ErgStickParser(f2)
+
+ # handle FIT
+ if (fileformat == 'fit'):
+ row = FITParser(f2)
+ s = fitsummarydata(f2)
+ s.setsummary()
+ summary = s.summarytext
+
+
+ f_to_be_deleted = f2
+ # should delete file
+ f2 = f2[:-4]+'o.csv'
+ row.write_csv(f2,gzip=True)
+
+ #os.remove(f2)
+ try:
+ os.remove(f_to_be_deleted)
+ except:
+ os.remove(f_to_be_deleted+'.gz')
+
+ return (f2,summary)
+
# Create new workout from file and store it in the database
# This routine should be used everywhere in views.py and mailprocessing.py
# Currently there is code duplication
@@ -375,76 +449,7 @@ def new_workout_from_file(r,f2,
# handle non-Painsled by converting it to painsled compatible CSV
if (fileformat != 'csv'):
- # handle RowPro:
- if (fileformat == 'rp'):
- row = RowProParser(f2)
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser(f2)
-
- # handle Mystery
- if (fileformat == 'mystery'):
- row = MysteryParser(f2)
-
- # handle TCX no HR
- if (fileformat == 'tcxnohr'):
- row = TCXParserNoHR(f2)
-
- # handle RowPerfect
- if (fileformat == 'rowperfect3'):
- row = RowPerfectParser(f2)
-
- # handle ErgData
- if (fileformat == 'ergdata'):
- row = ErgDataParser(f2)
-
- # handle Mike
- if (fileformat == 'bcmike'):
- row = BoatCoachAdvancedParser(f2)
-
- # handle BoatCoach
- if (fileformat == 'boatcoach'):
- row = BoatCoachParser(f2)
-
- # handle painsled desktop
- if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser(f2)
-
- # handle speed coach GPS
- if (fileformat == 'speedcoach'):
- row = speedcoachParser(f2)
-
- # handle speed coach GPS 2
- if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser(f2)
- try:
- summary = row.allstats()
- except:
- pass
-
-
- # handle ErgStick
- if (fileformat == 'ergstick'):
- row = ErgStickParser(f2)
-
- # handle FIT
- if (fileformat == 'fit'):
- row = FITParser(f2)
- s = fitsummarydata(f2)
- s.setsummary()
- summary = s.summarytext
-
-
- f_to_be_deleted = f2
- # should delete file
- f2 = f2[:-4]+'o.csv'
- row.write_csv(f2,gzip=True)
-
- #os.remove(f2)
- try:
- os.remove(f_to_be_deleted)
- except:
- os.remove(f_to_be_deleted+'.gz')
+ f2,summary = handle_nonpainsled(f2,fileformat,summary=summary)
dosummary = (fileformat != 'fit')
diff --git a/rowers/mailprocessing.py b/rowers/mailprocessing.py
index 19338906..592a8dfe 100644
--- a/rowers/mailprocessing.py
+++ b/rowers/mailprocessing.py
@@ -159,15 +159,19 @@ def processattachments_debug():
# Need to move the code to a subroutine used both in views.py and here
def make_new_workout_from_email(rr,f2,name,cntr=0):
workouttype = 'rower'
- f2 = f2.name
- fileformat = get_file_type('media/'+f2)
+
+ try:
+ f2 = f2.name
+ fileformat = get_file_type('media/'+f2)
+ except IOError:
+ f2 = f2.name+'.gz'
+ fileformat = get_file_type('media/'+f2)
if len(fileformat)==3 and fileformat[0]=='zip':
f_to_be_deleted = f2
with zipfile.ZipFile('media/'+f2) as z:
f2 = z.extract(z.namelist()[0],path='media/')[6:]
fileformat = fileformat[2]
- print f2
if fileformat == 'unknown':
if settings.DEBUG:
@@ -182,64 +186,13 @@ def make_new_workout_from_email(rr,f2,name,cntr=0):
summary = ''
# handle non-Painsled
- if (fileformat != 'csv'):
- # handle RowPro:
- if (fileformat == 'rp'):
- row = RowProParser('media/'+f2)
-
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser('media/'+f2)
-
- # handle Mystery
- if (fileformat == 'mystery'):
- row = MysteryParser('media/'+f2)
-
- # handle TCX no HR
- if (fileformat == 'tcxnohr'):
- row = TCXParserNoHR('media/'+f2)
-
- # handle ErgData
- if (fileformat == 'ergdata'):
- row = ErgDataParser('media/'+f2)
-
- # handle BoatCoach
- if (fileformat == 'boatcoach'):
- row = BoatCoachParser('media/'+f2)
-
- # handle painsled desktop
- if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser('media/'+f2)
-
- # handle speed coach GPS
- if (fileformat == 'speedcoach'):
- row = speedcoachParser('media/'+f2)
-
- # handle speed coach GPS 2
- if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser('media/'+f2)
-
- # handle ErgStick
- if (fileformat == 'ergstick'):
- row = ErgStickParser('media/'+f2)
-
- # handle FIT
- if (fileformat == 'fit'):
- row = FITParser('media/'+f2)
- s = fitsummarydata('media/'+f2)
- s.setsummary()
- summary = s.summarytext
-
- timestr = time.strftime("%Y%m%d-%H%M%S")
- filename = timestr+str(cntr)+'o.csv'
- row.write_csv('media/'+filename,gzip=True)
- f2 = filename
+ f3,summary = dataprep.handle_nonpainsled('media/'+f2,fileformat,summary)
# make workout and put in database
#r = rrower(hrmax=rr.max,hrut2=rr.ut2,
# hrut1=rr.ut1,hrat=rr.at,
# hrtr=rr.tr,hran=rr.an,ftp=r.ftp)
- row = rdata('media/'+f2) #,rower=r)
+ row = rdata(f3) #,rower=r)
if row == 0:
return 0
@@ -248,83 +201,21 @@ def make_new_workout_from_email(rr,f2,name,cntr=0):
timestr = time.strftime("%Y%m%d-%H%M%S")
f2 = 'media/'+timestr+str(cntr)+'o.csv'
- # auto smoothing
- pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = row.df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
-
- if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
-
- if windowsize > 3:
- velo2 = savgol_filter(velo,windowsize,3)
- else:
- velo2 = velo
-
- pace2 = 500./abs(velo2)
- row.df[' Stroke500mPace (sec/500m)'] = pace2
-
- row.df = row.df.fillna(0)
row.write_csv(f2,gzip=True)
+ dosummary = (fileformat != 'fit')
- # recalculate power data
- if workouttype == 'rower' or workouttype == 'dynamic' or workouttype == 'slides':
- try:
- row.erg_recalculatepower()
- # row.spm_fromtimestamps()
- row.write_csv(f2,gzip=True)
- except:
- pass
-
- if fileformat != 'fit':
- summary = row.summary()
- summary += '\n'
- summary += row.intervalstats_painsled()
-
- averagehr = row.df[' HRCur (bpm)'].mean()
- maxhr = row.df[' HRCur (bpm)'].max()
-
- totaldist = row.df['cum_dist'].max()
- totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
- totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
-
-
- hours = int(totaltime/3600.)
- minutes = int((totaltime - 3600.*hours)/60.)
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
-
-
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
-
- workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
- workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
-
- notes = 'imported through email'
if name == '':
name = 'imported through email'
- w = Workout(user=rr,name=name,date=workoutdate,
- workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=rr.weightcategory,
- starttime=workoutstarttime,
- csvfilename=f2,notes=notes,summary=summary,
- maxhr=maxhr,averagehr=averagehr,
- startdatetime=row.rowdatetime)
+ id,message = dataprep.save_workout_database(f2,rr,
+ workouttype=workouttype,
+ dosummary=dosummary,
+ title=name,
+ notes='imported through email')
- w.save()
- # put stroke data in database
- res = dataprep.dataprep(row.df,id=w.id,
- bands=True,barchart=True,
- otwpower=True,empower=True)
-
-
- return w.id
+ return id
diff --git a/rowers/management/commands/processemail.py b/rowers/management/commands/processemail.py
index b92692bb..6aab0eab 100644
--- a/rowers/management/commands/processemail.py
+++ b/rowers/management/commands/processemail.py
@@ -22,10 +22,7 @@ from rowsandall_app.settings import BASE_DIR
from rowingdata import rower as rrower
from rowingdata import main as rmain
from rowingdata import rowingdata as rrdata
-from rowingdata import TCXParser,RowProParser,ErgDataParser,TCXParserNoHR
-from rowingdata import MysteryParser
-from rowingdata import painsledDesktopParser,speedcoachParser,ErgStickParser
-from rowingdata import SpeedCoach2Parser,FITParser,fitsummarydata
+
from rowingdata import make_cumvalues
from rowingdata import summarydata,get_file_type
From 296d69ed1c7997aeb0ff8d53add47f25ed71967f Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Sun, 12 Feb 2017 20:40:38 +0100
Subject: [PATCH 4/6] tested processemail management command
---
rowers/mailprocessing.py | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/rowers/mailprocessing.py b/rowers/mailprocessing.py
index 592a8dfe..3d540645 100644
--- a/rowers/mailprocessing.py
+++ b/rowers/mailprocessing.py
@@ -186,8 +186,14 @@ def make_new_workout_from_email(rr,f2,name,cntr=0):
summary = ''
# handle non-Painsled
- f3,summary = dataprep.handle_nonpainsled('media/'+f2,fileformat,summary)
-
+ if fileformat != 'csv':
+ f3,summary = dataprep.handle_nonpainsled('media/'+f2,fileformat,summary)
+ else:
+ f3 = 'media/'+f2
+
+
+
+
# make workout and put in database
#r = rrower(hrmax=rr.max,hrut2=rr.ut2,
# hrut1=rr.ut1,hrat=rr.at,
@@ -196,11 +202,11 @@ def make_new_workout_from_email(rr,f2,name,cntr=0):
if row == 0:
return 0
+
# change filename
if f2[:5] != 'media':
timestr = time.strftime("%Y%m%d-%H%M%S")
f2 = 'media/'+timestr+str(cntr)+'o.csv'
-
row.write_csv(f2,gzip=True)
dosummary = (fileformat != 'fit')
From 99a44271a5a0287084f85e29d9c3463824b984c6 Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Sun, 12 Feb 2017 21:08:38 +0100
Subject: [PATCH 5/6] adding team to workout upon creation
---
rowers/dataprep.py | 9 +++++++++
rowers/models.py | 9 +++++++--
2 files changed, 16 insertions(+), 2 deletions(-)
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 43af9582..0bf6098e 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -25,6 +25,8 @@ from rowingdata import (
summarydata,get_file_type,
)
+from rowers.models import Team
+
import os
import pandas as pd
import numpy as np
@@ -320,7 +322,14 @@ def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
maxhr=maxhr,averagehr=averagehr,
startdatetime=workoutstartdatetime)
+
w.save()
+
+ ts = Team.objects.filter(rower=r)
+
+ for t in ts:
+ w.team.add(t)
+
# put stroke data in database
res = dataprep(row.df,id=w.id,bands=True,
barchart=True,otwpower=True,empower=True)
diff --git a/rowers/models.py b/rowers/models.py
index c67f050c..068e875c 100644
--- a/rowers/models.py
+++ b/rowers/models.py
@@ -377,9 +377,14 @@ class Workout(models.Model):
date = self.date
name = self.name
- str = date.strftime('%Y-%m-%d')+'_'+name
+ try:
+ stri = date.strftime('%Y-%m-%d')+'_'+name
+ except AttributeError:
+ stri = str(date)+'_'+name
+
- return str
+
+ return stri
# delete files belonging to workout instance
# related GraphImage objects should be deleted automatically
From 19037a9a81a5be9b202d9c0d48cff0a71fdd1e58 Mon Sep 17 00:00:00 2001
From: Sander Roosendaal
Date: Mon, 13 Feb 2017 16:20:11 +0100
Subject: [PATCH 6/6] Repair celery config, fix C2 token error, finish merge
---
rowers/c2stuff.py | 33 +++++++++++++++++++--------------
rowers/celery.py | 4 +++-
rowers/forms.py | 8 ++++++--
rowers/templates/teams.html | 18 ++++++++++++++++++
rowers/views.py | 19 ++++++++++++++++++-
rowsandall_app/settings_dev.py | 4 +++-
6 files changed, 67 insertions(+), 19 deletions(-)
diff --git a/rowers/c2stuff.py b/rowers/c2stuff.py
index 0e27bad9..d455f67a 100644
--- a/rowers/c2stuff.py
+++ b/rowers/c2stuff.py
@@ -447,7 +447,6 @@ def get_username(access_token):
me_json = response.json()
-
return me_json['data']['username']
# Get user id, having access token
@@ -463,9 +462,12 @@ def get_userid(access_token):
me_json = response.json()
-
-
- return me_json['data']['id']
+ try:
+ res = me_json['data']['id']
+ except KeyError:
+ res = 0
+
+ return res
# For debugging purposes
def process_callback(request):
@@ -515,16 +517,19 @@ def workout_c2_upload(user,w):
def rower_c2_token_refresh(user):
r = Rower.objects.get(user=user)
res = do_refresh_token(r.c2refreshtoken)
- access_token = res[0]
- expires_in = res[1]
- refresh_token = res[2]
- expirydatetime = timezone.now()+timedelta(seconds=expires_in)
+ if res[0]:
+ access_token = res[0]
+ expires_in = res[1]
+ refresh_token = res[2]
+ expirydatetime = timezone.now()+timedelta(seconds=expires_in)
- r = Rower.objects.get(user=user)
- r.c2token = access_token
- r.tokenexpirydate = expirydatetime
- r.c2refreshtoken = refresh_token
+ r = Rower.objects.get(user=user)
+ r.c2token = access_token
+ r.tokenexpirydate = expirydatetime
+ r.c2refreshtoken = refresh_token
- r.save()
- return r.c2token
+ r.save()
+ return r.c2token
+ else:
+ return None
diff --git a/rowers/celery.py b/rowers/celery.py
index 7eb1d00e..8e41c855 100644
--- a/rowers/celery.py
+++ b/rowers/celery.py
@@ -8,7 +8,7 @@ from celery import Celery
# on Windows, so I use Celery on my notebook.
# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rowsandall_app.settings')
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rowsandall_app.settings_dev')
from django.conf import settings # noqa
@@ -25,6 +25,8 @@ app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
+database_url = 'sqlite:///db.sqlite3'
+
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
diff --git a/rowers/forms.py b/rowers/forms.py
index 8111b1bb..0644ff5b 100644
--- a/rowers/forms.py
+++ b/rowers/forms.py
@@ -32,10 +32,14 @@ class CNsummaryForm(forms.Form):
class SummaryStringForm(forms.Form):
intervalstring = forms.CharField(max_length=255,label='Workout Description')
+# little window to type a Team invitation code
+class TeamInviteCodeForm(forms.Form):
+ code = forms.CharField(max_length=10,label='Team Code',
+ )
+
# Used for testing the POST API for StrokeData
class StrokeDataForm(forms.Form):
- strokedata = forms.CharField(label='payload',
- widget=forms.Textarea)
+ strokedata = forms.CharField(label='payload',widget=forms.Textarea)
# The form used for uploading files
class DocumentsForm(forms.Form):
diff --git a/rowers/templates/teams.html b/rowers/templates/teams.html
index 5d23245c..cf798b52 100644
--- a/rowers/templates/teams.html
+++ b/rowers/templates/teams.html
@@ -100,7 +100,25 @@
{% endfor %}
+
+ Manual code redeem
+
+
+
+
+
+
{% else %}
diff --git a/rowers/views.py b/rowers/views.py
index 112cd2ce..8beb0f07 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -647,7 +647,7 @@ def c2_open(user):
if res[0] != None:
thetoken = res[0]
else:
- thetoken = r.c2token
+ raise C2NoTokenError("User has no token")
else:
thetoken = r.c2token
@@ -5081,8 +5081,22 @@ def team_leave_view(request,id=0):
response = HttpResponseRedirect(url)
return response
+from rowers.forms import TeamInviteCodeForm
+
@login_required()
def rower_teams_view(request,message='',successmessage=''):
+ if request.method == 'POST':
+ form = TeamInviteCodeForm(request.POST)
+ if form.is_valid():
+ code = form.cleaned_data['code']
+ res,text = teams.process_invite_code(request.user,code)
+ if res:
+ successmessage = text
+ else:
+ message = text
+ else:
+ form = TeamInviteCodeForm()
+
r = Rower.objects.get(user=request.user)
ts = Team.objects.filter(rower=r)
myteams = Team.objects.filter(manager=request.user)
@@ -5093,6 +5107,8 @@ def rower_teams_view(request,message='',successmessage=''):
requests = TeamRequest.objects.filter(user=request.user)
myrequests = TeamRequest.objects.filter(team__in=myteams)
myinvites = TeamInvite.objects.filter(team__in=myteams)
+
+ print form
return render(request, 'teams.html',
{
@@ -5102,6 +5118,7 @@ def rower_teams_view(request,message='',successmessage=''):
'otherteams':otherteams,
'requests':requests,
'myrequests':myrequests,
+ 'form':form,
'message':message,
'successmessage':successmessage,
'myinvites':myinvites,
diff --git a/rowsandall_app/settings_dev.py b/rowsandall_app/settings_dev.py
index 602d958e..8ebb5caf 100644
--- a/rowsandall_app/settings_dev.py
+++ b/rowsandall_app/settings_dev.py
@@ -14,7 +14,9 @@ from settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
- 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),},
+ 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+ 'HOST': 'localhost'
+ },
# 'TEST': {
# 'CHARSET': 'utf8',
# 'COLLATION': 'utf8_general_ci',