diff --git a/rowers/c2stuff.py b/rowers/c2stuff.py
index 0e27bad9..d455f67a 100644
--- a/rowers/c2stuff.py
+++ b/rowers/c2stuff.py
@@ -447,7 +447,6 @@ def get_username(access_token):
me_json = response.json()
-
return me_json['data']['username']
# Get user id, having access token
@@ -463,9 +462,12 @@ def get_userid(access_token):
me_json = response.json()
-
-
- return me_json['data']['id']
+ try:
+ res = me_json['data']['id']
+ except KeyError:
+ res = 0
+
+ return res
# For debugging purposes
def process_callback(request):
@@ -515,16 +517,19 @@ def workout_c2_upload(user,w):
def rower_c2_token_refresh(user):
r = Rower.objects.get(user=user)
res = do_refresh_token(r.c2refreshtoken)
- access_token = res[0]
- expires_in = res[1]
- refresh_token = res[2]
- expirydatetime = timezone.now()+timedelta(seconds=expires_in)
+ if res[0]:
+ access_token = res[0]
+ expires_in = res[1]
+ refresh_token = res[2]
+ expirydatetime = timezone.now()+timedelta(seconds=expires_in)
- r = Rower.objects.get(user=user)
- r.c2token = access_token
- r.tokenexpirydate = expirydatetime
- r.c2refreshtoken = refresh_token
+ r = Rower.objects.get(user=user)
+ r.c2token = access_token
+ r.tokenexpirydate = expirydatetime
+ r.c2refreshtoken = refresh_token
- r.save()
- return r.c2token
+ r.save()
+ return r.c2token
+ else:
+ return None
diff --git a/rowers/celery.py b/rowers/celery.py
index 7eb1d00e..8e41c855 100644
--- a/rowers/celery.py
+++ b/rowers/celery.py
@@ -8,7 +8,7 @@ from celery import Celery
# on Windows, so I use Celery on my notebook.
# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rowsandall_app.settings')
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rowsandall_app.settings_dev')
from django.conf import settings # noqa
@@ -25,6 +25,8 @@ app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
+database_url = 'sqlite:///db.sqlite3'
+
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
diff --git a/rowers/dataprep.py b/rowers/dataprep.py
index 12744d7f..0bf6098e 100644
--- a/rowers/dataprep.py
+++ b/rowers/dataprep.py
@@ -15,11 +15,25 @@ from pytz import timezone as tz,utc
from django.utils.timezone import get_current_timezone
thetimezone = get_current_timezone()
+from rowingdata import (
+ TCXParser,RowProParser,ErgDataParser,TCXParserNoHR,
+ BoatCoachParser,RowPerfectParser,BoatCoachAdvancedParser,
+ MysteryParser,
+ painsledDesktopParser,speedcoachParser,ErgStickParser,
+ SpeedCoach2Parser,FITParser,fitsummarydata,
+ make_cumvalues,
+ summarydata,get_file_type,
+ )
+from rowers.models import Team
+
+import os
import pandas as pd
import numpy as np
import itertools
+from tasks import handle_sendemail_unrecognized
+
from django.conf import settings
from sqlalchemy import create_engine
import sqlalchemy as sa
@@ -205,115 +219,55 @@ def timedeltaconv(x):
return dt
-# Create new workout from file and store it in the database
-# This routine should be used everywhere in views.py and mailprocessing.py
-# Currently there is code duplication
-def new_workout_from_file(r,f2,
- workouttype='rower',
- title='Workout',
- notes=''):
-
- fileformat = get_file_type(f2)
- summary = ''
- # handle non-Painsled
- if (fileformat != 'csv'):
- # handle RowPro:
- if (fileformat == 'rp'):
- row = RowProParser(f2)
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser(f2)
-
- # handle Mystery
- if (fileformat == 'mystery'):
- row = MysteryParser(f2)
-
- # handle TCX no HR
- if (fileformat == 'tcxnohr'):
- row = TCXParserNoHR(f2)
-
- # handle ErgData
- if (fileformat == 'ergdata'):
- row = ErgDataParser(f2)
-
- # handle BoatCoach
- if (fileformat == 'boatcoach'):
- row = BoatCoachParser(f2)
-
- # handle painsled desktop
- if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser(f2)
-
- # handle speed coach GPS
- if (fileformat == 'speedcoach'):
- row = speedcoachParser(f2)
-
- # handle speed coach GPS 2
- if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser(f2)
- summary = row.allstats()
-
-
- # handle ErgStick
- if (fileformat == 'ergstick'):
- row = ErgStickParser(f2)
-
- # handle FIT
- if (fileformat == 'fit'):
- row = FITParser(f2)
- s = fitsummarydata(f2)
- s.setsummary()
- summary = s.summarytext
-
-
- f_to_be_deleted = f2
- # should delete file
- f2 = f2[:-4]+'o.csv'
- row.write_csv(f2,gzip=True)
-
- #os.remove(f2)
- try:
- os.remove(f_to_be_deleted)
- except:
- os.remove(f_to_be_deleted+'.gz')
+# Processes painsled CSV file to database
+def save_workout_database(f2,r,dosmooth=True,workouttype='rower',
+ dosummary=True,title='Workout',
+ notes='',totaldist=0,totaltime=0):
+ message = None
+ powerperc = 100*np.array([r.pw_ut2,
+ r.pw_ut1,
+ r.pw_at,
+ r.pw_tr,r.pw_an])/r.ftp
# make workout and put in database
rr = rrower(hrmax=r.max,hrut2=r.ut2,
hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp)
+ hrtr=r.tr,hran=r.an,ftp=r.ftp,
+ powerperc=powerperc,powerzones=r.powerzones)
row = rdata(f2,rower=rr)
if row == 0:
- return HttpResponse("Error: CSV Data File Not Found")
+ return (0,'Error: CSV data file not found')
- # auto smoothing
- pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
+ if dosmooth:
+ # auto smoothing
+ pace = row.df[' Stroke500mPace (sec/500m)'].values
+ velo = 500./pace
- f = row.df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
- if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
+ f = row.df['TimeStamp (sec)'].diff().mean()
+ windowsize = 2*(int(10./(f)))+1
+ if not 'originalvelo' in row.df:
+ row.df['originalvelo'] = velo
-    if windowsize > 3 and windowsize < len(velo):
-        velo2 = savgol_filter(velo,windowsize,3)
-    else:
-        velo2 = velo
-
-    pace2 = 500./abs(velo2)
-    row.df[' Stroke500mPace (sec/500m)'] = pace2
-
-    row.df = row.df.fillna(0)
+        if windowsize > 3 and windowsize < len(velo):
+            velo2 = savgol_filter(velo,windowsize,3)
+        else:
+            velo2 = velo
+
+        pace2 = 500./abs(velo2)
+        row.df[' Stroke500mPace (sec/500m)'] = pace2
+
+        row.df = row.df.fillna(0)
     averagehr = row.df[' HRCur (bpm)'].mean()
     maxhr = row.df[' HRCur (bpm)'].max()
+    if totaldist == 0:
+        totaldist = row.df['cum_dist'].max()
+    if totaltime == 0:
+        totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
+        totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
     hours = int(totaltime/3600.)
+    if hours>23:
+ message = 'Warning: The workout duration was longer than 23 hours'
+ hours = 23
+
minutes = int((totaltime - 3600.*hours)/60.)
seconds = int(totaltime - 3600.*hours - 60.*minutes)
tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
+ if dosummary:
+ summary = row.summary()
+ summary += '\n'
+ summary += row.intervalstats()
+
workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
workoutstartdatetime = thetimezone.localize(row.rowdatetime).astimezone(utc)
@@ -360,11 +322,152 @@ def new_workout_from_file(r,f2,
maxhr=maxhr,averagehr=averagehr,
startdatetime=workoutstartdatetime)
- w.save()
- # put stroke data in database
- res = dataprep(row.df,id=w.id,bands=True,barchart=True,otwpower=True,empower=True)
- return True
+ w.save()
+
+ ts = Team.objects.filter(rower=r)
+
+ for t in ts:
+ w.team.add(t)
+
+ # put stroke data in database
+ res = dataprep(row.df,id=w.id,bands=True,
+ barchart=True,otwpower=True,empower=True)
+
+ return (w.id,message)
+
+def handle_nonpainsled(f2,fileformat,summary=''):
+ # handle RowPro:
+ if (fileformat == 'rp'):
+ row = RowProParser(f2)
+ # handle TCX
+ if (fileformat == 'tcx'):
+ row = TCXParser(f2)
+
+ # handle Mystery
+ if (fileformat == 'mystery'):
+ row = MysteryParser(f2)
+
+ # handle TCX no HR
+ if (fileformat == 'tcxnohr'):
+ row = TCXParserNoHR(f2)
+
+ # handle RowPerfect
+ if (fileformat == 'rowperfect3'):
+ row = RowPerfectParser(f2)
+
+ # handle ErgData
+ if (fileformat == 'ergdata'):
+ row = ErgDataParser(f2)
+
+ # handle Mike
+ if (fileformat == 'bcmike'):
+ row = BoatCoachAdvancedParser(f2)
+
+ # handle BoatCoach
+ if (fileformat == 'boatcoach'):
+ row = BoatCoachParser(f2)
+
+ # handle painsled desktop
+ if (fileformat == 'painsleddesktop'):
+ row = painsledDesktopParser(f2)
+
+ # handle speed coach GPS
+ if (fileformat == 'speedcoach'):
+ row = speedcoachParser(f2)
+
+ # handle speed coach GPS 2
+ if (fileformat == 'speedcoach2'):
+ row = SpeedCoach2Parser(f2)
+ try:
+ summary = row.allstats()
+ except:
+ pass
+
+
+ # handle ErgStick
+ if (fileformat == 'ergstick'):
+ row = ErgStickParser(f2)
+
+ # handle FIT
+ if (fileformat == 'fit'):
+ row = FITParser(f2)
+ s = fitsummarydata(f2)
+ s.setsummary()
+ summary = s.summarytext
+
+
+ f_to_be_deleted = f2
+ # should delete file
+ f2 = f2[:-4]+'o.csv'
+ row.write_csv(f2,gzip=True)
+
+ #os.remove(f2)
+ try:
+ os.remove(f_to_be_deleted)
+ except:
+ os.remove(f_to_be_deleted+'.gz')
+
+ return (f2,summary)
+
+# Create new workout from file and store it in the database
+# This routine should be used everywhere in views.py and mailprocessing.py
+# Currently there is code duplication
+def new_workout_from_file(r,f2,
+ workouttype='rower',
+ title='Workout',
+ notes=''):
+ message = None
+ fileformat = get_file_type(f2)
+ summary = ''
+ if len(fileformat)==3 and fileformat[0]=='zip':
+ f_to_be_deleted = f2
+ with zipfile.ZipFile(f2) as z:
+ # for now, we're getting only the first file
+ # from the NK zip file (issue #69 on bitbucket)
+ f2 = z.extract(z.namelist()[0],path='media/')
+ fileformat = fileformat[2]
+ os.remove(f_to_be_deleted)
+
+ # Some people try to upload Concept2 logbook summaries
+ if fileformat == 'c2log':
+ os.remove(f2)
+ message = "This C2 logbook summary does not contain stroke data. Please download the Export Stroke Data file from the workout details on the C2 logbook."
+ return (0,message)
+
+ # Some people try to upload RowPro summary logs
+ if fileformat == 'rowprolog':
+ os.remove(f2)
+ message = "This RowPro logbook summary does not contain stroke data. Please use the Stroke Data CSV file for the individual workout in your log."
+ return (0,message)
+
+ # Sometimes people try an unsupported file type.
+ # Send an email to info@rowsandall.com with the file attached
+ # for me to check if it is a bug, or a new file type
+ # worth supporting
+ if fileformat == 'unknown':
+ message = "We couldn't recognize the file type"
+ if settings.DEBUG:
+            res = handle_sendemail_unrecognized.delay(f2,
+                                                      r.user.email)
+
+        else:
+            res = queuehigh.enqueue(handle_sendemail_unrecognized,
+                                    f2,r.user.email)
+        return (0,message)
+
+ # handle non-Painsled by converting it to painsled compatible CSV
+ if (fileformat != 'csv'):
+ f2,summary = handle_nonpainsled(f2,fileformat,summary=summary)
+
+
+ dosummary = (fileformat != 'fit')
+ id,message = save_workout_database(f2,r,
+ workouttype=workouttype,
+ dosummary=dosummary,
+ title=title)
+
+ return (id,message)
# Compare the data from the CSV file and the database
# Currently only calculates number of strokes. To be expanded with
diff --git a/rowers/forms.py b/rowers/forms.py
index 8111b1bb..0644ff5b 100644
--- a/rowers/forms.py
+++ b/rowers/forms.py
@@ -32,10 +32,14 @@ class CNsummaryForm(forms.Form):
class SummaryStringForm(forms.Form):
intervalstring = forms.CharField(max_length=255,label='Workout Description')
+# little window to type a Team invitation code
+class TeamInviteCodeForm(forms.Form):
+ code = forms.CharField(max_length=10,label='Team Code',
+ )
+
# Used for testing the POST API for StrokeData
class StrokeDataForm(forms.Form):
- strokedata = forms.CharField(label='payload',
- widget=forms.Textarea)
+ strokedata = forms.CharField(label='payload',widget=forms.Textarea)
# The form used for uploading files
class DocumentsForm(forms.Form):
diff --git a/rowers/mailprocessing.py b/rowers/mailprocessing.py
index 19338906..3d540645 100644
--- a/rowers/mailprocessing.py
+++ b/rowers/mailprocessing.py
@@ -159,15 +159,19 @@ def processattachments_debug():
# Need to move the code to a subroutine used both in views.py and here
def make_new_workout_from_email(rr,f2,name,cntr=0):
workouttype = 'rower'
- f2 = f2.name
- fileformat = get_file_type('media/'+f2)
+
+ try:
+ f2 = f2.name
+ fileformat = get_file_type('media/'+f2)
+ except IOError:
+ f2 = f2.name+'.gz'
+ fileformat = get_file_type('media/'+f2)
if len(fileformat)==3 and fileformat[0]=='zip':
f_to_be_deleted = f2
with zipfile.ZipFile('media/'+f2) as z:
f2 = z.extract(z.namelist()[0],path='media/')[6:]
fileformat = fileformat[2]
- print f2
if fileformat == 'unknown':
if settings.DEBUG:
@@ -182,149 +186,42 @@ def make_new_workout_from_email(rr,f2,name,cntr=0):
summary = ''
# handle non-Painsled
- if (fileformat != 'csv'):
- # handle RowPro:
- if (fileformat == 'rp'):
- row = RowProParser('media/'+f2)
+ if fileformat != 'csv':
+ f3,summary = dataprep.handle_nonpainsled('media/'+f2,fileformat,summary)
+ else:
+ f3 = 'media/'+f2
- # handle TCX
- if (fileformat == 'tcx'):
- row = TCXParser('media/'+f2)
- # handle Mystery
- if (fileformat == 'mystery'):
- row = MysteryParser('media/'+f2)
-
- # handle TCX no HR
- if (fileformat == 'tcxnohr'):
- row = TCXParserNoHR('media/'+f2)
-
- # handle ErgData
- if (fileformat == 'ergdata'):
- row = ErgDataParser('media/'+f2)
-
- # handle BoatCoach
- if (fileformat == 'boatcoach'):
- row = BoatCoachParser('media/'+f2)
-
- # handle painsled desktop
- if (fileformat == 'painsleddesktop'):
- row = painsledDesktopParser('media/'+f2)
-
- # handle speed coach GPS
- if (fileformat == 'speedcoach'):
- row = speedcoachParser('media/'+f2)
-
- # handle speed coach GPS 2
- if (fileformat == 'speedcoach2'):
- row = SpeedCoach2Parser('media/'+f2)
-
- # handle ErgStick
- if (fileformat == 'ergstick'):
- row = ErgStickParser('media/'+f2)
-
- # handle FIT
- if (fileformat == 'fit'):
- row = FITParser('media/'+f2)
- s = fitsummarydata('media/'+f2)
- s.setsummary()
- summary = s.summarytext
-
- timestr = time.strftime("%Y%m%d-%H%M%S")
- filename = timestr+str(cntr)+'o.csv'
- row.write_csv('media/'+filename,gzip=True)
- f2 = filename
-
+
+
# make workout and put in database
#r = rrower(hrmax=rr.max,hrut2=rr.ut2,
# hrut1=rr.ut1,hrat=rr.at,
# hrtr=rr.tr,hran=rr.an,ftp=r.ftp)
- row = rdata('media/'+f2) #,rower=r)
+ row = rdata(f3) #,rower=r)
if row == 0:
return 0
+
# change filename
if f2[:5] != 'media':
timestr = time.strftime("%Y%m%d-%H%M%S")
f2 = 'media/'+timestr+str(cntr)+'o.csv'
-
- # auto smoothing
- pace = row.df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = row.df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
-
- if not 'originalvelo' in row.df:
- row.df['originalvelo'] = velo
-
- if windowsize > 3:
- velo2 = savgol_filter(velo,windowsize,3)
- else:
- velo2 = velo
-
- pace2 = 500./abs(velo2)
- row.df[' Stroke500mPace (sec/500m)'] = pace2
-
- row.df = row.df.fillna(0)
row.write_csv(f2,gzip=True)
+ dosummary = (fileformat != 'fit')
- # recalculate power data
- if workouttype == 'rower' or workouttype == 'dynamic' or workouttype == 'slides':
- try:
- row.erg_recalculatepower()
- # row.spm_fromtimestamps()
- row.write_csv(f2,gzip=True)
- except:
- pass
-
- if fileformat != 'fit':
- summary = row.summary()
- summary += '\n'
- summary += row.intervalstats_painsled()
-
- averagehr = row.df[' HRCur (bpm)'].mean()
- maxhr = row.df[' HRCur (bpm)'].max()
-
- totaldist = row.df['cum_dist'].max()
- totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
- totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
-
-
- hours = int(totaltime/3600.)
- minutes = int((totaltime - 3600.*hours)/60.)
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
-
-
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
-
- workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
- workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
-
- notes = 'imported through email'
if name == '':
name = 'imported through email'
- w = Workout(user=rr,name=name,date=workoutdate,
- workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=rr.weightcategory,
- starttime=workoutstarttime,
- csvfilename=f2,notes=notes,summary=summary,
- maxhr=maxhr,averagehr=averagehr,
- startdatetime=row.rowdatetime)
+ id,message = dataprep.save_workout_database(f2,rr,
+ workouttype=workouttype,
+ dosummary=dosummary,
+ title=name,
+ notes='imported through email')
- w.save()
- # put stroke data in database
- res = dataprep.dataprep(row.df,id=w.id,
- bands=True,barchart=True,
- otwpower=True,empower=True)
-
-
- return w.id
+ return id
diff --git a/rowers/management/commands/processemail.py b/rowers/management/commands/processemail.py
index b92692bb..6aab0eab 100644
--- a/rowers/management/commands/processemail.py
+++ b/rowers/management/commands/processemail.py
@@ -22,10 +22,7 @@ from rowsandall_app.settings import BASE_DIR
from rowingdata import rower as rrower
from rowingdata import main as rmain
from rowingdata import rowingdata as rrdata
-from rowingdata import TCXParser,RowProParser,ErgDataParser,TCXParserNoHR
-from rowingdata import MysteryParser
-from rowingdata import painsledDesktopParser,speedcoachParser,ErgStickParser
-from rowingdata import SpeedCoach2Parser,FITParser,fitsummarydata
+
from rowingdata import make_cumvalues
from rowingdata import summarydata,get_file_type
diff --git a/rowers/models.py b/rowers/models.py
index c67f050c..068e875c 100644
--- a/rowers/models.py
+++ b/rowers/models.py
@@ -377,9 +377,14 @@ class Workout(models.Model):
date = self.date
name = self.name
- str = date.strftime('%Y-%m-%d')+'_'+name
+ try:
+ stri = date.strftime('%Y-%m-%d')+'_'+name
+ except AttributeError:
+ stri = str(date)+'_'+name
+
- return str
+
+ return stri
# delete files belonging to workout instance
# related GraphImage objects should be deleted automatically
diff --git a/rowers/teams.py b/rowers/teams.py
index 4ebc24e1..0e896db5 100644
--- a/rowers/teams.py
+++ b/rowers/teams.py
@@ -31,6 +31,8 @@ from rowers.tasks import (
inviteduration = 14 # days
def update_team(t,name,manager,private,notes):
+ if t.manager != manager:
+ return (0,'You are not the manager of this team')
try:
t.name = name
t.manager = manager
diff --git a/rowers/templates/teams.html b/rowers/templates/teams.html
index 5d23245c..cf798b52 100644
--- a/rowers/templates/teams.html
+++ b/rowers/templates/teams.html
@@ -100,7 +100,25 @@
{% endfor %}
+
+ Manual code redeem
+
+
+
+
+
+
{% else %}
diff --git a/rowers/urls.py b/rowers/urls.py
index 2ee4b855..0d88ebe3 100644
--- a/rowers/urls.py
+++ b/rowers/urls.py
@@ -154,7 +154,7 @@ urlpatterns = [
url(r'^workout/(?P\d+)/export/c/(?P\w+.*)/s/(?P\w+.*)$',views.workout_edit_view),
url(r'^workout/(?P\d+)/edit/c/(?P.+.*)$',views.workout_edit_view),
url(r'^workout/(?P\d+)/edit/s/(?P.+.*)$',views.workout_edit_view),
- url(r'^workout/(\d+)/edit$',views.workout_edit_view),
+ url(r'^workout/(?P\d+)/edit$',views.workout_edit_view),
url(r'^workout/(?P\d+)/advanced/c/(?P.+.*)$',views.workout_advanced_view),
url(r'^workout/(?P\d+)/advanced/s/(?P.+.*)$',views.workout_advanced_view),
url(r'^workout/(?P\d+)/geeky$',views.workout_geeky_view),
diff --git a/rowers/views.py b/rowers/views.py
index 7cee538d..8beb0f07 100644
--- a/rowers/views.py
+++ b/rowers/views.py
@@ -67,13 +67,8 @@ from shutil import copyfile
from rowingdata import rower as rrower
from rowingdata import main as rmain
from rowingdata import rowingdata as rrdata
-from rowingdata import TCXParser,RowProParser,ErgDataParser,TCXParserNoHR
-from rowingdata import BoatCoachParser,RowPerfectParser,BoatCoachAdvancedParser
-from rowingdata import MysteryParser
-from rowingdata import painsledDesktopParser,speedcoachParser,ErgStickParser
-from rowingdata import SpeedCoach2Parser,FITParser,fitsummarydata
from rowingdata import make_cumvalues
-from rowingdata import summarydata,get_file_type
+from rowingdata import summarydata
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
@@ -451,69 +446,27 @@ def add_workout_from_strokedata(user,importid,data,strokedata,
compression='gzip')
- # make workout
- powerperc = 100*np.array([r.pw_ut2,
- r.pw_ut1,
- r.pw_at,
- r.pw_tr,r.pw_an])/r.ftp
-
- rr = rrower(hrmax=r.max,hrut2=r.ut2,
- hrut1=r.ut1,hrat=r.at,
- hrtr=r.tr,hran=r.an,ftp=r.ftp,
- powerperc=powerperc,
- powerzones=r.powerzones,
- )
- row = rdata(csvfilename,rower=rr)
-
- averagehr = row.df[' HRCur (bpm)'].mean()
- maxhr = row.df[' HRCur (bpm)'].max()
- totaldist = row.df['cum_dist'].max()
- totaltime = row.df['TimeStamp (sec)'].max()-row.df['TimeStamp (sec)'].min()
- totaltime = totaltime+row.df.ix[0,' ElapsedTime (sec)']
-
# with Concept2
if source=='c2':
try:
totaldist = data['distance']
totaltime = data['time']/10.
except KeyError:
- pass
+ totaldist = 0
+ totaltime = 0
+ else:
+ totaldist = 0
+ totaltime = 0
+
+ id,message = dataprep.save_workout_database(csvfilename,r,
+ workouttype=workouttype,
+ title=title,notes=comments,
+ totaldist=totaldist,
+ totaltime=totaltime)
+
- hours = int(totaltime/3600.)
- minutes = int((totaltime - 3600.*hours)/60.)
- seconds = int(totaltime - 3600.*hours - 60.*minutes)
- tenths = int(10*(totaltime - 3600.*hours - 60.*minutes - seconds))
-
- duration = "%s:%s:%s.%s" % (hours,minutes,seconds,tenths)
-
-
- summary = row.summary()
- summary += '\n'
- summary += row.intervalstats()
-
- workoutdate = row.rowdatetime.strftime('%Y-%m-%d')
- workoutstarttime = row.rowdatetime.strftime('%H:%M:%S')
-
- # check for duplicate start times
- ws = Workout.objects.filter(starttime=workoutstarttime,
- user=r)
- if (len(ws) != 0):
- warnings.warn("Probably a duplicate workout",UserWarning)
-
- # Create the Workout object
- w = Workout(user=r,name=title,
- date=workoutdate,workouttype=workouttype,
- duration=duration,distance=totaldist,
- weightcategory=r.weightcategory,
- starttime=workoutstarttime,
- csvfilename=csvfilename,notes=comments,
- uploadedtoc2=0,summary=summary,
- averagehr=averagehr,maxhr=maxhr,
- startdatetime=rowdatetime)
- w.save()
-
- return w.id
+ return id,message
# Create workout from SportTracks Data, which are slightly different
# than Strava or Concept2 data
@@ -582,7 +535,8 @@ def add_workout_from_stdata(user,importid,data):
times_location = times_distance
latcoord = np.zeros(len(times_distance))
loncoord = np.zeros(len(times_distance))
-
+ if workouttype == 'water':
+ workouttype = 'rower'
try:
res = splitstdata(data['cadence'])
@@ -668,89 +622,17 @@ def add_workout_from_stdata(user,importid,data):
timestr = strftime("%Y%m%d-%H%M%S")
- # auto smoothing
- pace = df[' Stroke500mPace (sec/500m)'].values
- velo = 500./pace
-
- f = df['TimeStamp (sec)'].diff().mean()
- windowsize = 2*(int(10./(f)))+1
-
- df['originalvelo'] = velo
-
- if windowsize > 3 and windowsize 3 and windowsize