Private
Public Access
1
0

Non-working version

This commit is contained in:
Sander Roosendaal
2017-03-09 12:38:37 +01:00
parent 5be668ebf0
commit 1dcfbb067c
5 changed files with 604 additions and 11 deletions

View File

@@ -12,7 +12,8 @@ from rowingdata import get_file_type,get_empower_rigging
from pandas import DataFrame,Series
from pytz import timezone as tz,utc
from django.utils import timezone
from time import strftime,strptime,mktime,time,daylight
from django.utils.timezone import get_current_timezone
thetimezone = get_current_timezone()
from rowingdata import (
@@ -80,7 +81,8 @@ columndict = {
'finish':'finish',
'peakforceangle':'peakforceangle',
'wash':'wash',
'slip':'wash',
'slip':'wash',
'workoutstate':' WorkoutState',
}
from scipy.signal import savgol_filter
@@ -621,6 +623,62 @@ def new_workout_from_file(r,f2,
return (id,message,f2)
# Create new workout from data frame and store it in the database
# This routine should be used everywhere in views.py and mailprocessing.py
# Currently there is code duplication
def new_workout_from_df(r,df,
        title='New Workout',
        parent=None):
    """Create a new workout in the database from a rowing DataFrame.

    Writes the frame to a gzipped scratch CSV under ``media/`` and then
    registers it through ``save_workout_database``.

    Parameters:
        r: rower/user object, passed straight through to
            ``save_workout_database`` (project type; contract not visible here).
        df: pandas DataFrame of stroke data; columns are renamed in place
            via the module-level ``columndict`` mapping.
        title: title stored with the new workout.
        parent: optional existing workout; when given, rigging, notes,
            privacy and start time are inherited from it.

    Returns:
        ``(id, message)`` as produced by ``save_workout_database``.
    """
    message = None
    summary = ''
    if parent:
        # Inherit rigging and metadata from the parent workout.
        oarlength = parent.oarlength
        inboard = parent.inboard
        workouttype = parent.workouttype
        notes=parent.notes
        summary=parent.summary
        makeprivate=parent.privacy
        startdatetime=parent.startdatetime
    else:
        # Stand-alone defaults; oar dimensions presumably in metres — confirm.
        oarlength = 2.89
        inboard = 0.88
        workouttype = 'rower'
        notes=''
        summary=''
        makeprivate=False
        startdatetime = timezone.now()
    # NOTE(review): summary is collected above but never passed to
    # save_workout_database below — verify that is intended.
    # Scratch CSV named by wall-clock time; not collision-proof under
    # concurrent requests within the same second.
    timestr = strftime("%Y%m%d-%H%M%S")
    csvfilename ='media/Fusion_'+timestr+'.csv'
    # Map incoming column names onto the canonical rowingdata names.
    df.rename(columns = columndict,inplace=True)
    # NOTE(review): mktime() interprets its tuple as *local* time, but
    # utctimetuple() yields UTC — this looks off by the local UTC offset;
    # calendar.timegm() would be the UTC-correct conversion. Confirm.
    starttimeunix = mktime(startdatetime.utctimetuple())
    # Keep the original relative clock as elapsed time, then shift the
    # timestamp column to absolute unix time.
    df[' ElapsedTime (sec)'] = df['TimeStamp (sec)']
    df['TimeStamp (sec)'] = df['TimeStamp (sec)']+starttimeunix
    row = rrdata(df=df)
    row.write_csv(csvfilename,gzip=True)
    #res = df.to_csv(csvfilename+'.gz',index_label='index',
    #                                compression='gzip')
    # 'id' shadows the builtin; kept for byte-compatibility with callers.
    id,message = save_workout_database(csvfilename,r,
            workouttype=workouttype,
            title=title,
            notes=notes,
            oarlength=oarlength,
            inboard=inboard,
            makeprivate=makeprivate,
            dosmooth=False)
    return (id,message)
# Compare the data from the CSV file and the database
# Currently only calculates number of strokes. To be expanded with
# more elaborate testing if needed
@@ -696,10 +754,10 @@ def repair_data(verbose=False):
# A wrapper around the rowingdata class, with some error catching
def rdata(file,rower=rrower()):
try:
res = rrdata(file,rower=rower)
res = rrdata(csvfile=file,rower=rower)
except IOError,IndexError:
try:
res = rrdata(file+'.gz',rower=rower)
res = rrdata(csvfile=file+'.gz',rower=rower)
except IOError,IndexError:
res = 0
@@ -900,11 +958,21 @@ def smalldataprep(therows,xparam,yparam1,yparam2):
# data fusion
def datafusion(id1,id2,columns,offset):
df1 = getrowdata_db(id=id1)
df1,w1 = getrowdata_db(id=id1)
df1 = df1.drop(['cumdist',
'hr_ut2',
'hr_ut1',
'hr_at',
'hr_tr',
'hr_an',
'hr_max',],
1,errors='ignore')
columns = ['time']+columns
df2 = getsmallrowdata_db(columns,ids=[id2])
df2 = getsmallrowdata_db(columns,ids=[id2],doclean=False)
keep1 = set(df1.columns)
print df1['pace'].mean()/1000.,'mies'
keep1 = {c:c for c in set(df1.columns)}
for c in columns:
keep1.pop(c)
@@ -913,10 +981,14 @@ def datafusion(id1,id2,columns,offset):
df1 = df1.drop(c,1,errors='ignore')
df = pd.concat([df1,df2],ignore_index=True)
df = df.sort_value(['time'])
df.interpolate(method='linear',axis=0,limit_direction='both')
df = df.sort_values(['time'])
df = df.interpolate(method='linear',axis=0,limit_direction='both')
df.fillna(method='bfill',inplace=True)
df['time'] = df['time']/1000.
df['pace'] = df['pace']/1000.
print df['pace'].mean(),'noot'
return df
# This is the main routine.
@@ -933,6 +1005,7 @@ def dataprep(rowdatadf,id=0,bands=True,barchart=True,otwpower=True,
rowdatadf.loc[row_index,' Stroke500mPace (sec/500m)'] = 3000.
p = rowdatadf.ix[:,' Stroke500mPace (sec/500m)']
print p.mean(),'aap'
hr = rowdatadf.ix[:,' HRCur (bpm)']
spm = rowdatadf.ix[:,' Cadence (stokes/min)']
cumdist = rowdatadf.ix[:,'cum_dist']