Private
Public Access
1
0

updated dataprepnodjango

This commit is contained in:
Sander Roosendaal
2019-10-23 20:04:28 +02:00
parent fa373d781f
commit c0e8e448e3
3 changed files with 172 additions and 146 deletions

View File

@@ -28,6 +28,35 @@ from rowsandall_app.settings_dev import use_sqlite
from rowers.utils import lbstoN
# dtypes: per-column dtype mapping applied to strokedata DataFrames.
# The id and heart-rate columns are integers, the pre-formatted display
# strings (ftime/fpace/...) stay text, and every measured quantity is a float.
_INT_COLS = ('workoutid', 'hr')
_FLOAT_COLS = (
    'pace', 'velo', 'spm', 'driveenergy', 'power', 'averageforce',
    'peakforce', 'drivelength', 'distance', 'cumdist', 'drivespeed',
    'catch', 'slip', 'finish', 'wash', 'peakforceangle', 'totalangle',
    'effectiveangle', 'rhythm', 'efficiency', 'distanceperstroke',
)
_STR_COLS = ('ftime', 'fpace', 'fergpace', 'fnowindpace')
# Build in the same insertion order as before: ints, floats, strings.
dtypes = {name: int for name in _INT_COLS}
dtypes.update({name: float for name in _FLOAT_COLS})
dtypes.update({name: str for name in _STR_COLS})
try:
user = DATABASES['default']['USER']
@@ -637,20 +666,11 @@ def new_workout_from_file(r,f2,
return (id,message,f2)
def delete_strokedata(id, debug=False):
    """Delete all stroke data for one workout.

    Removes the workout's rows from the ``strokedata`` table and deletes
    its on-disk parquet store under ``media/``.

    Parameters
    ----------
    id : int
        Workout id whose stroke data is removed.
    debug : bool
        When True, use the debug database URL instead of the production one.

    Returns
    -------
    None
    """
    if debug:
        engine = create_engine(database_url_debug, echo=False)
    else:
        engine = create_engine(database_url, echo=False)
    # Bound parameter instead of str.format: avoids SQL injection/quoting
    # issues if id ever arrives as a string.
    query = sa.text('DELETE FROM strokedata WHERE workoutid=:id')
    with engine.connect() as conn, conn.begin():
        try:
            conn.execute(query, {'id': id})
        except sa.exc.OperationalError:
            # SQLite raises OperationalError while the file is locked;
            # keep the original best-effort behavior and just report it.
            print("Database Locked")
        # NOTE: the `with` block closes the connection; no explicit
        # conn.close() needed (the original called it redundantly).
    engine.dispose()
    # The parquet store is written as a directory (dask layout), hence
    # rmtree rather than os.remove; a missing store is not an error.
    dirname = 'media/strokedata_{id}.parquet.gz'.format(id=id)
    try:
        shutil.rmtree(dirname)
    except FileNotFoundError:
        pass
def update_strokedata(id,df,debug=False):
delete_strokedata(id,debug=debug)
@@ -723,19 +743,18 @@ def getsmallrowdata_db(columns,ids=[],debug=False):
if len(ids)>1:
for f in csvfilenames:
try:
df = dd.read_parquet(f,columns=columns,engine='pyarrow')
df = pd.read_parquet(f,columns=columns,engine='pyarrow')
data.append(df)
except OSError:
pass
df = dd.concat(data,axis=0)
df = pd.concat(data,axis=0)
else:
df = dd.read_parquet(csvfilenames[0],columns=columns,engine='pyarrow')
df = pd.read_parquet(csvfilenames[0],columns=columns,engine='pyarrow')
data = df.compute()
return data
return df
def fitnessmetric_to_sql(m,table='powertimefitnessmetric',debug=False,
doclean=False):
@@ -779,51 +798,42 @@ def read_cols_df_sql(ids,columns,debug=False):
columns = list(columns)+['distance','spm']
columns = [x for x in columns if x != 'None']
columns = list(set(columns))
cls = ''
ids = [int(id) for id in ids]
if debug:
engine = create_engine(database_url_debug, echo=False)
else:
engine = create_engine(database_url, echo=False)
for column in columns:
cls += column+', '
cls = cls[:-2]
if len(ids) == 0:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid=0'.format(
columns = cls,
))
return pd.DataFrame()
elif len(ids) == 1:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid={id}'.format(
id = ids[0],
columns = cls,
))
try:
filename = 'media/strokedata_{id}.parquet.gz'.format(id=ids[0])
df = pd.read_parquet(filename,columns=columns)
except OSError:
pass
else:
query = sa.text('SELECT {columns} FROM strokedata WHERE workoutid IN {ids}'.format(
columns = cls,
ids = tuple(ids),
))
df = pd.read_sql_query(query,engine)
engine.dispose()
data = []
filenames = ['media/strokedata_{id}.parquet.gz'.format(id=id) for id in ids]
for id,f in zip(ids,filenames):
try:
df = pd.read_parquet(f,columns=columns)
data.append(df)
except OSError:
pass
df = pd.concat(data,axis=0)
return df
def read_df_sql(id, debug=False):
    """Load the full stroke-data DataFrame for one workout.

    Reads the per-workout parquet file under ``media/`` when it exists;
    otherwise falls back to querying the ``strokedata`` table. In the
    displayed original the SQL query unconditionally overwrote the
    parquet result, making the parquet read dead code — here SQL is only
    the fallback for a missing/unreadable file.

    Parameters
    ----------
    id : int
        Workout id to load.
    debug : bool
        When True, use the debug database URL and print diagnostics.

    Returns
    -------
    pandas.DataFrame
        Stroke data with NaNs replaced by 0.
    """
    if debug:
        engine = create_engine(database_url_debug, echo=False)
        print("read_df", id)
        print(database_url_debug)
    else:
        engine = create_engine(database_url, echo=False)
    try:
        f = 'media/strokedata_{id}.parquet.gz'.format(id=id)
        df = pd.read_parquet(f)
    except OSError:
        # No parquet store for this workout — fall back to the database.
        # Bound parameter instead of str.format for safe quoting.
        df = pd.read_sql_query(
            sa.text('SELECT * FROM strokedata WHERE workoutid=:id'),
            engine,
            params={'id': id},
        )
    df = df.fillna(value=0)
    engine.dispose()
    return df
def getcpdata_sql(rower_id,table='cpdata',debug=False):
@@ -1266,6 +1276,9 @@ def dataprep(rowdatadf,id=0,bands=True,barchart=True,otwpower=True,
# write data if id given
if id != 0:
data['workoutid'] = id
data.to_parquet(filename,engine='pyarrow',compression='gzip')
data = data.astype(dtype=dtypes)
filename = 'media/strokedata_{id}.parquet.gz'.format(id=id)
df = dd.from_pandas(data,npartitions=1)
df.to_parquet(filename,engine='fastparquet',compression='GZIP')
return data