337 lines
9.7 KiB
Python
337 lines
9.7 KiB
Python
import pandas as pd
|
|
import numpy as np
|
|
import datetime
|
|
from datetime import timedelta
|
|
from uuid import uuid4
|
|
import traceback
|
|
|
|
from rowsandall_app.settings import UPLOAD_SERVICE_SECRET, UPLOAD_SERVICE_URL
|
|
from rowsandall_app.settings import NK_API_LOCATION
|
|
|
|
from rowers.utils import dologging
|
|
|
|
import requests
|
|
import json
|
|
|
|
|
|
def strfdelta(tdelta):
    """Render a time delta as ``MM:SS.t`` (zero-padded minutes/seconds, tenths).

    Accepts either a ``datetime.timedelta`` or a ``numpy.timedelta64``;
    the numpy case is detected by the missing ``seconds`` attribute and the
    raw value is interpreted as integer nanoseconds.
    """
    try:
        minutes, seconds = divmod(tdelta.seconds, 60)
        tenths = int(tdelta.microseconds / 1e5)
    except AttributeError:  # pragma: no cover
        # numpy.timedelta64: view the raw counter as integer nanoseconds.
        minutes, seconds = divmod(tdelta.view(np.int64), 60e9)
        seconds, rest = divmod(seconds, 1e9)
        tenths = int(rest / 1e8)

    # Cast to int before formatting: the numpy branch yields floats
    # (e.g. 5.0), which would defeat the zero padding ("5.0" vs "05").
    res = "{minutes:0>2}:{seconds:0>2}.{tenths:0>1}".format(
        minutes=int(minutes),
        seconds=int(seconds),
        tenths=tenths,
    )

    return res
|
|
|
|
|
|
def add_workout_from_data(userid, nkid, data, strokedata, source='nk', splitdata=None,
                          workoutsource='nklinklogbook'):
    """Store an NK logbook workout and submit it to the upload service.

    Writes the per-stroke DataFrame to a gzipped CSV under ``media/``,
    builds a text summary from the workout metadata, and POSTs the combined
    upload options as JSON to ``UPLOAD_SERVICE_URL``.

    Parameters:
        userid: int, or an object with an ``id`` attribute (e.g. a user model).
        nkid: NK logbook workout identifier, used in the CSV name and payload.
        data: dict of NK workout summary fields (name, speedInput, elapsedTime,
            totalDistanceGps/Imp, oarlockSessions, ...).
        strokedata: pandas DataFrame of per-stroke samples.
        source, splitdata, workoutsource: accepted for caller compatibility
            but not used in this body.

    Returns:
        (workoutid, "") on success, or (0, error_text) when the upload
        service responds with a non-200 status.
    """

    # Random prefix avoids filename collisions between uploads of the same nkid.
    csvfilename = 'media/{code}_{nkid}.csv.gz'.format(
        nkid=nkid,
        code=uuid4().hex[:16]
    )

    # dologging('nklog.log',csvfilename)

    # Accept either a plain integer id or a user-like object carrying .id;
    # int() on a non-numeric object raises TypeError.
    try:
        userid = int(userid)
    except TypeError:  # pragma: no cover
        userid = userid.id

    strokedata.to_csv(csvfilename, index_label='index', compression='gzip')

    title = data["name"]
    speedInput = data["speedInput"]
    elapsedTime = data["elapsedTime"]  # milliseconds (converted below)
    totalDistanceGps = data["totalDistanceGps"]
    totalDistanceImp = data["totalDistanceImp"]
    # intervals = data["intervals"] # add intervals
    oarlockSessions = data["oarlockSessions"]
    dologging('nklog.log',oarlockSessions)
    # deviceId = data["deviceId"] # you could get the firmware version

    # Prefer the impeller distance when the device was set to impeller speed
    # input; otherwise use the GPS distance.
    totalDistance = totalDistanceGps
    useImpeller = False
    if speedInput:  # pragma: no cover
        totalDistance = totalDistanceImp
        useImpeller = True

    summary = get_nk_allstats(data, strokedata)

    speedInput = data['speedInput']  # 0 = GPS; 1 = Impeller

    # Oarlock rigging: take the first session; fall back to default rigging
    # values when no oarlock data is present.
    if oarlockSessions:
        oarlocksession = oarlockSessions[0]  # should take seatIndex
        # boatName = oarlocksession["boatName"]
        oarLength = oarlocksession["oarLength"]  # cm
        oarInboardLength = oarlocksession["oarInboardLength"]  # cm
        try:
            seatNumber = oarlocksession["seatNumber"]
        except KeyError:
            seatNumber = 1
        try:
            oarlockfirmware = oarlocksession["firmwareVersion"]
        except KeyError:
            oarlockfirmware = ''
    else:  # pragma: no cover
        # boatName = ''
        oarLength = 289
        oarInboardLength = 88
        seatNumber = 1
        oarlockfirmware = ''

    workouttype = "water"
    boattype = "1x"

    uploadoptions = {
        'secret': UPLOAD_SERVICE_SECRET,
        'user': userid,
        'file': csvfilename,
        'title': title,
        'workouttype': workouttype,
        'boattype': boattype,
        'nkid': nkid,
        'inboard': oarInboardLength/100.,  # cm -> m
        'oarlength': oarLength/100.,  # cm -> m
        'summary': summary,
        'oarlockfirmware': oarlockfirmware,
        'elapsedTime': elapsedTime/1000.,  # ms -> seconds
        'totalDistance': totalDistance,
        'useImpeller': useImpeller,
        'seatNumber': seatNumber,
    }

    # dologging('nklog.log',json.dumps(uploadoptions))

    session = requests.session()
    newHeaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    session.headers.update(newHeaders)

    response = session.post(UPLOAD_SERVICE_URL, json=uploadoptions)

    if response.status_code != 200:  # pragma: no cover
        return 0, response.text

    # NOTE(review): on a 200 without an 'id' field the workout id silently
    # defaults to 1 — confirm this is the intended fallback.
    try:
        workoutid = response.json()['id']
    except KeyError:  # pragma: no cover
        workoutid = 1

    # dologging('nklog.log','Workout ID {id}'.format(id=workoutid))

    # TODO: possibly update the workout summary afterwards

    return workoutid, ""
|
|
|
|
|
|
def get_nk_intervalstats(workoutdata, strokedata):
    """Build the per-interval 'Workout Details' text table.

    Reads the ``intervals`` list from the NK workout summary dict (sorted by
    interval id) and formats one row per interval with distance, split time,
    pace, power, stroke rate, average heart rate and distance per stroke.
    The *strokedata* argument is accepted for signature symmetry with
    ``get_nk_summary`` but is not read here.
    """
    sep = "|"
    ordered = sorted(workoutdata['intervals'], key=lambda k: k['id'])

    parts = ["Workout Details\n"]
    parts.append(
        "#-{sep}SDist{sep}-Split-{sep}-SPace-{sep}-Pwr-{sep}-SPM--{sep}-AvgHR-{sep}DPS-\n".format(
            sep=sep))

    for nr, interval in enumerate(ordered, start=1):
        # Pace arrives in milliseconds; render as MM:SS.t.
        pace_td = timedelta(seconds=interval['avgPaceGps']/1000.)
        pace_str = strfdelta(pace_td)

        # Elapsed time in milliseconds; render via a UTC timestamp as MM:SS.t.
        elapsed = datetime.datetime.utcfromtimestamp(interval['elapsedTime']/1000.)
        split_str = "%s.%i" % (elapsed.strftime("%M:%S"),
                               elapsed.microsecond/100000)

        parts.append(
            "{i:0>2}{sep}{sdist:0>5}{sep}{split}{sep}{space}{sep} {pwr:0>3} {sep}".format(
                i=nr,
                sdist=int(float(interval['totalDistanceGps'])),
                split=split_str,
                space=pace_str,
                pwr=int(interval['avgPower']),
                sep=sep,
            ))
        parts.append(
            " {spm} {sep} {avghr:0>3} {sep}{dps:0>4.1f}\n".format(
                sep=sep,
                avghr=interval['avgHeartRate'],
                spm=interval['avgStrokeRate'],
                dps=interval['distStrokeGps'],
            ))

    return "".join(parts)
|
|
|
|
|
|
def get_nk_summary(workoutdata, strokedata):
    """Build the one-row 'Workout Summary' text table for an NK workout.

    workoutdata is the NK summary dict; strokedata is the per-stroke
    DataFrame, of which only the ``heartRate`` column is read (for max HR).
    Metrics reported as None by the device are shown as zero.
    """
    name = workoutdata['name']

    # Average pace arrives in milliseconds; render as MM:SS.t.
    avgpacestring = strfdelta(timedelta(seconds=workoutdata['avgPaceGps']/1000.))

    # Elapsed time in milliseconds; render via a UTC timestamp as HH:MM:SS.t.
    elapsed = datetime.datetime.utcfromtimestamp(workoutdata['elapsedTime']/1000.)
    timestring = "%s.%i" % (elapsed.strftime("%H:%M:%S"),
                            elapsed.microsecond/100000)

    maxhr = strokedata['heartRate'].max()

    # Normalize missing metrics (None) to 0 so the numeric formats below work.
    dist, spm, avghr, avgdps, maxhr, pwr = [
        0 if value is None else value
        for value in (
            workoutdata['totalDistanceGps'],
            workoutdata['avgStrokeRate'],
            workoutdata['avgHeartRate'],
            workoutdata['distStrokeGps'],
            maxhr,
            workoutdata['avgPower'],
        )
    ]

    sep = "|"

    parts = ["Workout Summary - " + name + "\n"]
    parts.append(
        "--{sep}Total{sep}--Total---{sep}--Avg--{sep}-Avg-{sep}-Avg--{sep}-Avg-{sep}-Max-{sep}-Avg\n".format(
            sep=sep))
    parts.append(
        "--{sep}Dist-{sep}--Time----{sep}-Pace--{sep}-Pwr-{sep}-SPM--{sep}-HR--{sep}-HR--{sep}-DPS\n".format(
            sep=sep))

    parts.append("--{sep}{dist:0>5.0f}{sep}".format(sep=sep, dist=dist))
    parts.append(timestring + sep + avgpacestring)
    parts.append("{sep}{avgpower:0>5.1f}".format(sep=sep, avgpower=pwr))
    parts.append("{sep} {avgsr:2.1f} {sep}{avghr:0>5.1f}{sep}".format(
        avgsr=spm, sep=sep, avghr=avghr))
    parts.append("{maxhr:0>5.1f}{sep}{avgdps:0>4.1f}\n".format(
        sep=sep, maxhr=maxhr, avgdps=avgdps))

    return "".join(parts)
|
|
|
|
|
|
def get_nk_allstats(data, workoutdata):
    """Return the full stats text: workout summary followed by interval details."""
    summary = get_nk_summary(data, workoutdata)
    details = get_nk_intervalstats(data, workoutdata)
    return summary + details
|
|
|
|
# def get_workout(user,nkid):
|
|
|
|
|
|
def getdict(x, seatIndex=1):
    """Return the stroke record for *seatIndex* as a plain dict.

    *x* is a collection of per-seat records, each carrying a ``seatIndex``
    field. Returns ``{}`` when the ``seatIndex`` column or the requested
    seat is absent.
    """
    frame = pd.DataFrame(x)
    try:
        row = frame.set_index('seatIndex').loc[seatIndex]
    except KeyError:
        return {}
    return dict(row)
|
|
|
|
|
|
def strokeDataToDf(strokeData, seatIndex=1):
    """Flatten NK stroke records into a single DataFrame.

    Each record in *strokeData* may carry a nested ``oarlockStrokes`` list of
    per-seat oarlock measurements; the record for *seatIndex* is flattened
    into additional columns. Returns a DataFrame sorted by ``timestamp``
    with missing values forward-filled.
    """
    df = pd.DataFrame.from_dict(strokeData)

    # Pull the per-seat oarlock record out of each stroke (empty dict when
    # absent), then expand those dicts into their own frame.
    oarlockData = df['oarlockStrokes']
    oarlockData = oarlockData.apply(lambda x: getdict(x, seatIndex=seatIndex))
    df2 = pd.DataFrame.from_records(oarlockData.values)

    if not df2.empty:
        # Row-aligned merge; the stroke-level timestamp keeps the plain name
        # when both frames carry a 'timestamp' column.
        df = df.merge(df2, left_index=True, right_index=True)
        df = df.rename(columns={"timestamp_x": "timestamp"})

    df = df.drop('oarlockStrokes', axis=1)
    df.sort_values(by='timestamp', ascending=True, inplace=True)
    # fillna(method='ffill') is deprecated since pandas 2.0 and removed in
    # pandas 3.0; ffill() is the equivalent modern spelling.
    df = df.ffill()

    return df
|
|
|
|
|
|
def readlogs_summaries(logfile, dosave=0):  # pragma: no cover
    """Re-parse an import log file and print/re-save workout summaries.

    Scans *logfile* line by line for "Importing" markers. Each marker is
    assumed to be followed by two log lines holding the JSON summary data
    (starting at '{') and the JSON stroke data (starting at '[') — TODO
    confirm this matches the current log layout.

    Parameters:
        logfile: path to the log file to scan.
        dosave: 0 prints all found workouts; a non-zero nkid-like value
            restricts printing to that workout and also dumps its stroke
            and summary JSON to local files.
    """
    with open(logfile, 'r') as f:
        # 'while f' is always true; the loop exits via the EOF break below.
        while f:
            s = f.readline()
            if s == "":
                break
            if "Importing" in s:
                words = s.split(" ")
                # The workout id is the first 6 chars of the last word.
                nkid = words[-1][0:6]
                print(nkid, dosave)
                print('')
                # The two lines after the marker carry the logged payloads.
                line1 = f.readline()
                line2 = f.readline()

                # Strip the log prefix: summary JSON starts at '{',
                # stroke JSON starts at '['.
                data1 = line1[line1.find('{'):]
                data2 = line2[line2.find('['):]

                try:
                    strokeData = json.loads(data2)
                    summaryData = json.loads(data1)
                    df = strokeDataToDf(strokeData)

                    if dosave == 0 or str(nkid) == str(dosave):
                        print(get_nk_allstats(summaryData, df))
                        # print(summaryData)
                    if str(dosave) == str(nkid):
                        print('saving')
                        filename = 'strokedata_{id}.json'.format(id=nkid)
                        filename2 = 'nk_summarydata_{id}.json'.format(id=nkid)
                        with open(filename, 'w') as f2:
                            json.dump(strokeData, f2)
                        with open(filename2, 'w') as f2:
                            json.dump(summaryData, f2)
                except Exception:
                    # Best-effort scan: report the parse failure and keep going.
                    print(traceback.format_exc())
                    print("error")