small bug fixes
This commit is contained in:
@@ -166,7 +166,7 @@ def create_c2_stroke_data_db(
     try:
         spm = 60.*nr_strokes/totalseconds
     except ZeroDivisionError:
-        spm = 20*zeros(nr_strokes)
+        spm = 20*np.zeros(nr_strokes)

     step = totalseconds/float(nr_strokes)
@@ -713,7 +713,7 @@ def getsmallrowdata_db(columns,ids=[],debug=False):
     columns = [c for c in columns if c != 'None']

     if len(ids)>1:
-        for f in csvfilenames:
+        for id, f in zip(ids,csvfilenames):
             try:
                 df = pd.read_parquet(f,columns=columns,engine='pyarrow')
                 data.append(df)
@@ -721,7 +721,10 @@ def getsmallrowdata_db(columns,ids=[],debug=False):
                 pass

-        df = pd.concat(data,axis=0)
+        try:
+            df = pd.concat(data,axis=0)
+        except ValueError:
+            df = pd.DataFrame()
     else:
         df = pd.read_parquet(csvfilenames[0],columns=columns,engine='pyarrow')

Reference in New Issue
Block a user