Skip to content

Commit 20247f3

Browse files
committed
Individual test scripts for each feature
Final version
1 parent d1feb05 commit 20247f3

File tree

5 files changed

+282
-96
lines changed

5 files changed

+282
-96
lines changed

Diff for: test3.py

+2-96
Original file line numberDiff line numberDiff line change
@@ -4,100 +4,6 @@
44
55
@author: Shamir
66
"""
7-
#==============================================================================
8-
#
9-
# for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
10-
# gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
11-
# dataset = os.listdir(sourcePath + gesture)[0] # Dataset: Train = 0, Cross Validation = 1, Test = 2
12-
# copy = False
13-
# variance_array = []
14-
#
15-
# for k in range(len(os.listdir(sourcePath + gesture + backslash + dataset))):
16-
# sensor = os.listdir(sourcePath + gesture + backslash + dataset)[k] # Sensor15, Sensor16, Sensor17, Sensor18, Sensor19
17-
# sensorFolder = os.listdir(sourcePath + gesture + backslash + dataset + backslash + sensor) # 1.csv ... 4.csv
18-
#
19-
# for l in range(len(sensorFolder)):
20-
# csvfile = sourcePath + gesture + backslash + dataset + backslash + sensor + backslash + sensorFolder[l] # full filepath
21-
# readFile = pandas.read_csv(csvfile, header = None)
22-
# readFile.values[1:] = readFile.values[1:].astype(float)
23-
#
24-
# variance = ['Var_' + sensor[6:] + '_' + readFile.values[0,0]]
25-
# print variance
26-
# variance = np.asarray(variance)
27-
#
28-
# if copy == True:
29-
# for m in range(1, len(readFile.values)): # |||len(readFile.values)|||
30-
# ## need to add code to check if number_of_rows matches
31-
# Var = np.var(readFile.values[m])
32-
# variance = np.vstack((variance, Var))
33-
# variance_array = np.hstack((variance_array, variance))
34-
# else:
35-
# for m in range(1, len(readFile.values)):
36-
# Var = np.var(readFile.values[m])
37-
# variance = np.vstack((variance, Var))
38-
# #covariance_array = np.zeros([len(readFile1.values),1])
39-
# variance_array = variance.copy()
40-
# copy = True
41-
# if i == 0:
42-
# fullFile = DataFrame(variance_array)
43-
# else:
44-
# variance_array = DataFrame(variance_array)
45-
# fullFile = pandas.concat([fullFile, variance_array], join = 'inner')
46-
#==============================================================================
47-
#==============================================================================
48-
#
49-
#
50-
# for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
51-
# gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
52-
# dataset = os.listdir(sourcePath + gesture)[0] # Train, Cross Validation, Test
53-
# copy = False
54-
# velocity_array = []
55-
#
56-
# for k in range(len(os.listdir(sourcePath + gesture + backslash + dataset))):
57-
# sensor = os.listdir(sourcePath + gesture + backslash + dataset)[k]
58-
# sensorFolder = os.listdir(sourcePath + gesture + backslash + dataset + backslash + sensor)
59-
#
60-
# for l in range(len(sensorFolder)):
61-
# csvfile = sourcePath + gesture + backslash + dataset + backslash + sensor + backslash + sensorFolder[l] # full filepath
62-
# readFile = pandas.read_csv(csvfile, header = None)
63-
# readFile.values[1:] = readFile.values[1:].astype(float)
64-
#
65-
# velocity = ['Vel_' + sensor[6:] + '_' + readFile.values[0,0]]
66-
# print velocity
67-
# velocity = np.asarray(velocity)
68-
# distance = 0
69-
# time = np.shape(readFile.values)[1] / frequency_quat
70-
#
71-
# if copy == True:
72-
# for m in range(1, len(readFile.values)): # for every two files
73-
# for n in range(np.shape(readFile.values)[1] - 1):
74-
# ## need to add code to check if number_of_rows matches
75-
# next_index = n + 1
76-
# distance += euclidean(readFile.values[m, n], readFile.values[m, next_index])
77-
# vel = distance/time
78-
# velocity = np.vstack((velocity, vel))
79-
# velocity_array = np.hstack((velocity_array, velocity))
80-
# else:
81-
# for m in range(1, len(readFile.values)): # len(readFile.values)
82-
# for n in range(np.shape(readFile.values)[1] - 1):
83-
# next_index = n + 1
84-
# distance += euclidean(readFile.values[m, n], readFile.values[m, next_index])
85-
# vel = distance/time
86-
# velocity = np.vstack((velocity, vel))
87-
# velocity_array = velocity.copy()
88-
# copy = True
89-
# # Create complete file structure/dataframe
90-
# if i == 0:
91-
# fullFile3 = DataFrame(velocity_array)
92-
# else:
93-
# velocity_array = DataFrame(velocity_array)
94-
#==============================================================================
95-
#==============================================================================
96-
# fullFile3 = pandas.concat([fullFile3, velocity_array], join = 'inner')
97-
#
98-
#
99-
#==============================================================================
100-
#==============================================================================
1017

1028
def CalculateValidData():
1039
# Calculate the number of missing values in the array
@@ -157,7 +63,7 @@ def CalculateValidData():
15763
#print '1st catch (copy = True) at file, m, n = ', csvfile[-6:], m, n
15864
break
15965

160-
valid_data = CalculateValidData() - 60 # Exclude missing values (we exclude 6 more values to remain within a safer margin)
66+
valid_data = CalculateValidData() # Exclude missing values
16167
time = valid_data / frequency_euc
16268

16369
precessionVelocity = precession/time
@@ -218,7 +124,7 @@ def CalculateValidData():
218124
#print '1st catch (copy = False) at print file, m, n = ', csvfile[-6:], m, n
219125
continue
220126

221-
valid_data = CalculateValidData() - 60
127+
valid_data = CalculateValidData()
222128
time = valid_data / frequency_euc
223129

224130
precessionVelocity = precession/time

Diff for: test_covariance.py

+82
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
Created on Thu Oct 01 21:08:13 2015
4+
5+
@author: Shamir
6+
"""
7+
8+
# Calculate the number of missing values in the array
def CalculateValidData(currentFile, currentRow): # currentFile = readFile, currentRow = m
    """Return the count of non-missing (non-NaN) datapoints in one row.

    currentFile -- a pandas object exposing .values (parsed csv file)
    currentRow  -- integer row index into currentFile.values
    """
    row = currentFile.values[currentRow]
    # total length minus however many entries pandas flags as null
    missing = len(row[pandas.isnull(row)])
    return len(row) - missing
14+
# Covariance feature extraction (Python 2 script).
# For every gesture folder under sourcePath, computes the row-wise covariance
# of each pair of sensors and accumulates the results into fullFile5.
# NOTE(review): relies on names defined elsewhere in the project
# (sourcePath, backslash, os, np, pandas, DataFrame, natsorted, combinations,
# CalculateValidData) -- not all visible in this scrape.
sensor_combos = np.asarray(list(combinations(range(15,20), 2)))  # sensor id pairs: (15..19 choose 2) -> 10 combos

for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
    gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
    copy = False  # False until the first sensor pair seeds covariance_array
    covariance_array = []

    for k in range(len(sensor_combos)): # we have 10 combinations
        ## this section can be optimized for greater computational efficiency
        sensorFolder1 = 'Sensor' + str(sensor_combos[k,0])
        sensorFolder2 = 'Sensor' + str(sensor_combos[k,1])
        sensor1 = os.listdir(sourcePath + gesture + backslash + sensorFolder1) # desired csv files in the folder
        sensor2 = os.listdir(sourcePath + gesture + backslash + sensorFolder2)
        sensor1 = natsorted(sensor1)  # natural sort so e.g. 2.csv precedes 10.csv
        sensor2 = natsorted(sensor2)

        for l in range(len(sensor1)):
            csvfile1 = sourcePath + gesture + backslash + sensorFolder1 + backslash + sensor1[l] # full filepath
            csvfile2 = sourcePath + gesture + backslash + sensorFolder2 + backslash + sensor2[l]
            readFile1 = pandas.read_csv(csvfile1, header = None)
            readFile2 = pandas.read_csv(csvfile2, header = None)

            # row 0 appears to hold a text label (used in the header below);
            # remaining rows are cast to numeric samples
            readFile1.values[1:] = readFile1.values[1:].astype(float)
            readFile2.values[1:] = readFile2.values[1:].astype(float)

            number_of_rows = len(readFile1.values)
            # column header, e.g. 'Cov_15_16_<label from row 0>'
            covariance = ['Cov_' + sensorFolder1[6:] + '_' + sensorFolder2[6:] + '_' + readFile1.values[0,0]]
            print covariance, csvfile1[-7:], csvfile2[-7:]
            covariance = np.asarray(covariance)

            if copy == True:
                for m in range(1, number_of_rows): # for every two files; len(readFile1.values)
                    ## need to add code to check if number_of_rows matches
                    valid_data1 = CalculateValidData(readFile1, m) # exclude missing values
                    valid_data2 = CalculateValidData(readFile2, m)

                    # consider the shorter length for both the arrays to avoid dimension error
                    if valid_data1 > valid_data2:
                        cov = np.cov(readFile1.values[m, 0:valid_data2], readFile2.values[m, 0:valid_data2], bias = 1)[0,1]
                        covariance = np.vstack((covariance, cov))
                    else:
                        cov = np.cov(readFile1.values[m, 0:valid_data1], readFile2.values[m, 0:valid_data1], bias = 1)[0,1]
                        covariance = np.vstack((covariance, cov))

                # append this pair's column alongside previous columns
                covariance_array = np.hstack((covariance_array, covariance))

            else:
                for m in range(1, number_of_rows):
                    valid_data1 = CalculateValidData(readFile1, m)
                    valid_data2 = CalculateValidData(readFile2, m)

                    # consider the shorter length for both the arrays to avoid dimension error
                    if valid_data1 > valid_data2:
                        cov = np.cov(readFile1.values[m, 0:valid_data2], readFile2.values[m, 0:valid_data2], bias = 1)[0,1]
                        covariance = np.vstack((covariance, cov))
                    else:
                        cov = np.cov(readFile1.values[m, 0:valid_data1], readFile2.values[m, 0:valid_data1], bias = 1)[0,1]
                        covariance = np.vstack((covariance, cov))

                covariance_array = covariance.copy()  # first pair seeds the array
                copy = True

    # Create complete file structure/dataframe
    if i == 0:
        fullFile5 = DataFrame(covariance_array)
    else:
        covariance_array = DataFrame(covariance_array)
        fullFile5 = pandas.concat([fullFile5, covariance_array], join = 'inner')
# NOTE(review): indentation reconstructed from a flattened diff scrape -- the
# nesting of the hstack/copy lines and of the final fullFile5 merge should be
# verified against the original file.

Diff for: test_range.py

+62
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
Created on Thu Oct 01 19:43:07 2015
4+
5+
@author: Shamir
6+
"""
7+
8+
# -*- coding: utf-8 -*-
9+
"""
10+
Created on Thu Oct 01 19:26:11 2015
11+
12+
@author: Shamir
13+
"""
14+
15+
# Calculate the number of missing values in the array
def CalculateValidData(currentFile, currentRow): # currentFile = readFile, currentRow = m
    """Count the valid (non-NaN) datapoints in row *currentRow* of *currentFile*."""
    row = currentFile.values[currentRow]
    # subtract the null entries from the row's total length
    return len(row) - len(row[pandas.isnull(row)])
21+
22+
for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
23+
gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
24+
copy = False
25+
range_array = []
26+
27+
for k in range(len(os.listdir(sourcePath + gesture))):
28+
sensor = os.listdir(sourcePath + gesture)[k]
29+
sensorFolder = os.listdir(sourcePath + gesture + backslash + sensor)
30+
sensorFolder = natsorted(sensorFolder)
31+
32+
for l in range(len(sensorFolder)):
33+
csvfile = sourcePath + gesture + backslash + sensor + backslash + sensorFolder[l] # full filepath
34+
readFile = pandas.read_csv(csvfile, header = None)
35+
readFile.values[1:] = readFile.values[1:].astype(float)
36+
37+
number_of_rows = len(readFile.values)
38+
range_header = ['Range_' + sensor[6:] + '_' + readFile.values[0,0]]
39+
print range_header, csvfile[-7:]
40+
range_header = np.asarray(range_header)
41+
42+
if copy == True:
43+
for m in range(1, number_of_rows): # for every two files
44+
## need to add code to check if number_of_rows matches
45+
valid_data = CalculateValidData(readFile, m)
46+
Range = np.ptp(readFile.values[m, 0:valid_data])
47+
range_header = np.vstack((range_header, Range))
48+
range_array = np.hstack((range_array, range_header))
49+
else:
50+
for m in range(1, number_of_rows):
51+
valid_data = CalculateValidData(readFile, m)
52+
Range = np.ptp(readFile.values[m, 0:valid_data])
53+
range_header = np.vstack((range_header, Range))
54+
#covariance_array = np.zeros([len(readFile1.values),1])
55+
range_array = range_header.copy()
56+
copy = True
57+
# Create complete file structure/dataframe
58+
if i == 0:
59+
fullFile2 = DataFrame(range_array)
60+
else:
61+
range_array = DataFrame(range_array)
62+
fullFile2 = pandas.concat([fullFile2, range_array], join = 'inner')

Diff for: test_variance.py

+55
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
Created on Thu Oct 01 20:54:02 2015
4+
5+
@author: Shamir
6+
"""
7+
8+
# Calculate the number of missing values in the array
def CalculateValidData(currentFile, currentRow): # currentFile = readFile, currentRow = m
    """Return how many entries of the given row are valid, i.e. not NaN."""
    full_row = currentFile.values[currentRow]
    nan_mask = pandas.isnull(full_row)
    # valid datapoints = total entries minus the null ones
    return len(full_row) - len(full_row[nan_mask])
14+
# Variance feature extraction (Python 2 script).
# For every gesture and sensor csv, computes the variance of each valid
# (non-NaN) row prefix and accumulates the columns into fullFile1.
# NOTE(review): relies on names defined elsewhere (sourcePath, backslash, os,
# np, pandas, DataFrame, natsorted) -- not all visible in this scrape.
for i in range(len(os.listdir(sourcePath))): # we have 6 files corresponding to 6 gestures
    gesture = os.listdir(sourcePath)[i] # Jab, Uppercut, Throw, Jets, Block, Asgard
    copy = False  # False until the first sensor seeds variance_array
    variance_array = []

    for k in range(len(os.listdir(sourcePath + gesture))):
        sensor = os.listdir(sourcePath + gesture)[k] # Sensor15, Sensor16, Sensor17, Sensor18, Sensor19
        sensorFolder = os.listdir(sourcePath + gesture + backslash + sensor) # 1.csv ... 4.csv
        sensorFolder = natsorted(sensorFolder)  # natural sort so e.g. 2.csv precedes 10.csv

        for l in range(len(sensorFolder)):
            csvfile = sourcePath + gesture + backslash + sensor + backslash + sensorFolder[l] # full filepath
            readFile = pandas.read_csv(csvfile, header = None)
            # row 0 appears to hold a text label; remaining rows become numeric
            readFile.values[1:] = readFile.values[1:].astype(float)

            number_of_rows = len(readFile.values)
            # column header, e.g. 'Var_15_<label from row 0>'
            variance = ['Var_' + sensor[6:] + '_' + readFile.values[0,0]]
            print variance, csvfile[-7:]
            variance = np.asarray(variance)

            if copy == True:
                for m in range(1, number_of_rows): # |||len(readFile.values)|||
                    ## need to add code to check if number_of_rows matches
                    valid_data = CalculateValidData(readFile, m) # exclude missing values
                    Var = np.var(readFile.values[m, 0:valid_data])
                    variance = np.vstack((variance, Var))
                variance_array = np.hstack((variance_array, variance))
            else:
                for m in range(1, number_of_rows):
                    valid_data = CalculateValidData(readFile, m)
                    Var = np.var(readFile.values[m, 0:valid_data])
                    variance = np.vstack((variance, Var))
                #covariance_array = np.zeros([len(readFile1.values),1])
                variance_array = variance.copy()  # first column seeds the array
                copy = True
    # Create complete file structure/dataframe
    if i == 0:
        fullFile1 = DataFrame(variance_array)
    else:
        variance_array = DataFrame(variance_array)
        fullFile1 = pandas.concat([fullFile1, variance_array], join = 'inner')
# NOTE(review): indentation reconstructed from a flattened diff scrape --
# verify the nesting of the hstack/copy lines and the final merge.

0 commit comments

Comments
 (0)