-
Notifications
You must be signed in to change notification settings - Fork 0
/
1DCNN.py
186 lines (142 loc) · 6.13 KB
/
1DCNN.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
from Functions import inputstargets
import numpy as np
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import Sequential, optimizers, activations, callbacks
from keras.layers import Dense, Conv1D, Flatten, MaxPooling1D,Dropout, BatchNormalization, Activation
from keras.utils import to_categorical
import time
# Subject datasets to evaluate: subjects 1 through 27 inclusive.
subjects = [*range(1, 28)]
# Accumulators for per-subject results gathered across the whole experiment.
all_acc = []          # test-set classification accuracy for each subject
all_train_time = []   # wall-clock training time for each subject (seconds)
all_test_times = []   # wall-clock evaluation time for each subject (seconds)
def scheduler(epoch):
    """Stepwise learning-rate schedule for Keras' LearningRateScheduler.

    Args:
        epoch: Zero-based training epoch index.

    Returns:
        float: 1e-3 for epochs < 50, 5e-4 for 50-99, 1e-4 for 100-149,
        and 1e-5 from epoch 150 onward.
    """
    # Guard-clause form: each earlier return makes the lower bound of the
    # next range implicit, removing the original's redundant range checks.
    if epoch < 50:
        return 0.001
    if epoch < 100:
        return 0.0005
    if epoch < 150:
        return 0.0001
    return 0.00001
# Iterate through subjects: train, validate and test one 1D CNN per subject.
for subject in subjects:
    # Retrieve input sEMG feature data and target labels from CSV files.
    inputs, targets = inputstargets(subject, "train")
    val_inputs, val_targets = inputstargets(subject, "validation")
    test_inputs, test_targets = inputstargets(subject, "test")
    # Reshape data inputs to the CNN's preferred (samples, 40, 10) layout.
    # NOTE(review): assumes the loader yields (samples, 10, 40) — confirm
    # against inputstargets in Functions.py.
    inputs = np.array(inputs).transpose(0, 2, 1)
    val_inputs = np.array(val_inputs).transpose(0, 2, 1)
    test_inputs = np.array(test_inputs).transpose(0, 2, 1)
    # One-hot encode the 15 gesture classes.
    targets = to_categorical(targets, num_classes=15)
    val_targets = to_categorical(val_targets, num_classes=15)
    test_targets = to_categorical(test_targets, num_classes=15)

    # Define optimised 1D CNN architecture.
    model = Sequential()
    # 1st convolutional layer (bias omitted because BatchNorm follows).
    model.add(Conv1D(32, kernel_size=3, strides=1, padding='same',
                     use_bias=False, input_shape=(40, 10)))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))
    model.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    # 2nd convolutional layer
    model.add(Conv1D(64, kernel_size=3, strides=1, padding='same',
                     use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))
    model.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    # 3rd convolutional layer
    model.add(Conv1D(128, kernel_size=3, strides=1, padding='same',
                     use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))
    model.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    # 4th convolutional layer
    model.add(Conv1D(64, kernel_size=3, strides=1, padding='same',
                     use_bias=False))
    model.add(BatchNormalization())
    model.add(Activation(activations.relu))
    model.add(MaxPooling1D(pool_size=2, strides=1, padding='same'))
    model.add(Dropout(0.5))
    # Classifier head.
    model.add(Flatten())
    # NOTE(review): these Dense layers use linear/no activation, so the three
    # stacked 100-unit layers collapse to a single affine map — possibly
    # 'relu' was intended. Left unchanged to preserve the trained behaviour.
    model.add(Dense(100, activation='linear'))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='linear'))
    model.add(Dropout(0.5))
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(15, activation='softmax'))

    # Compile model: Adam optimiser with categorical cross-entropy loss.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.001),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    callback = callbacks.LearningRateScheduler(scheduler)  # stepwise learning rate

    # Train and validate the model, timing the fit.
    start_time = time.time()
    history = model.fit(inputs, targets, epochs=200,
                        validation_data=(val_inputs, val_targets),
                        verbose=1, callbacks=[callback], batch_size=64)
    train_time = time.time() - start_time

    # Test the model and store per-subject accuracy.
    loss, acc = model.evaluate(test_inputs, test_targets, verbose=1)
    all_acc.append(acc)
    # Evaluation time = total elapsed so far minus the training time.
    test_time = time.time() - train_time - start_time
    all_train_time.append(train_time)
    all_test_times.append(test_time)

    # Plot loss against epoch for the training and validation sets.
    plt.figure(1)
    plt.plot(history.history['loss'], label='Training')
    # Legend fixed: this curve is validation loss (was mislabelled 'Test').
    plt.plot(history.history['val_loss'], label='Validation')
    plt.xlabel('Epoch')
    plt.ylabel("Model loss")
    plt.legend()
    plt.show()
    # Plot accuracy against epoch for the training and validation sets.
    plt.figure(2)
    plt.plot(history.history['accuracy'], label='Training')
    plt.plot(history.history['val_accuracy'], label='Validation')
    plt.xlabel('Epoch')
    plt.ylabel("Classification accuracy/%")
    plt.legend()
    plt.show()

    # Predict the whole test set in one batched call so performance metrics
    # can be generated. (The original predicted one sample at a time in a
    # Python loop — same results, but orders of magnitude slower.)
    predictions = model.predict(test_inputs)
    Predicted_Class = np.argmax(predictions, axis=1)
    Actual_Class = np.argmax(test_targets, axis=1)

    # Create and display confusion matrix.
    cm = confusion_matrix(Actual_Class, Predicted_Class)
    display = [str(label) for label in range(15)]
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display)
    disp.plot(cmap="Blues")
    # Create and print classification report.
    cr = classification_report(Actual_Class, Predicted_Class)
    print(cr)
    plt.show()
# Calculate average classification accuracy across all subjects.
all_acc = np.array(all_acc)
totalaccuracy = np.mean(all_acc)
print("avg:", totalaccuracy, "values:", all_acc)
# Calculate average and cumulative train and test times across subjects.
all_train_time = np.array(all_train_time)
all_test_times = np.array(all_test_times)
avg_train = np.mean(all_train_time)
avg_test = np.mean(all_test_times)
cum_train = np.sum(all_train_time)
cum_test = np.sum(all_test_times)
print("train times", all_train_time)
print("test times", all_test_times)
# Labels below normalised: the originals had stray leading spaces and
# inconsistent dot placement (" avg.train times", " cum.train times").
print("avg. train times", avg_train)
print("avg. test times", avg_test)
print("cum. train times", cum_train)
print("cum. test times", cum_test)