import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import numpy as np
import scipy.io as sio
from keras.layers import Conv1D, AveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.callbacks  import EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score
from keras.models import Sequential
from keras.layers.core import Activation, Dense, Flatten, Dropout
from keras.utils import np_utils
from keras.optimizers import SGD, Adam, RMSprop
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.calibration import CalibratedClassifierCV
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import confusion_matrix
import pandas as pd
import tensorflow as tf
import seaborn as sns
import matplotlib.pyplot as plt
#os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

Data

# Path to the pre-processed features
#dataPath = '../data/iir_feat_tensor_full_with_full_labels.mat'
dataPath = '../data/spectral_feat_tensor_full_with_full_labels.mat'

# Path to save features, results, etc.
savePath = '../results/1D_CNN_Results/Spectral/'

# Experiment name
experiment = 'HVLV_Spectral_PD'

filename = savePath+'CNN_1D_results_'+experiment+'.mat'
plot_title = 'HV vs LV Classification for Spectral Features (PD)'


# --- Model parameters ---

nb_filters = [16, 32, 32, 64, 128]  # Number of filters for convolution
kernel_size = 3
pool_size = 2
stride_size = 2
padding = 'same'
weight_decay = 0.000001
dense_layer_neuron_num = 128
epochs = 30
momentum = 0.8

# Load data
matContent = sio.loadmat(dataPath)
features = matContent['pd_feat']
labels = np.squeeze(matContent['pd_hvlv_labels'])
#labels[labels < 0] = 0
print(labels)
#labels[labels == 6] = 0
#labels = labels.astype(int)

#df = pd.read_csv(dataPath, header = None)
#features = df.iloc[1:,:-2].to_numpy()
#labels = df.iloc[1:,-1].to_numpy() # last but one column for PD vs NC

#dict_hvlv = {1:0, 2:1, 3:0, 4:0, 5:1, 6:0} #HVLV labels mapping dictionary
#labels = labels.map(dict_hvlv).to_numpy()
#labels[labels == 1] = 0
#labels[labels == 2] = 1
#labels[labels == 3] = 1
#labels[labels == 4] = 1
#labels[labels == 5] = 1
#labels[labels == 6] = 1
#labels[labels < 0]=0
#labels[labels == 6]=0

# randomise the sample sequence
rand_order = np.arange(features.shape[0])
np.random.shuffle(rand_order)
features = features[rand_order,]
labels = np.squeeze(labels[rand_order,])
class_num = np.size(np.unique(labels))
labels_categorical = np_utils.to_categorical(labels, class_num)
del matContent

print('Features :', features.shape)
print('Labels :', labels_categorical.shape)
print('Number of classes :', class_num)
print('Unique labels:',np.unique(labels))

[0 0 0 ... 0 0 0]
Features : (7388, 42)
Labels : (7388, 2)
Number of classes : 2
Unique labels: [0 1]

Model

# Function to create, and compile Keras model
def create_model(init_mode, activation, dropout_rate, optimizer, learn_rate):
#def create_model(activation):
  model = Sequential()
  model.add(Conv1D(filters=nb_filters[0], kernel_size=kernel_size, padding=padding,
                   activation=activation, input_shape=(X.shape[1],X.shape[2]), trainable=True))
  model.add(AveragePooling1D(pool_size=pool_size, strides=stride_size, padding=padding))
  model.add(Conv1D(filters=nb_filters[1], kernel_size=kernel_size, padding=padding,
                   activation=activation, kernel_initializer=init_mode, trainable=True))
  model.add(AveragePooling1D(pool_size=pool_size, strides=stride_size, padding=padding))
  model.add(Conv1D(filters=nb_filters[2], kernel_size=kernel_size, padding=padding,
                   activation=activation, kernel_initializer=init_mode, trainable=True))
  model.add(AveragePooling1D(pool_size=pool_size, strides=stride_size, padding=padding))
  # ####added by me#####
  #model.add(Conv1D(filters=nb_filters[3], kernel_size=kernel_size, padding=padding, activation=activation,
  #              kernel_initializer='he_normal'))
  #model.add(AveragePooling1D(pool_size=pool_size, strides=stride_size, padding=padding))
  #model.add(Conv1D(filters=nb_filters[4], kernel_size=kernel_size, padding=padding, activation=activation,
  #              kernel_initializer='he_normal'))
  #model.add(AveragePooling1D(pool_size=pool_size, strides=stride_size, padding=padding))
  # ####added by me#####
  model.add(Flatten())
  model.add(BatchNormalization(epsilon=0.001))
  model.add(Dense(dense_layer_neuron_num, kernel_initializer=init_mode, activation=activation))
  model.add(Dropout(dropout_rate))
  model.add(Dense(class_num))
  model.add(Activation('softmax'))
  model.summary()
  #model.load_weights('Gender_notClean_HIweights.hdf5')
  #earlyStopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
  # Build the chosen optimizer. Labels are one-hot encoded, so categorical
  # cross-entropy is used for every optimizer.
  if optimizer == 'SGD':
    opt = SGD(learning_rate=learn_rate, momentum=momentum, decay=weight_decay, nesterov=True)
  elif optimizer == 'Adam':
    opt = Adam(learning_rate=learn_rate)
  elif optimizer == 'RMSprop':
    opt = RMSprop(learning_rate=learn_rate, epsilon=1e-07)
  model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
  return model

Train and Test

mat_path = '../results/CNN_1D/CNN_1D_CSP/CNN_1D_results_HVLV_csp_PD.mat'
params = sio.loadmat(mat_path)
best_params = params['best_params']
del mat_path
print(best_params)

[[(array(['relu'], dtype='<U4'), array([[0.3]]), array(['he_normal'], dtype='<U9'), array([[0.001]]), array(['RMSprop'], dtype='<U7'))]]
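
The record loaded above is a MATLAB struct, not a Python dict, so best_params.get(...) would fail if the grid search below were skipped. A minimal helper sketch to unpack it, assuming the struct fields carry the same names as the grid-search keys (activation, dropout_rate, init_mode, learn_rate, optimizer):

# Hypothetical helper: flatten a 1x1 struct returned by scipy.io.loadmat into a plain dict
def matstruct_to_dict(rec):
    return {name: np.squeeze(rec[name][0, 0]).item() for name in rec.dtype.names}

# Optional: use the saved parameters directly instead of re-running the grid search
#best_params = matstruct_to_dict(best_params)
#print(best_params)  # e.g. {'activation': 'relu', 'dropout_rate': 0.3, ...}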

# Reshape the (samples, 42) feature matrix to (samples, 42, 1) so that Conv1D sees
# the 42 spectral features as a single-channel sequence

feat_shape = features.shape
#features = np.reshape(features, (feat_shape[0], feat_shape[1]*feat_shape[2]))
#scaler = StandardScaler()
#scaler.fit(features)
#scaleFeatures = scaler.transform(features)
features = np.reshape(features, (features.shape[0], 42, -1))

X = features
Y = labels_categorical
tf.keras.backend.clear_session()
estimator = create_model(init_mode='he_normal', learn_rate=0.0001, optimizer='Adam', activation='sigmoid', 
                         dropout_rate=0.3)

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1d (Conv1D)              (None, 42, 16)            64        
_________________________________________________________________
average_pooling1d (AveragePo (None, 21, 16)            0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, 21, 32)            1568      
_________________________________________________________________
average_pooling1d_1 (Average (None, 11, 32)            0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, 11, 32)            3104      
_________________________________________________________________
average_pooling1d_2 (Average (None, 6, 32)             0         
_________________________________________________________________
flatten (Flatten)            (None, 192)               0         
_________________________________________________________________
batch_normalization (BatchNo (None, 192)               768       
_________________________________________________________________
dense (Dense)                (None, 128)               24704     
_________________________________________________________________
dropout (Dropout)            (None, 128)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 258       
_________________________________________________________________
activation (Activation)      (None, 2)                 0         
=================================================================
Total params: 30,466
Trainable params: 30,082
Non-trainable params: 384
_________________________________________________________________

cnn_model = KerasClassifier(build_fn=create_model, verbose=1)  # Call the classifier

#batch_size = [16,32]
#epochs = [5,10,15]
#optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
#init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'he_normal', 'he_uniform']
#activation = ['softmax', 'softsign', 'relu', 'tanh', 'sigmoid', 'linear']
#dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
#learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
#momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
#neurons = [1, 5, 10, 15, 20, 25, 30]

learn_rate = [0.0001, 0.001, 0.01]
optimizer = ['SGD', 'Adam','RMSprop']
#momentum = [0.8,0.9]
init_mode = ['he_normal','he_uniform']
activation = ['relu','tanh']
dropout_rate = [0.3,0.4,0.5]
foldNum = 10

p_grid = dict(init_mode=init_mode, dropout_rate=dropout_rate, activation=activation,
              optimizer=optimizer, learn_rate=learn_rate)
              #, momentum=momentum)
grid = GridSearchCV(estimator=cnn_model, param_grid=p_grid,
                    cv=foldNum, verbose=0)
# Standardize: flatten to 2D for StandardScaler, then reshape back to (samples, 42, 1)
# to match the Conv1D input shape
feat_shape = features.shape
features_2d = np.reshape(features, (feat_shape[0], -1))
scaler = StandardScaler()
scaler.fit(features_2d)
scaleFeatures = scaler.transform(features_2d)
scaleFeatures = np.reshape(scaleFeatures, (feat_shape[0], 42, -1))

# Perform Grid Search
print('Performing Gridsearch')
X = scaleFeatures
Y = labels_categorical
grid_result = grid.fit(X,Y)
best_params = grid_result.best_params_
print('Best parameters:', best_params)
'''
tf.keras.backend.clear_session()
estimator = create_model(init_mode=best_params.get('init_mode'), 
                         learn_rate=best_params.get('learn_rate'), 
                         optimizer=best_params.get('optimizer'), 
                         #momentum=best_params.get('momentum'), 
                         activation=best_params.get('activation'), 
                         dropout_rate=best_params.get('dropout_rate'))
'''

foldNum = 5
conf_mat = np.zeros((2,2))
#conf_mat = np.zeros((6,6))
f = 0
val_loss = []
val_accuracy = []
train_loss = []
train_accuracy = []
f1_MacroNet = np.zeros([foldNum,]) 
f1_weightedNet = np.zeros([foldNum,])
precisionNet = np.zeros([foldNum,])
recallNet = np.zeros([foldNum,])
accNet = np.zeros([foldNum,])

#channels = features.shape[2] # number of channels 14
# Use Stratified K Fold to make sure that the train data and test data split distribution match.
kfold = StratifiedKFold(n_splits=foldNum, random_state=100, shuffle=True)

#features = np.reshape(features, (features.shape[0],640,14)) #reshaped to (None, 3, 14) for spectral

cm_list = []
for train, test in kfold.split(features, labels):
    trainingFeatures = features[train,:,:]
    testFeatures = features[test,:,:]
    train_shape = trainingFeatures.shape
    test_shape = testFeatures.shape
    
    # Standardize
    #scaler = StandardScaler()
    #trainingFeatures = np.reshape(trainingFeatures, [train_shape[0], train_shape[1]*train_shape[2]])
    #testFeatures = np.reshape(testFeatures, [test_shape[0], test_shape[1]*test_shape[2]])
    #scaler.fit(trainingFeatures)
    #trainingFeatures = scaler.transform(trainingFeatures)
    #trainingFeatures = np.reshape(trainingFeatures, [train_shape[0],train_shape[1],train_shape[2]])
    #testFeatures = scaler.transform(testFeatures)
    #testFeatures = np.reshape(testFeatures,[test_shape[0], test_shape[1], test_shape[2]])
    tf.keras.backend.clear_session()
    estimator = create_model(init_mode=best_params.get('init_mode'),
                         learn_rate=best_params.get('learn_rate'),
                         optimizer=best_params.get('optimizer'),
                         #momentum=best_params.get('momentum'),
                         activation=best_params.get('activation'),
                         dropout_rate=best_params.get('dropout_rate'))

    estimator.summary()
    X_train, X_val, y_train, y_val = train_test_split(trainingFeatures, labels_categorical[train,:], test_size=0.1,
                                                      random_state=100, stratify=labels_categorical[train,:])
    #earlyStopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='min')
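    # To enable early stopping, uncomment the callback above and pass callbacks=[earlyStopping]
    # to the fit() call below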
    dummy = estimator.fit(X_train, y_train, validation_data=(X_val, y_val), batch_size=32, epochs=epochs,
                          verbose=2)
    
    predicted_labelsNet = estimator.predict_classes(testFeatures, verbose=0)
    cm = confusion_matrix(labels[test,], predicted_labelsNet, labels=[0,1])
    #cm = confusion_matrix(labels[test,], predicted_labelsNet, labels=[1,2,3,4,5,0])
    cm = cm/cm.sum(axis=1, keepdims=True)
    #conf_mat = conf_mat+cm
    cm_list.append(cm)

    precisionNet[f] = precision_score(labels[test,], predicted_labelsNet, average='macro')
    recallNet[f] = recall_score(labels[test,], predicted_labelsNet, average='macro')
    f1_MacroNet[f] = f1_score(labels[test,], predicted_labelsNet, average='macro')
    f1_weightedNet[f] = f1_score(labels[test,], predicted_labelsNet, average='weighted')
    accNet[f] = accuracy_score(labels[test,], predicted_labelsNet)
    print(experiment + '_CNN: Fold %d : f1_macroscore: %.4f' % (f + 1, f1_MacroNet[f]))
    print(experiment + '_CNN: Fold %d : f1_weightedscore: %.4f' % (f + 1, f1_weightedNet[f]))
    print(experiment + '_CNN: Fold %d : acc: %.4f' % (f + 1, accNet[f]))
    f += 1
    train_loss.append(dummy.history['loss'])
    val_loss.append(dummy.history['val_loss'])
    train_accuracy.append(dummy.history['accuracy'])
    val_accuracy.append(dummy.history['val_accuracy'])

#xc = range(1,epochs+1)
tr_loss_df = pd.DataFrame(train_loss)
tr_acc_df = pd.DataFrame(train_accuracy)
val_loss_df = pd.DataFrame(val_loss)
val_acc_df = pd.DataFrame(val_accuracy)

train_loss_mlist = tr_loss_df.mean(axis=0).to_numpy()
train_loss_slist = tr_loss_df.std(axis=0).to_numpy()
train_acc_mlist = tr_acc_df.mean(axis=0).to_numpy() 
train_acc_slist = tr_acc_df.std(axis=0).to_numpy() 
val_loss_mlist = val_loss_df.mean(axis=0).to_numpy() 
val_loss_slist = val_loss_df.std(axis=0).to_numpy() 
val_acc_mlist = val_acc_df.mean(axis=0).to_numpy()
val_acc_slist = val_acc_df.std(axis=0).to_numpy()

Visualise results

plt.figure()
plt.subplot(1, 2, 1)

y1 = train_loss_mlist
e1 = train_loss_slist
y2 = val_loss_mlist
e2 = val_loss_slist
x1 = range(1, len(train_loss_mlist) + 1)
a1 = plt.plot(x1, y1, color='#089FFF', label='Training Loss')
plt.fill_between(x1, y1 - e1, y1 + e1, alpha=0.3, facecolor='#089FFF', linewidth=4)
a2 = plt.plot(x1, y2, color='#cc9106', label='Validation Loss')
plt.fill_between(x1, y2 - e2, y2 + e2, alpha=0.3, facecolor='#cc9106', linewidth=4)
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend()

plt.subplot(1, 2, 2)
y3 = train_acc_mlist
e3 = train_acc_slist
y4 = val_acc_mlist
e4 = val_acc_slist
x2 = range(1, len(train_acc_mlist) + 1)
a1 = plt.plot(x2, y3, color='#089FFF', label='Training Accuracy')
plt.fill_between(x2, y3 - e3, y3 + e3, alpha=0.3, facecolor='#089FFF', linewidth=4)
a2 = plt.plot(x2, y4, color='#cc9106', label='Validation Accuracy')
plt.fill_between(x2, y4 - e4, y4 + e4, alpha=0.3, facecolor='#cc9106', linewidth=4)
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend()
plt.tight_layout()
#plt.savefig(filename[:-4]+'_loss_acc_curve'+'.png')
plt.show()

#conf_mat /= conf_mat.sum(axis=1, keepdims=True) #Normalised values in the CM
conf_mat = np.mean(cm_list, axis=0)
#fig, ax = plt.subplots(figsize=(10,8))
ax = sns.heatmap(conf_mat, cmap='YlGnBu', annot=True, fmt='.4f', vmin=0, vmax=1, annot_kws={'fontsize': 12})
ax.set_yticklabels(['NC', 'PD'], rotation=0)
ax.set_xticklabels(['NC', 'PD'], rotation=0)
#ax.set_yticklabels(['Sad', 'Happy', 'Fear', 'Disgust', 'Surprise', 'Anger'], rotation = 0)
#ax.set_xticklabels(['Sad', 'Happy', 'Fear', 'Disgust', 'Surprise', 'Anger'], rotation = 0)

ax.set_title(plot_title)
ax.get_figure().savefig(filename[:-4] + '_conf_mat' + '.png')
plt.show()

print('Mean and std of F1 MACRO is %.4f +- %.4f' % (np.mean(f1_MacroNet), np.std(f1_MacroNet)))
print('Mean and std of F1 WEIGHTED is %.4f +- %.4f' % (np.mean(f1_weightedNet), np.std(f1_weightedNet)))
print('Mean and std of accuracy is %.4f +- %.4f' % (np.mean(accNet), np.std(accNet)))

# Save results
sio.savemat(filename, {'precisionNet': precisionNet, 'recallNet': recallNet, 'f1_MacroNet': f1_MacroNet,
                       'f1_weightedNet': f1_weightedNet, 'accNet': accNet, 'conf_mat': conf_mat,
                       'conf_mat_list': cm_list,
                       'best_params': best_params, 'experiment': experiment, 'nb_filters': nb_filters,
                       'kernel_size': kernel_size, 'pool_size': pool_size, 'stride_size': stride_size,
                       'padding': padding,
                       'weight_decay': weight_decay, 'dense_layer_neuron_num': dense_layer_neuron_num, 'epochs': epochs,
                       'train_loss': train_loss, 'train_accuracy': train_accuracy, 'val_loss': val_loss,
                       'val_accuracy': val_accuracy})
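
As a quick sanity check, the saved file can be reloaded to confirm the metrics round-trip; a minimal sketch using the keys written above:

saved = sio.loadmat(filename)
print('Reloaded mean accuracy : %.4f' % np.mean(saved['accNet']))
print('Reloaded mean macro F1 : %.4f' % np.mean(saved['f1_MacroNet']))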

