Incorporating-Clinical-Variables / SVM_ Classifier.ipynb
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, roc_curve, roc_auc_score
from bayes_opt import BayesianOptimization
def Discriminat_Vector(X, y, n):
    # Compute n discriminant directions for a binary problem: the first is the
    # Fisher/LDA direction, the rest are built recursively to be orthogonal to
    # the previous ones in the W^{-1} inner product.
    X1 = np.array([X[i] for i in range(len(X)) if y[i] == 0])
    X2 = np.array([X[i] for i in range(len(X)) if y[i] == 1])
    N1 = len(X1)
    N2 = len(X2)

    m1h = np.mean(X1, axis=0)
    m2h = np.mean(X2, axis=0)
    delta = m1h - m2h    # difference of the estimated class means

    # Within-class (pooled) covariance
    W1 = np.cov(X1.T)
    W2 = np.cov(X2.T)
    c = (N1 - 1) / (N1 + N2 - 2)   # pooled weight for the class-0 covariance

    A = c * W1 + (1 - c) * W2
    I = np.identity(X1.shape[1])
    W = A + 0.005 * I              # small ridge term keeps W invertible
    Wi = np.linalg.inv(W)
    Wi2 = Wi @ Wi
    Wi3 = Wi2 @ Wi

    # First (Fisher) direction
    d1 = Wi @ delta
    d1_hat = d1 / np.linalg.norm(d1)
    alpha1 = np.sqrt(1 / (delta.T @ Wi2 @ delta))

    # Second direction, deflated against the first
    b1 = (delta.T @ Wi2 @ delta) / (delta.T @ Wi3 @ delta)
    d2 = (Wi - b1 * Wi2) @ delta
    d2_hat = d2 / np.linalg.norm(d2)

    d_lists = [d1_hat, d2_hat]

    # Directions 3..n (the range is empty for n <= 2)
    for N in range(3, n + 1):
        S_n_1 = np.zeros([N - 1, N - 1])
        for i in range(N - 1):
            for j in range(N - 1):
                S_n_1[i][j] = d_lists[j].T @ (Wi @ d_lists[i])
        S_n_1_in = np.linalg.inv(S_n_1)
        alpha_list = np.zeros(N - 1)
        alpha_list[0] = 1 / alpha1

        D = np.array(d_lists)
        dn = Wi @ (delta - D.T @ S_n_1_in @ alpha_list)
        dn_hat = dn / np.linalg.norm(dn)
        d_lists.append(dn_hat)

    return d_lists, W
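A quick, hypothetical sanity check of Discriminat_Vector on synthetic two-class Gaussian data (the dimensions, mean shift, and class sizes below are arbitrary and not from the study): every returned direction should be unit-norm, and requesting n directions should return exactly n.

rng = np.random.default_rng(0)
X_syn = np.vstack([rng.normal(0.0, 1.0, size=(60, 8)),    # class 0
                   rng.normal(0.7, 1.0, size=(40, 8))])   # class 1, shifted mean
y_syn = np.array([0] * 60 + [1] * 40)
d_syn, W_syn = Discriminat_Vector(X_syn, y_syn, 4)
print(len(d_syn))                                            # 4 directions
print([round(float(np.linalg.norm(d)), 3) for d in d_syn])   # each 1.0 by construction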
def Discriminat_Vector_gender(X, y, n):
    # Single Fisher direction for the binary gender problem; the two class
    # covariances are weighted equally (c = 0.5). The argument n is kept only
    # for symmetry with Discriminat_Vector and is unused.
    X1 = np.array([X[i] for i in range(len(X)) if y[i] == 0])
    X2 = np.array([X[i] for i in range(len(X)) if y[i] == 1])

    m1h = np.mean(X1, axis=0)
    m2h = np.mean(X2, axis=0)
    delta = m1h - m2h    # difference of the estimated class means

    # Within-class covariances, equally weighted
    W1 = np.cov(X1.T)
    W2 = np.cov(X2.T)
    c = 0.5

    A = c * W1 + (1 - c) * W2
    I = np.identity(X1.shape[1])
    W = A + 0.005 * I    # small ridge term keeps W invertible
    Wi = np.linalg.inv(W)

    d1 = Wi @ delta
    d1_hat = d1 / np.linalg.norm(d1)

    return [d1_hat], W
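With a single direction this reduces to Fisher's discriminant, so it should roughly agree with scikit-learn's LDA direction. A minimal check, assuming the synthetic X_syn/y_syn from the sketch above are still in scope (agreement is not exact: the equal 0.5/0.5 class weighting and the 0.005 ridge term differ from sklearn's pooled estimate):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

d_g, _ = Discriminat_Vector_gender(X_syn, y_syn, 1)
lda_ref = LinearDiscriminantAnalysis(solver='lsqr').fit(X_syn, y_syn)
w = lda_ref.coef_.ravel()
cos = abs(d_g[0] @ w) / np.linalg.norm(w)   # d_g[0] is already unit-norm
print(round(float(cos), 3))                 # close to 1.0 if the directions agree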
train_data=np.load("DATA/R1_Traindata_530|132|_Pretrained_genderAge.npy",allow_pickle=True)
test_data=np.load("DATA/R1_Test_530|132|_Pretrained_genderAge.npy",allow_pickle=True)
train_labels=np.load("DATA/R1_Train_label_530|132|_Pretrained_genderAge.npy",allow_pickle=True)
test_labels=np.load("DATA/R1_Test_label_530|132|_Pretrained_genderAge.npy",allow_pickle=True)
print("train_data",train_data.shape)
print("test_data",test_data.shape)
print("train_labels",train_labels.shape)
print("test_labels",test_labels.shape)
train_gender = train_data[:, -1]   # last column: gender
test_gender = test_data[:, -1]
train_age = train_data[:, -2]      # second-to-last column: age
test_age = test_data[:, -2]
gender = np.append(train_gender,test_gender)
print(Counter(train_gender))
print(Counter(test_gender))
Age_data = np.append(train_age,test_age)
Age_label = np.append(train_labels,test_labels)

Original dimension (1000)

def svm_cv_1000(C):
    # BO objective over C; note this scores directly on the held-out test
    # split (see the cross-validation sketch at the end of this section)
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(train_data[:, :1000], train_labels)
    y_pred = svm.predict(test_data[:, :1000])
    return accuracy_score(test_labels, y_pred)

# Hyperparameter search range for C
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_1000, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
# Refit at the tuned C with probability estimates enabled for the ROC curve
svm_1000 = SVC(C=3.80, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_1000.fit(train_data[:, :1000], train_labels)
y_pred_1000 = svm_1000.predict(test_data[:, :1000])
y_pred_proba_1000 = svm_1000.predict_proba(test_data[:, :1000])
print(accuracy_score(test_labels, y_pred_1000))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_1000[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_1000[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()
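Note that svm_cv_1000 scores each candidate C on the test split itself, so the test set leaks into model selection. A minimal leakage-free alternative (a sketch, using the same data arrays) scores C by 5-fold cross-validation on the training split only:

from sklearn.model_selection import cross_val_score

def svm_cv_train_only(C):
    # mean 5-fold CV accuracy on the training split; the test set stays untouched
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    return cross_val_score(svm, train_data[:, :1000], train_labels,
                           cv=5, scoring='accuracy').mean()

optimizer_cv = BayesianOptimization(f=svm_cv_train_only, pbounds={'C': (0.1, 10)},
                                    random_state=42)
optimizer_cv.maximize(init_points=5, n_iter=10)
print(optimizer_cv.max)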

Original dimension + Age/Gender (1000 + 2)

def svm_cv(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(train_data, train_labels)
    y_pred = svm.predict(test_data)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm = SVC(C=0.84, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm.fit(train_data, train_labels)
y_pred = svm.predict(test_data)
y_pred_proba = svm.predict_proba(test_data)
print(accuracy_score(test_labels, y_pred))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

LDA (1000 --> 10)

d_lists, W = Discriminat_Vector(train_data[:, :1000], train_labels, 10)
d_lists_ = np.array(d_lists)                                   # (10, 1000) stack of directions
project_da_train = np.dot(train_data[:, :1000], d_lists_.T)    # (530, 10)
print(project_da_train.shape)
project_da_test = np.dot(test_data[:, :1000], d_lists_.T)      # (132, 10)
print(project_da_test.shape)
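The recursion in Discriminat_Vector is designed to make the directions orthogonal in the W^{-1} inner product; a quick check (a sketch, reusing the W returned above) is that the Gram matrix below is close to diagonal:

Wi_check = np.linalg.inv(W)
G = d_lists_ @ Wi_check @ d_lists_.T     # (10, 10) Gram matrix in the W^{-1} metric
off_diag = G - np.diag(np.diag(G))
print(np.max(np.abs(off_diag)))          # should be small relative to the diagonal entries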
def svm_cv_10(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_da_train, train_labels)
    y_pred = svm.predict(project_da_test)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_10, pbounds=pbounds, random_state=42,allow_duplicate_points=True)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_10 = SVC(C=0.1, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_10.fit(project_da_train, train_labels)
y_pred_10 = svm_10.predict(project_da_test)
y_pred_proba_10 = svm_10.predict_proba(project_da_test)
print(accuracy_score(test_labels, y_pred_10))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_10[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_10[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

LDA (1000 --> 10) + Age/Gender

# Append the raw gender and age columns to the 10-dim LDA projection
project_da_train_gender = np.append(project_da_train, np.reshape(train_gender, (530, 1)), axis=1)
project_da_test_gender = np.append(project_da_test, np.reshape(test_gender, (132, 1)), axis=1)
project_da_train_add = np.append(project_da_train_gender, np.reshape(train_age, (530, 1)), axis=1)
project_da_test_add = np.append(project_da_test_gender, np.reshape(test_age, (132, 1)), axis=1)
print(project_da_train_add.shape)
print(project_da_test_add.shape)
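The raw age column sits on a very different scale from the 10 LDA coordinates, and the RBF kernel is scale-sensitive. A minimal variant (a sketch; the cells below keep the unscaled features to match the original pipeline) standardizes the concatenated features, fitting the scaler on the training split only:

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(project_da_train_add)       # statistics from training data only
project_da_train_std = scaler.transform(project_da_train_add)
project_da_test_std = scaler.transform(project_da_test_add)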
def svm_cv_12(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_da_train_add, train_labels)
    y_pred = svm.predict(project_da_test_add)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_12, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_12 = SVC(C=0.100, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_12.fit(project_da_train_add, train_labels)
y_pred_12 = svm_12.predict(project_da_test_add)
y_pred_proba_12 = svm_12.predict_proba(project_da_test_add)
print(accuracy_score(test_labels, y_pred_12))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_12[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_12[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

LDA (1000 + 2 --> 40)

d_lists, W = Discriminat_Vector(train_data, train_labels, 40)   # 40 directions on the full feature set
d_lists_ = np.array(d_lists)
project_da_train = np.dot(train_data, d_lists_.T)   # (530, 40)
print(project_da_train.shape)
project_da_test = np.dot(test_data, d_lists_.T)     # (132, 40)
print(project_da_test.shape)
def svm_cv_full_40(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_da_train, train_labels)
    y_pred = svm.predict(project_da_test)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_full_40, pbounds=pbounds, random_state=42, allow_duplicate_points=True)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_40 = SVC(C=0.1, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_40.fit(project_da_train, train_labels)
y_pred_40 = svm_40.predict(project_da_test)
y_pred_proba_40 = svm_40.predict_proba(project_da_test)
print(accuracy_score(test_labels, y_pred_40))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_40[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_40[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

PCA (1000 + 2 --> 10)

# SVD of the (uncentered) training matrix; see the centered-PCA sketch below
U2, s2, V2 = np.linalg.svd(train_data.T, full_matrices=False)

project_svd_train = np.dot(U2.T[:10], train_data.T).T   # project onto the top-10 left singular vectors
project_svd_test = np.dot(U2.T[:10], test_data.T).T
print("train:", project_svd_train.shape)
print("test:", project_svd_test.shape)
def svm_cv_pca_10(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_svd_train, train_labels)
    y_pred = svm.predict(project_svd_test)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_pca_10, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_10 = SVC(C=3.80, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_10.fit(project_svd_train, train_labels)
y_pred_10 = svm_10.predict(project_svd_test)
y_pred_proba_10 = svm_10.predict_proba(project_svd_test)
print(accuracy_score(test_labels, y_pred_10))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_10[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_10[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()

PCA (1000 + 2 --> 10) + Age/Gender

project_svd_train_gender = np.append(project_svd_train, np.reshape(train_gender,(530,1)), axis=1)
project_svd_test_gender = np.append(project_svd_test, np.reshape(test_gender,(132,1)), axis=1)
project_svd_train_add = np.append(project_svd_train_gender, np.reshape(train_age,(530,1)), axis=1)
project_svd_test_add = np.append(project_svd_test_gender, np.reshape(test_age,(132,1)), axis=1)
print(project_svd_train_add.shape)
print(project_svd_test_add.shape)
def svm_cv_pca_12(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_svd_train_add, train_labels)
    y_pred = svm.predict(project_svd_test_add)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_pca_12, pbounds=pbounds, random_state=42)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_12 = SVC(C=3.41, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_12.fit(project_svd_train_add, train_labels)
y_pred_12 = svm_12.predict(project_svd_test_add)
y_pred_proba_12 = svm_12.predict_proba(project_svd_test_add)
print(accuracy_score(test_labels, y_pred_12))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_12[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_12[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()


Reduced dimension + BERT gender embedding + Age

embedding_train = np.load('../BERT/Gender_embedding_train.npy')
embedding_test = np.load('../BERT/Gender_embedding_test.npy')
print(embedding_train.shape)
print(embedding_test.shape)
d_lists, W = Discriminat_Vector_gender(embedding_train,train_gender,1)
d_lists_ = np.array(d_lists)
d_lists_.shape
embedding_train_gender = np.dot(embedding_train, d_lists_[0].T)
print(embedding_train_gender.shape)
embedding_test_gender = np.dot(embedding_test, d_lists_[0].T)
print(embedding_test_gender.shape)
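As a quick check (a sketch, not in the original notebook), the 1-D projection should itself be a good gender predictor; its AUC against the recorded gender labels indicates how much gender signal the BERT direction carries:

# AUC of the scalar projection as a gender score; flip if the sign convention inverts it
auc_g = roc_auc_score(train_gender.astype(int), embedding_train_gender)
print(round(float(max(auc_g, 1 - auc_g)), 3))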
# project_da_train / project_da_test are the 40-dim LDA projections from the
# previous section; the raw gender column is replaced by its BERT-derived score
project_da_train_gender = np.append(project_da_train, np.reshape(embedding_train_gender, (530, 1)), axis=1)
project_da_test_gender = np.append(project_da_test, np.reshape(embedding_test_gender, (132, 1)), axis=1)
project_da_train_add = np.append(project_da_train_gender, np.reshape(train_age, (530, 1)), axis=1)
project_da_test_add = np.append(project_da_test_gender, np.reshape(test_age, (132, 1)), axis=1)
print(project_da_train_add.shape)
print(project_da_test_add.shape)
def svm_cv_bert(C):
    svm = SVC(C=C, kernel='rbf', gamma='scale', random_state=42)
    svm.fit(project_da_train_add, train_labels)
    y_pred = svm.predict(project_da_test_add)
    return accuracy_score(test_labels, y_pred)

# Defining hyperparameter ranges
pbounds = {'C': (0.1, 10)}

optimizer = BayesianOptimization(f=svm_cv_bert, pbounds=pbounds, random_state=42, allow_duplicate_points=True)
optimizer.maximize(init_points=5, n_iter=10)

print(optimizer.max)
svm_bert = SVC(C=3.80, kernel='rbf', gamma='scale', random_state=42, probability=True)
svm_bert.fit(project_da_train_add, train_labels)
y_pred_bert = svm_bert.predict(project_da_test_add)
y_pred_proba_bert = svm_bert.predict_proba(project_da_test_add)
print(accuracy_score(test_labels, y_pred_bert))
fpr, tpr, thresholds = roc_curve(test_labels, y_pred_proba_bert[:, 1])
auc = roc_auc_score(test_labels, y_pred_proba_bert[:, 1])

plt.plot(fpr, tpr, label='SVM (AUC = {:.2f})'.format(auc))
plt.plot([0, 1], [0, 1], 'k--')  
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()