"""Train a small two-block 3D CNN classifier on volumetric data with PyTorch.

NOTE(review): Xtrain / ytrain / Xtest / ytest are assumed to be defined
earlier (presumably loaded via h5py) with X shaped (N, 60, 60, 268) and
integer class labels in [0, num_classes) -- confirm upstream.
"""

import pandas as pd
import numpy as np
from tqdm import tqdm
import os

# for reading and displaying images
from skimage.io import imread
import matplotlib.pyplot as plt

# for creating validation set
from sklearn.model_selection import train_test_split

# for evaluating the model
from sklearn.metrics import accuracy_score

# PyTorch libraries and modules
import torch
from torch.autograd import Variable  # NOTE: Variable is deprecated; plain tensors are used below
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import *
import h5py

# Determine sample shape: (height, width, depth) of one raw volume.
sample_shape = (60, 60, 268)

# (N, 60, 60, 268) -> add channel dim -> permute to (N, 1, 268, 60, 60),
# the (batch, channels, depth, height, width) layout Conv3d expects.
train_x = torch.from_numpy(Xtrain).unsqueeze(1).permute(0, 1, 4, 2, 3).float()
train_y = torch.from_numpy(ytrain).long()
test_x = torch.from_numpy(Xtest).unsqueeze(1).permute(0, 1, 4, 2, 3).float()
test_y = torch.from_numpy(ytest).long()

# Keep only the first 50 training samples (small-scale experiment subset).
train_x = train_x[0:50]
train_y = train_y[0:50]

batch_size = 10  # We pick beforehand a batch_size that we will use for the training

# Pytorch train and test sets
train = torch.utils.data.TensorDataset(train_x, train_y)
test = torch.utils.data.TensorDataset(test_x, test_y)

# data loader
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)

num_classes = 5


# Create CNN Model
class CNNModel(nn.Module):
    """(Conv3d -> LeakyReLU -> MaxPool3d) x 2, then
    FC -> LeakyReLU -> BatchNorm1d -> Dropout -> FC producing raw logits."""

    def __init__(self):
        super(CNNModel, self).__init__()
        self.conv_layer1 = self._conv_layer_set(1, 32)
        self.conv_layer2 = self._conv_layer_set(32, 64)
        # 703040 = 64 * 65 * 13 * 13: flattened feature-map size after the two
        # conv/pool blocks for a (1, 268, 60, 60) input volume.
        self.fc1 = nn.Linear(703040, 128)
        self.fc2 = nn.Linear(128, num_classes)
        self.relu = nn.LeakyReLU()
        self.batch = nn.BatchNorm1d(128)
        self.drop = nn.Dropout(p=0.15)

    def _conv_layer_set(self, in_c, out_c):
        """One conv block: 3x3x3 conv (no padding) + LeakyReLU + 2x2x2 max-pool."""
        conv_layer = nn.Sequential(
            nn.Conv3d(in_c, out_c, kernel_size=(3, 3, 3), padding=0),
            nn.LeakyReLU(),
            nn.MaxPool3d((2, 2, 2)),
        )
        return conv_layer

    def forward(self, x):
        # Set 1
        out = self.conv_layer1(x)
        out = self.conv_layer2(out)
        out = out.view(out.size(0), -1)  # flatten per sample
        out = self.fc1(out)
        out = self.relu(out)
        out = self.batch(out)
        out = self.drop(out)
        out = self.fc2(out)  # raw logits; CrossEntropyLoss applies log-softmax
        return out


# Definition of hyperparameters
n_iters = 4500
num_epochs = n_iters / (len(train_x) / batch_size)
num_epochs = int(num_epochs)

# Create CNN
model = CNNModel()
# model.cuda()
print(model)

# Cross Entropy Loss
error = nn.CrossEntropyLoss()

# SGD Optimizer
learning_rate = 0.001
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # images already have shape (batch, 1, 268, 60, 60); the original
        # hard-coded .view(10, ...) was redundant and would crash on a
        # partial final batch. It also shadowed the `train` dataset above.
        model.train()  # dropout / BatchNorm in training mode

        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        outputs = model(images)
        # Calculate softmax and cross entropy loss
        loss = error(outputs, labels)
        # Calculating gradients
        loss.backward()
        # Update parameters
        optimizer.step()

        count += 1
        if count % 50 == 0:
            # Calculate Accuracy on the test set
            correct = 0
            total = 0
            # eval(): freeze BatchNorm running stats and disable dropout so
            # evaluation neither leaks test statistics into the model nor
            # adds dropout noise; no_grad() skips autograd bookkeeping.
            model.eval()
            with torch.no_grad():
                # Iterate through test dataset
                for test_images, test_labels in test_loader:
                    # Forward propagation
                    outputs = model(test_images)
                    # Get predictions from the maximum value
                    predicted = torch.max(outputs.data, 1)[1]
                    # Total number of labels
                    total += len(test_labels)
                    # .item() converts the 0-d tensor to a plain int
                    correct += (predicted == test_labels).sum().item()
            accuracy = 100 * correct / float(total)

            # store loss and iteration (loss.item() replaces deprecated loss.data)
            loss_list.append(loss.item())
            iteration_list.append(count)
            accuracy_list.append(accuracy)
            if count % 500 == 0:
                # Print Loss
                print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.item(), accuracy))