% RNFS-A-robust-nature-inspired-feature-selection-for-remote-sensing-image-classification
% evaluation/CKNN.m
%% Cosine KNN Code for Saving Average Testing Metrics
% This script processes multiple trials of a neural network model,
% computes various testing metrics, and saves the average metrics to Excel files.
% The code performs the following tasks:
% 1. Loads data from multiple .mat files containing features and labels.
% 2. Trains a neural network model and performs cross-validation.
% 3. Computes validation and test accuracies as well as other metrics.
% 4. Saves individual models and metrics to separate .mat files.
% 5. Calculates average metrics across all trials.
% 6. Saves the average metrics to Excel files for further analysis.
% This script helps in evaluating model performance across different trials and 
% provides a convenient way to store and review the results.

clear;
clc;

% --- Configuration -------------------------------------------------------

% Base directory and naming pattern for the per-trial input .mat files.
defaultBasePath = 'matFiles/'; % Base directory for .mat files
filePrefix = 'Algorithm_Name_Dataset_name_Trial_'; % Prefix for file names
fileSuffix = '.mat'; % File extension

% Number of independent trials to evaluate
numTrials = 60;

% Output directory for saved models and metrics. Create it up front:
% without this guard every SAVE/WRITETABLE call below errors if the
% folder does not already exist.
resultsDir = 'results';
if ~exist(resultsDir, 'dir')
    mkdir(resultsDir);
end

% Build the full path of each trial's .mat file.
fileNames = cell(1, numTrials);
for i = 1:numTrials
    fileNames{i} = fullfile(defaultBasePath, [filePrefix, num2str(i), fileSuffix]);
end

% --- Preallocation -------------------------------------------------------

% Per-trial accuracies (stored as percentages)
validationAccuracies = zeros(numTrials, 1);
testAccuracies = zeros(numTrials, 1);

% Per-trial testing metrics, macro- and micro-averaged across classes
macroPrecisionTesting = zeros(numTrials, 1);
microPrecisionTesting = zeros(numTrials, 1);
macroSensitivityTesting = zeros(numTrials, 1);
microSensitivityTesting = zeros(numTrials, 1);
macroSpecificityTesting = zeros(numTrials, 1);
microSpecificityTesting = zeros(numTrials, 1);
macroAccuracyTesting = zeros(numTrials, 1);
microAccuracyTesting = zeros(numTrials, 1);
macroFMeasureTesting = zeros(numTrials, 1);
microFMeasureTesting = zeros(numTrials, 1);

% Record the random number generator state so the cross-validation
% partitions produced by this run can be documented/reproduced later.
rngState = rng;

% --- Per-trial training and evaluation -----------------------------------

for i = 1:numTrials
    % Load the trial data. Each file is expected to provide trFeat,
    % trainLabel, tsFeat, and testLabel.
    data = load(fileNames{i});

    % Extract features and labels (labels converted to categorical for
    % classification and element-wise comparison below)
    trFeat = data.trFeat;
    trainLabel = categorical(data.trainLabel);
    tsFeat = data.tsFeat;
    testLabel = categorical(data.testLabel);

    % Train a cosine-distance k-NN classifier (k = 10, unweighted votes,
    % features standardized before distance computation)
    classificationKNN = fitcknn(trFeat, trainLabel, ...
        'Distance', 'Cosine', ...
        'NumNeighbors', 10, ...
        'DistanceWeight', 'Equal', ...
        'Standardize', true, ...
        'ClassNames', unique(trainLabel));

    % 5-fold cross-validation on the training data
    partitionedModel = crossval(classificationKNN, 'KFold', 5);

    % Validation accuracy = 1 - misclassification rate, as a percentage.
    % 'classiferror' is the documented loss-function name for kfoldLoss.
    validationAccuracy = 1 - kfoldLoss(partitionedModel, 'LossFun', 'classiferror');
    validationAccuracies(i) = validationAccuracy * 100;

    % Evaluate on the held-out test set
    testPredictions = predict(classificationKNN, tsFeat);
    testAccuracy = sum(testPredictions == testLabel) / numel(testLabel);
    testAccuracies(i) = testAccuracy * 100;

    % Per-class testing metrics from the confusion matrix.
    % NOTE(review): the indices 5..9 assume statsOfMeasure returns its
    % macro/micro averages ordered as precision, sensitivity, specificity,
    % accuracy, F-measure at positions 5-9 — confirm against the helper.
    testConfusionMatrix = confusionmat(testLabel, testPredictions);
    statsTest = statsOfMeasure(testConfusionMatrix, 0);

    macroPrecisionTesting(i) = statsTest.macroAVG(5);
    microPrecisionTesting(i) = statsTest.microAVG(5);
    macroSensitivityTesting(i) = statsTest.macroAVG(6);
    microSensitivityTesting(i) = statsTest.microAVG(6);
    macroSpecificityTesting(i) = statsTest.macroAVG(7);
    microSpecificityTesting(i) = statsTest.microAVG(7);
    macroAccuracyTesting(i) = statsTest.macroAVG(8);
    microAccuracyTesting(i) = statsTest.microAVG(8);
    macroFMeasureTesting(i) = statsTest.macroAVG(9);
    microFMeasureTesting(i) = statsTest.microAVG(9);

    % Persist the trained and cross-validated models for this trial
    save(fullfile(resultsDir, sprintf('Cosine_KNN_model_data_%d.mat', i)), ...
        'classificationKNN', 'partitionedModel');
end

% --- Aggregation and export ----------------------------------------------

% Average metrics across all trials (macro- and micro-averaged variants)
averageMacroMetrics = table(mean(validationAccuracies), mean(macroPrecisionTesting), ...
    mean(macroSensitivityTesting), mean(macroSpecificityTesting), mean(macroAccuracyTesting), ...
    mean(macroFMeasureTesting), 'VariableNames', ...
    {'AvgValAcc', 'AvgMacroPrec', 'AvgMacroSens', 'AvgMacroSpec', 'AvgMacroAcc', 'AvgMacroF1'});

averageMicroMetrics = table(mean(validationAccuracies), mean(microPrecisionTesting), ...
    mean(microSensitivityTesting), mean(microSpecificityTesting), mean(microAccuracyTesting), ...
    mean(microFMeasureTesting), 'VariableNames', ...
    {'AvgValAcc', 'AvgMicroPrec', 'AvgMicroSens', 'AvgMicroSpec', 'AvgMicroAcc', 'AvgMicroF1'});

% Save all per-trial and aggregate results in a single .mat file
save(fullfile(resultsDir, 'Algorithm_Name_Dataset_Name_Cosine_KNN_trained_models_and_metrics.mat'), ...
    'rngState', 'fileNames', 'validationAccuracies', 'testAccuracies', ...
    'macroPrecisionTesting', 'microPrecisionTesting', 'macroSensitivityTesting', ...
    'microSensitivityTesting', 'macroSpecificityTesting', 'microSpecificityTesting', ...
    'macroAccuracyTesting', 'microAccuracyTesting', 'macroFMeasureTesting', ...
    'microFMeasureTesting', 'averageMacroMetrics', 'averageMicroMetrics');

% Export the average metric tables to Excel for review
writetable(averageMacroMetrics, fullfile(resultsDir, 'Algorithm_Name_Dataset_Name_Cosine_KNN_average_macro_metrics.xlsx'));
writetable(averageMicroMetrics, fullfile(resultsDir, 'Algorithm_Name_Dataset_Name_Cosine_KNN_average_micro_metrics.xlsx'));