% RNFS-A-robust-nature-inspired-feature-selection-for-remote-sensing-image-classification / evaluation / WNN.m
%% WNN Code for Saving Average Testing Metrics
% This script processes multiple trials of a neural network model,
% computes various testing metrics, and saves the average metrics to Excel files.
% The code performs the following tasks:
% 1. Loads data from multiple .mat files containing features and labels.
% 2. Trains a neural network model and performs cross-validation.
% 3. Computes validation and test accuracies as well as other metrics.
% 4. Saves individual models and metrics to separate .mat files.
% 5. Calculates average metrics across all trials.
% 6. Saves the average metrics to Excel files for further analysis.
% This script helps in evaluating model performance across different trials and 
% provides a convenient way to store and review the results.

clear;
clc;

% Total number of independent trials to evaluate
numTrials = 60;

% Build the list of per-trial .mat file paths
fileNames = arrayfun(@(k) sprintf('Algorithm_Name_Dataset_name_Trial_%d.mat', k), ...
    1:numTrials, 'UniformOutput', false);

% Preallocate one column vector per metric (one entry per trial)
[validationAccuracies, testAccuracies] = deal(zeros(numTrials, 1));
[macroPrecisionTesting, microPrecisionTesting] = deal(zeros(numTrials, 1));
[macroSensitivityTesting, microSensitivityTesting] = deal(zeros(numTrials, 1));
[macroSpecificityTesting, microSpecificityTesting] = deal(zeros(numTrials, 1));
[macroAccuracyTesting, microAccuracyTesting] = deal(zeros(numTrials, 1));
[macroFMeasureTesting, microFMeasureTesting] = deal(zeros(numTrials, 1));

% Capture the RNG state so the stochastic training/CV runs can be replayed
rngState = rng;

% Process every trial: train, cross-validate, test, and record metrics
for trialIdx = 1:numTrials
    % Load this trial's precomputed features and labels
    trial = load(fileNames{trialIdx});
    xTrain = trial.trFeat;
    yTrain = categorical(trial.trainLabel);
    xTest  = trial.tsFeat;
    yTest  = categorical(trial.testLabel);

    % Single-hidden-layer (100 units, ReLU) network, standardized inputs,
    % no regularization (Lambda = 0), up to 1000 training iterations
    classificationNeuralNetwork = fitcnet(xTrain, yTrain, ...
        'LayerSizes', 100, 'Activations', 'relu', 'Lambda', 0, ...
        'IterationLimit', 1000, 'Standardize', true, ...
        'ClassNames', unique(yTrain));

    % 5-fold cross-validation on the training data
    partitionedModel = crossval(classificationNeuralNetwork, 'KFold', 5);

    % Validation accuracy (%) = 1 - cross-validated classification error
    validationAccuracies(trialIdx) = ...
        100 * (1 - kfoldLoss(partitionedModel, 'LossFun', 'ClassifError'));

    % Held-out test accuracy (%)
    predicted = predict(classificationNeuralNetwork, xTest);
    testAccuracies(trialIdx) = 100 * (sum(predicted == yTest) / numel(yTest));

    % Per-class statistics from the test confusion matrix
    % (statsOfMeasure is a project helper; rows 5..9 hold precision,
    % sensitivity, specificity, accuracy, F-measure — TODO confirm layout)
    confMat = confusionmat(yTest, predicted);
    statsTest = statsOfMeasure(confMat, 0);

    macroPrecisionTesting(trialIdx)   = statsTest.macroAVG(5);
    microPrecisionTesting(trialIdx)   = statsTest.microAVG(5);
    macroSensitivityTesting(trialIdx) = statsTest.macroAVG(6);
    microSensitivityTesting(trialIdx) = statsTest.microAVG(6);
    macroSpecificityTesting(trialIdx) = statsTest.macroAVG(7);
    microSpecificityTesting(trialIdx) = statsTest.microAVG(7);
    macroAccuracyTesting(trialIdx)    = statsTest.macroAVG(8);
    microAccuracyTesting(trialIdx)    = statsTest.microAVG(8);
    macroFMeasureTesting(trialIdx)    = statsTest.macroAVG(9);
    microFMeasureTesting(trialIdx)    = statsTest.microAVG(9);

    % Persist this trial's trained and cross-validated models
    save(sprintf('model_data_%d.mat', trialIdx), ...
        'classificationNeuralNetwork', 'partitionedModel');
end

% Average each metric over all trials and collect into one-row tables
macroMeans = [mean(validationAccuracies), mean(macroPrecisionTesting), ...
    mean(macroSensitivityTesting), mean(macroSpecificityTesting), ...
    mean(macroAccuracyTesting), mean(macroFMeasureTesting)];
averageMacroMetrics = array2table(macroMeans, 'VariableNames', ...
    {'AvgValAcc', 'AvgMacroPrec', 'AvgMacroSens', 'AvgMacroSpec', ...
     'AvgMacroAcc', 'AvgMacroF1'});

microMeans = [mean(validationAccuracies), mean(microPrecisionTesting), ...
    mean(microSensitivityTesting), mean(microSpecificityTesting), ...
    mean(microAccuracyTesting), mean(microFMeasureTesting)];
averageMicroMetrics = array2table(microMeans, 'VariableNames', ...
    {'AvgValAcc', 'AvgMicroPrec', 'AvgMicroSens', 'AvgMicroSpec', ...
     'AvgMicroAcc', 'AvgMicroF1'});

% Save metrics and models.
% NOTE(review): 'classificationNeuralNetwork' and 'partitionedModel' here are
% only the LAST trial's objects (the loop overwrites them each iteration);
% per-trial copies are already saved in model_data_<i>.mat above.
save('WNN_trained_models_and_metrics.mat', 'rngState', 'fileNames', 'classificationNeuralNetwork', ...
    'partitionedModel', 'validationAccuracies', 'testAccuracies', 'macroPrecisionTesting', ...
    'microPrecisionTesting', 'macroSensitivityTesting', 'microSensitivityTesting', 'macroSpecificityTesting', ...
    'microSpecificityTesting', 'macroAccuracyTesting', 'microAccuracyTesting', 'macroFMeasureTesting', ...
    'microFMeasureTesting', 'averageMacroMetrics', 'averageMicroMetrics');

% Export the one-row average-metric tables to Excel for review
writetable(averageMacroMetrics, 'WNN_average_macro_metrics.xlsx');
writetable(averageMicroMetrics, 'WNN_average_micro_metrics.xlsx');