% SASMAsims / GORS_SSLPSO_origfPB.m
% GORS_SSLPSO_origfPB.m
% (header lines above are residue from the GitHub "Raw" page this file was copied from;
%  commented out so the file parses as valid MATLAB)
function [bestever,gbpos,conv_curve,time_cost] = GORS_SSLPSO_origfPB(lb,ub,max_FEs,ndims,npop,fobj,initPos,init_f)
%%%*********************************************************************************************************%%%
%% Implementation of A Generation-based Optimal Restart Strategy for Surrogate-assisted SLPSO (GORS-SSLPSO) adapted to one function by PedroBento
%% H. Yu, Y. Tan, C. Sun, J. Zeng, A generation-based optimal restart strategy for surrogate-assisted
%% social learning particle swarm optimization, Knowledge-Based Systems, (2018).
%%%*********************************************************************************************%%%
%% This paper and this code should be referenced whenever they are used to
%% generate results for the user's own research.
%%%*********************************************************************************************%%%
%% This matlab code was written by Haibo Yu and Adapted by Pedro Bento November 2023 (retrieved from the repository https://github.com/h-hg/DDEA/blob/master/README.md)
%% Please refer with all questions, comments, bug reports, etc. to tyustyuhaibo@126.com
%
% Minimizes fobj using a social-learning PSO whose swarm fitness is predicted
% by an RBF surrogate; the exact (expensive) objective is only called every
% num_gen generations on the most promising surrogate point.
%
% Inputs:
%   lb, ub   - scalar lower / upper bound, replicated across all ndims dimensions
%   max_FEs  - budget of exact objective evaluations
%   ndims    - problem dimensionality
%   npop     - swarm size
%   fobj     - handle to the exact objective function (minimization)
%   initPos  - npop-by-ndims matrix of initial, already-evaluated positions
%   init_f   - exact fitness of initPos; transposed to a row vector below
%              (assumes init_f is a column vector -- TODO confirm with caller,
%              since hf=[hf,sbesty] later relies on hf being a row)
% Outputs:
%   bestever   - best exact objective value found within the budget
%   gbpos      - position at which bestever was obtained
%   conv_curve - best-so-far exact value indexed by evaluation count (max_FEs-by-1)
%   time_cost  - wall-clock runtime in seconds
% %
% rng('default');
% rng('shuffle');


warning('off');  % suppress warnings (e.g. near-singular systems in the RBF solve)

% CE(k,:) = [k, best exact value after k evaluations]; column 2 becomes conv_curve
CE=zeros(max_FEs,2);

time_begin=tic;

%---------------Initialization-----------------
%parameter setting
c3 = 0;          % weight of the attraction toward the swarm center (disabled here)
PL = ones(npop,1);  % per-particle learning probability (always learn)

%initialization
p = initPos;                       % particle positions (npop x ndims)
v = zeros(npop, ndims);            % particle velocities
lu = [lb* ones(1, ndims); ub* ones(1, ndims)];  % row 1 = lower bounds, row 2 = upper bounds
  
gen = 0;       % generation counter
FES = npop;    % exact evaluations consumed so far (initial population counts)

fitness=init_f';

% archive of all exactly-evaluated points (hx) and their exact fitness (hf)
hx=p;   
hf=fitness;

[bestever,id] = min(fitness);
gbpos=p(id,:);

% seed the convergence curve with the best of the initial population
CE(1:npop,1)=1:1:npop;
CE(1:npop,2)=bestever.*ones(npop,1);


num_gen=30;% 1(SSLPSO), 10, 20, 30(Standard.GORS-SSLPSO), 40, 50, 60, 80

%main loop
while(FES < max_FEs)
    
    if mod(gen, num_gen) == 0 % only meeting this condition, the top-ranking npop data are used
        
%         fprintf('Iteration: %d Fitness evaluation: %d Best fitness: %e\n', gen, FES, bestever);
        
        % rank the current swarm by surrogate-predicted fitness and drop any
        % point already in the exact-evaluation archive
        [~,idx]=sort(fitness);
        p_app=p(idx,:); 
        f_app=fitness(idx);
        [~,~,ip]=intersect(hx,p_app,'rows');
        p_app(ip,:)=[];
        f_app(ip)=[];
        if ~isempty(p_app)==1  % note: equivalent to ~isempty(p_app); ~ binds before ==
            % exactly evaluate the best unseen surrogate point
            sbest_pos=p_app(1,:);
            
            sbesty=fobj(sbest_pos);
            FES=FES+1;
            
            if (sbesty<CE(FES-1,2)) % an improvement of the convergence curve is accepted; in theory equivalent to sbesty<bestever
                CE(FES,:)=[FES,sbesty];
            else
                CE(FES,:)=[FES,CE(FES-1,2)]; % otherwise carry the best value found so far
            end
            

            % append the new exact sample to the archive and update the incumbent
            hx=[hx;sbest_pos];   hf=[hf,sbesty];
            [bestever,ib] = min([sbesty, bestever]);
            if ib==1
                gbpos=sbest_pos;
            end
        end
        
        % select the best npop sample points to form the population (restart)
        [~,idx]=sort(hf);   idx=idx(1:npop);
        p=hx(idx,:);        fitness=hf(idx);
        
    end
    
    %% ********************* Phase 2 ************************ behavioral learning
    %population sorting (descending: row npop holds the best particle)
    [~,rank] = sort(fitness, 'descend');
    p = p(rank,:);
    v = v(rank,:);
    %center position
    center = ones(npop,1)*mean(p);
    %random matrix
    %         rand('seed', sum(100 * clock));
    randco1 = rand(npop, ndims);
    %         rand('seed', sum(100 * clock));
    randco2 = rand(npop, ndims);
    %         rand('seed', sum(100 * clock));
    randco3 = rand(npop, ndims);
    % per-dimension demonstrator: each particle learns from a randomly chosen
    % particle ranked at or above itself (index >= own row, i.e. better fitness)
    winidxmask = repmat([1:npop]', [1 ndims]);
    winidx = winidxmask + ceil(rand(npop, ndims).*(npop - winidxmask));
    %     winidx = npop - floor(0.5*rand(npop, ndims).*(npop - winidxmask));
    pwin = p;
    for j = 1:ndims
        pwin(:,j) = p(winidx(:,j),j);
    end
    %learning
    %         rand('seed', sum(100 * clock));
    lpmask = repmat(rand(npop,1) < PL, [1 ndims]);
    lpmask(npop,:) = 0;  % the best particle never moves
    v1 =  1*(randco1.*v + randco2.*(pwin - p) + c3*randco3.*(center - p));
    p1 =  p + v1;
    v = lpmask.*v1 + (~lpmask).*v;
    p = lpmask.*p1 + (~lpmask).*p;
    %boundary: clamp every particle into [lb,ub]
    for i = 1:npop
        p(i,:) = max(p(i,:), lu(1,:));
        p(i,:) = min(p(i,:), lu(2,:));
    end
    gen = gen + 1;
    
    %% ********************* RBF modeling ************************
    % Local model for 50D / 100D
    %         NS=2*(ndims+1);
    %         phdis=real(sqrt(p.^2*ones(size(hx'))+ones(size(p))*(hx').^2-2*p*(hx')));
    %         [~,sidx]=sort(phdis,2);
    %         nidx=sidx; nidx(:,NS+1:end)=[];
    %         nid=unique(nidx);
    %         trainx=hx(nid,:);
    %         trainf=hf(nid);
    
    % Global model for 10D / 20D / 30D: fit on the full exact archive
    trainx=hx;  trainf=hf;
    
    % radial basis function interpolation----(RBF-interpolation)
    flag='cubic';
    [lambda, gamma]=RBF(trainx,trainf',flag);
    FUN=@(x) RBF_eval(x,trainx,lambda,gamma,flag);
    %%
    % replace exact fitness with surrogate predictions for the next generation
    fitness=FUN(p);     fitness=fitness';
end

conv_curve=CE(:,2);
time_cost=toc(time_begin);
end
% end main function!
%% Auxiliary functions
%______________________________________________________________________________
function [lambda, gamma]=RBF(S,Y,flag)
% function [lambda, gamma, rho]=RBF(S,Y,flag)   % For Gaussian basis function

% Fit a radial basis function interpolant with a linear polynomial tail.
%--------------------------------------------------------------------------
%Copyright (c) 2012 by Juliane Mueller
%
% This file is part of the surrogate model module toolbox.
%
%--------------------------------------------------------------------------
%Author information
%Juliane Mueller
%Tampere University of Technology, Finland
%juliane.mueller2901@gmail.com
%--------------------------------------------------------------------------
%
%Input:
%S - Sample site matrix (m x d), m=number of sample sites, d=dimension
%Y - Objective function values corresponding to points in S (column vector)
%flag - string selecting the basis: 'cubic', 'TPS' or 'linear'
%
%Output:
%lambda - RBF weights, one per sample site
%gamma  - coefficients of the linear polynomial tail (d slopes + intercept)
%--------------------------------------------------------------------------


[nPts, dim] = size(S);
polyTail = [S, ones(nPts, 1)];   % linear tail design matrix [x, 1]

% Symmetric matrix of pairwise Euclidean distances between sample sites;
% only the upper triangle is computed, then mirrored.
sqDist = zeros(nPts, nPts);
for rowIdx = 1:nPts
    for colIdx = rowIdx:nPts
        d2 = sum((S(rowIdx,:)-S(colIdx,:)).^2, 2);
        sqDist(rowIdx, colIdx) = d2;
        sqDist(colIdx, rowIdx) = d2;
    end
end
dist = sqrt(sqDist);
% rho=2;

% Sd=real(sqrt(S.^2*ones(size(S'))+ones(size(S))*(S').^2-2*S*(S')));    % For Gaussian basis function
% rho=max(max(Sd))/(n*m)^(1/n);     % For Gaussian basis function

% Basis matrix for the chosen kernel
switch flag
    case 'cubic'            % cubic RBF
        Phi = dist.^3;
    case 'TPS'              % thin plate spline RBF
        dist(dist == 0) = 1;  % avoid log(0); r^2*log(r) -> 0 anyway
        Phi = dist.^2 .* log(dist);
    case 'linear'           % linear RBF
        Phi = dist;
% elseif strcmp(flag,'Gaussian')
%     Phi=exp(-R.^2/rho^2);
% elseif strcmp(flag,'multiquad')
%     Phi=sqrt((R.^2 + rho^2).^3);
% elseif strcmp(flag,'invmultiquad')
%     Phi=1./sqrt(R.^2 + rho^2);
end

% Solve the saddle-point interpolation system for weights and tail coefficients
sysMat = [Phi, polyTail; polyTail', zeros(dim+1, dim+1)];
rhs = [Y; zeros(dim+1, 1)];
coeffs = sysMat \ rhs;
lambda = coeffs(1:nPts);
gamma = coeffs(nPts+1:end);

end
%______________________________________________________________________________
function Yest=RBF_eval(X,S,lambda,gamma,flag)
% function Yest=RBF_eval(X,S,lambda,gamma,rho,flag)   % For Gaussian basis function

% Evaluate a fitted RBF interpolant (weights + linear tail) at query points.
%--------------------------------------------------------------------------
%Copyright (c) 2012 by Juliane Mueller
%
% This file is part of the surrogate model module toolbox.
%
%--------------------------------------------------------------------------
%Author information
%Juliane Mueller
%Tampere University of Technology, Finland
%juliane.mueller2901@gmail.com
%--------------------------------------------------------------------------
%
%input:
%X are points where function values should be calculated
%S are points where the function values are known (the RBF centers)
%lambda - RBF weight vector returned by RBF
%gamma  - linear polynomial tail coefficients returned by RBF
%flag   - string selecting the basis: 'cubic', 'TPS' or 'linear'
%output: 
%Yest - estimated function values at the points in X
%--------------------------------------------------------------------------

[numQuery, queryDim] = size(X);
[numCenter, centerDim] = size(S);
if queryDim ~= centerDim
    % Dimension mismatch: assume X was supplied transposed and flip it.
    X = X';
    [numQuery, ~] = size(X);
end

% Pairwise Euclidean distances between query points and centers
dist = zeros(numQuery, numCenter);
for q = 1:numQuery
    for c = 1:numCenter
        dist(q, c) = norm(X(q,:) - S(c,:));
    end
end

% Basis matrix for the chosen kernel (must match the fit in RBF)
switch flag
    case 'cubic'
        Phi = dist.^3;
    case 'TPS'
        dist(dist == 0) = 1;  % avoid log(0)
        Phi = dist.^2 .* log(dist);
    case 'linear'
        Phi = dist;
% elseif strcmp(flag,'Gaussian')
%     Phi=exp(-R.^2/rho^2);
% elseif strcmp(flag,'multiquad')
%     Phi=sqrt((R.^2 + rho^2).^3);
% elseif strcmp(flag,'invmultiquad')
%     Phi=1./sqrt(R.^2 + rho^2);
end

% Prediction = RBF part + linear polynomial tail
Yest = (Phi*lambda) + ([X, ones(numQuery,1)]*gamma);

end
%______________________________________________________________________________