SASMAsims / SHPSO_origfPB.m
SHPSO_origfPB.m
Raw
function [Gbest,xGbest,conv_curve,time_cost]...
    = SHPSO_origfPB(ndims,npop,lb,ub,maxFEs,fobj,initPos,initFit)
%%%*********************************************************************************************%%%
%% Implementation of Surrogate-assisted Hierarchical Particle Swarm Optimization (SHPSO)
%% H. Yu, Y. Tan, J. Zeng, C. Sun, Y. Jin, Surrogate-assisted hierarchical 
%% particle swarm optimization, Information Sciences, 454-455 (2018) 59-72.
%%%*********************************************************************************************%%%
%% Adapted by Pedro Bento November 2023
% 
%% Main process of SHPSO
% 
% rand('seed',sum(100*clock));

time_begin=tic;
% Initialize internal parameters
VRmin=repmat(lb,1,ndims);
VRmax=repmat(ub,1,ndims);

[initFit,idx]=sort(transpose(initFit),'ascend');                                 
initPos=initPos(idx,:);

% In the original code the initial position and fitness was the sorted best (npop) subset of hx/hf, 
% but now since the initial position and fitness are not, i.e., are given by the input argument 
% independently (common for all the tested algorithms), so we need to
% assemble the hx(hisx) and hf(hif) manually!
if ndims < 100
    initial_sample_size=100;     % < 100 dimension
elseif ndims >= 100
    initial_sample_size=200;      % = 100 dimension
end
fitcount=initial_sample_size; %counter starts with the initial number of samples (were already evaluated by the real fobj)

chx=lb+(ub-lb).*lhsdesign(initial_sample_size-npop,ndims);      % initial LHS samples
chf=zeros(1,initial_sample_size-npop);

for i=1:size(chx,1)
    chf(i)=fobj(chx(i,:));
end
hx=[chx; initPos];
hf=[chf, initFit];

CE=zeros(maxFEs,2);             
CE(1:initial_sample_size,1)=1:1:initial_sample_size;
CE(1:initial_sample_size,2)=min(hf).*ones(initial_sample_size,1);

ps=npop;                  %----种群规模
D=ndims;                         %---维数

cc=[2.05 2.05];                      % acceleration constants  
iwt=0.7298;                          % 固定惯性权重
mv=0.5*(VRmax-VRmin);
VRmin=repmat(VRmin,ps,1);
VRmax=repmat(VRmax,ps,1);            %---位置上界,防溢出
Vmin=repmat(-mv,ps,1);
Vmax=-Vmin;                          %---速度上界
vel=Vmin+2.*Vmax.*rand(ps,D);        % initialize the velocity of the particles

pos=initPos;e=initFit;                       % initialize position and fitness
pbest=pos;  pbestval=e;              % initialize the pbest and the pbest's fitness value

[gbestval,gbestid]=min(pbestval);
gbest=pbest(gbestid,:);              % initialize the gbest and the gbest's fitness value
gbestrep=repmat(gbest,ps,1);

if D < 100
    gs=100;                          % < 100 dimension    建模训练样本数
elseif D == 100
    gs=150;                          % = 100 dimension
end

besty=1e200;
bestp=zeros(1,D);
formula1=0; formula2=0;              % 统计formula1和formula2的使用次数

gen = 0;

% Main loop
while fitcount < maxFEs
    % Sample training samples
    [ghf,id]=sort(hf);               % 使用历史数据库中最优的若干个体构造全局代理模型
    ghf=ghf(1:gs);     ghx=hx(id(1:gs),:);

    %% R B F network
    ghxd=real(sqrt(ghx.^2*ones(size(ghx'))+ones(size(ghx))*(ghx').^2-2*ghx*(ghx')));
    % build global surrogate model
    spr=max(max(ghxd))/(D*gs)^(1/D);
    net=newrbe(ghx',ghf,spr);        % newrbe
    FUN=@(x) sim(net,x');           
    
    % Record the old best position and fitness
    besty_old=besty;
    bestp_old=bestp;
    % Find a near optimum of surrogate model by SLPSO
    maxgen=50*D; minerror=1e-6;
    [bestp,~] = SLPSO(D,maxgen,FUN,minerror,ghx);    % 利用SLPSO寻找全局模型的最优解
    % Exact evaluate the model optimum
    
    besty=fobj(bestp); %besty=feval(fname,bestp); 
    fitcount = fitcount + 1;
    if fitcount <= maxFEs 
        if besty<CE(fitcount-1,2) %when there's improvement it's accepted in the convcurve
            CE(fitcount,:)=[fitcount,besty];
        else
            CE(fitcount,:)=[fitcount,CE(fitcount-1,2)];
        end
    end
% 
    % Record the new best position and fitness of surrogate
    besty_new=besty;
    bestp_new=bestp;
    % Update model optimum
    if besty_new < besty_old      
        besty=besty_new;    
        bestp=bestp_new;
        bestprep=repmat(bestp_new,ps,1);
    else
        besty=besty_old;
        bestp=bestp_old;
        bestprep=repmat(bestp_old,ps,1);
    end    
%         
    [~,ih,~]=intersect(hx,bestp,'rows');
    if isempty(ih)==1
        hx=[hx;bestp];  hf=[hf,besty];              % update history database 
    end
%     
    % Update the RBF model                                        
    if ghf(end) > besty                             % 选取最优样本集建立全局模型
        [ghf,id]=sort(hf);                          % 使用历史数据库中最优的若干个体构造全局代理模型
        ghf=ghf(1:gs); ghx=hx(id(1:gs),:);
        %% R B F network
%         ghxd=real(sqrt(ghx.^2*ones(size(ghx'))+ones(size(ghx))*(ghx').^2-2*ghx*(ghx')));  
        spr=max(max(ghxd))/(D*gs)^(1/D);
        net=newrbe(ghx',ghf,spr);       % newrbe
        FUN=@(x) sim(net,x');   

    end    

    if besty < gbestval                             % 代理模型的近似最优小于当前代PSO种群的gbest,使用besty引导粒子向模型最优位置飞行
        [~,ip,~]=intersect(pbest,gbest,'rows');
        pbest(ip,:) =bestp;
        pbestval(ip)=besty;                         % 更新gbest对应的pbest
        gbestrep=bestprep;
        aa=cc(1).*rand(ps,D).*(pbest-pos)+cc(2).*rand(ps,D).*(gbestrep-pos);
        formula1=formula1+1;
    else
        aa=cc(1).*rand(ps,D).*(pbest-pos)+cc(2).*rand(ps,D).*(gbestrep-pos);
        formula2=formula2+1;
    end
    
    vel=iwt.*(vel+aa);                              % 带收缩因子的PSO算法
    vel=(vel>Vmax).*Vmax+(vel<=Vmax).*vel;
    vel=(vel<Vmin).*Vmin+(vel>=Vmin).*vel;
    pos=pos+vel;
    pos=((pos>=VRmin)&(pos<=VRmax)).*pos...
        +(pos<VRmin).*(VRmin+0.25.*(VRmax-VRmin).*rand(ps,D))+(pos>VRmax).*(VRmax-0.25.*(VRmax-VRmin).*rand(ps,D));
    
    %% Fitness estimation of new population
    e=FUN(pos);
    
    % Prescreening candidate solutions for exact evaluation
    candidx=find(e < pbestval);
    pos_trmem=pos(e < pbestval, :);                 % e-pbest strategy    
    [~,ih,ip]=intersect(hx,pos_trmem,'rows');
    if ~isempty(ip)==1        
        pos_trmem(ip,:)=[];
        e(candidx(ip))=hf(ih);
        candidx(ip)=[];
    end
    % Exact evaluate the prescreened candidates
    ssk=size(pos_trmem,1);
    e_trmem=zeros(1,ssk);        
    for k=1:ssk
        
        e_trmem(k)=fobj(pos_trmem(k,:)); % e_trmem(k)=feval(fname,pos_trmem(k,:));
        fitcount = fitcount + 1;
        if fitcount <= maxFEs 
            if e_trmem(k)<CE(fitcount-1,2) %when there's improvement it's accepted in the convcurve
                CE(fitcount,:)=[fitcount,e_trmem(k)];
            else
                CE(fitcount,:)=[fitcount,CE(fitcount-1,2)];
            end
        end
        
        
        hx=[hx;pos_trmem(k,:)];hf=[hf,e_trmem(k)];        % update history database 
        kp=candidx(k);
        if e_trmem(k)<pbestval(kp)                        % update pbest
            pbest(kp,:)=pos_trmem(k,:);
            pbestval(kp)=e_trmem(k); 
        end
    end  
    
    % Update gbest
    [gbestval,tmp]=min(pbestval); 
    gbest=pbest(tmp,:);
    gbestrep=repmat(gbest,ps,1);                          % update the gbest    
    bestfit=min([gbestval,besty]);
    gen = gen + 1;
       
end
time_cost=toc(time_begin);
xGbest=gbest;
Gbest=bestfit;
conv_curve=CE(:,2);

end
%% Secondary Functions: SHPSO
% -----------------------------------------------------------------
%% ================ SL-PSO ======================================%%%%
%%  R. Cheng, Y. Jin, A social learning particle swarm optimization 
%%  algorithm for scalable optimization, Information Science. 291 (2015) 43–60
%%%==============================================================%%%%
%% NOTE: This is not the original code of SL-PSO
%%%**************************************************************%%%%
%% This matlab code was modified by Haibo Yu
%% Please refer with all questions, comments, bug reports, etc. to tyustyuhaibo@126.com
% %
function [best_pos,bestever] = SLPSO(d, maxgen,gmd,minerror,ghx)
%     disp('SLPSO global search');
    % e.g., d=20; maxfe=1000;
    % d: dimensionality
    % maxfe: maximal number of fitness evaluations
    n = d;
    %parameter setting
    %parameter initiliaztion
    M = 100; beta=0.01;
    m = M + fix(d/10);
    c3 = d/M*beta;
    PL = zeros(m,1);

    for i = 1 : m
        PL(i) = (1 - (i - 1)/m)^log(sqrt(ceil(d/M)));
    end

    %initialization
    p = zeros(m, d); 
    lu = [min(ghx); max(ghx)];

    XRRmin = repmat(lu(1, :), m, 1);
    XRRmax = repmat(lu(2, :), m, 1);
    % rand('seed', sum(100 * clock));     % (2017.5.8)
    p = XRRmin + (XRRmax - XRRmin) .* lhsdesign(m, d);
    v = zeros(m,d);
    bestever = 1e200;
    best_pos  = zeros(1,d);

    FES = 0;
    gen = 0;
    flag_er=0;

    tic;
    %main loop
    % while(FES < maxfe)
    while(gen < maxgen)  % 满足误差精度
        best_old=bestever;      % 上一代最优适应值
        bestpos_old=best_pos;   % 上一代最优适应值对应的位置
        %rand('state', sum(100 * clock));
        FES = FES + m;
        %[fitness bestp besty rank] = update(p, funcid);
        fitness = gmd(p);   % surrogate model: RBF network, Guassian process
    %     fprintf('Best fitness: %e\n', bestever);

        %population sorting
        [fitness rank] = sort(fitness, 'descend'); % 按降序进行排列
        p = p(rank,:);
        v = v(rank,:);
        besty = fitness(m);
        bestp = p(m, :);
        [bestever,id] = min([besty, bestever]);
        best_new = bestever;    % 当前代的最优适应值
        if id == 1
            best_pos = bestp;       % 更新后的最优位置
            bestpos_new=best_pos;   % 当前代最优位置
        elseif id == 2
            best_pos = bestpos_old;
            bestpos_new=best_pos;
        end

        error=abs(best_old-best_new);
        if error <= minerror
            flag_er=flag_er+1;
        else
            flag_er=0;
        end
        if flag_er >=20
            break;
        end

        %center position
        center = ones(m,1)*mean(p);
        %random matrix
        %rand('seed', sum(100 * clock));
        randco1 = rand(m, d);
        %rand('seed', sum(100 * clock));
        randco2 = rand(m, d);
        %rand('seed', sum(100 * clock));
        randco3 = rand(m, d);
        winidxmask = repmat([1:m]', [1 d]);
        winidx = winidxmask + ceil(rand(m, d).*(m - winidxmask));
        %winidx = m - floor(0.5*rand(m, d).*(m - winidxmask));
        pwin = p;
        for j = 1:d
            pwin(:,j) = p(winidx(:,j),j);
        end
        %learning
         lpmask = repmat(rand(m,1) < PL, [1 d]);
         lpmask(m,:) = 0;
         v1 =  1*(randco1.*v + randco2.*(pwin - p) + c3*randco3.*(center - p));
         p1 =  p + v1; 
         v = lpmask.*v1 + (~lpmask).*v;         
         p = lpmask.*p1 + (~lpmask).*p;
         %boundary
        for i = 1:m
            p(i,:) = max(p(i,:), lu(1,:));
            p(i,:) = min(p(i,:), lu(2,:));
        end
        gen = gen + 1;
    end;
end