function [model] = ELM_AE(Data, param)
% ELM_AE  Train a multilayer ELM autoencoder.
%   Data  : input matrix, one sample per column (features x samples)
%   param : struct with fields numHiddenLayers, numHiddenNeurons,
%           C (regularization, one entry per hidden layer plus the last layer),
%           sigpara / sigpara1 (sigmoid scaling factors per layer), and
%           rhoValue (target mean activation for the KL sparsity penalty)

P = Data;
T = P;                                   % autoencoder: target = input
NumberofTrainingData = size(P,2);
NumberofInputNeurons = size(P,1);
NumberofOutputNeurons = NumberofInputNeurons;

%%%%%%%%%%% Calculate weights & biases
no_Layers = param.numHiddenLayers;
stack     = cell(no_Layers+1,1);
HN        = [NumberofInputNeurons, param.numHiddenNeurons];
C         = param.C;
sigscale  = param.sigpara;
sigscale1 = param.sigpara1;
rhoValue  = param.rhoValue;

InputDataLayer = P;
clear P;
rng(1);                                  % fixed seed for reproducible random weights

for i = 1:no_Layers
    % Random, orthogonalized input weights and bias for this autoencoder layer
    InputWeight = rand(HN(i+1),HN(i))*2 - 1;
    if HN(i+1) > HN(i)
        InputWeight = orth(InputWeight);
    else
        InputWeight = orth(InputWeight')';
    end
    %rng(randomWeightRng);
    BiasofHiddenNeurons = rand(HN(i+1),1)*2 - 1;
    BiasofHiddenNeurons = orth(BiasofHiddenNeurons);   % normalize the bias vector

    % Random feature mapping: H = sigmoid(W*X + b)
    tempH = InputWeight*InputDataLayer;
    clear InputWeight;
    ind = ones(1,NumberofTrainingData);
    BiasMatrix = BiasofHiddenNeurons(:,ind);           % replicate bias over samples
    tempH = tempH + BiasMatrix;
    clear BiasMatrix BiasofHiddenNeurons;
    H = 1 ./ (1 + exp(-sigscale1(i)*tempH));
    clear tempH;

    % Solve for the layer weights that reconstruct the layer input from H
    if HN(i+1) == HN(i)
        % Equal dimensions: orthogonal Procrustes solution
        [~,stack{i}.w,~] = procrustNew(InputDataLayer', H');
    else
        if C(i) == 0
            % No regularization; refer to the 2006 Neurocomputing ELM paper
            stack{i}.w = pinv(H') * InputDataLayer';
        else
            % Regularized least squares with a KL-divergence sparsity
            % penalty pulling mean activations toward rhoValue
            rhohats = mean(H,2);
            rho = rhoValue;
            KLsum = sum(rho * log(rho ./ rhohats) + (1-rho) * log((1-rho) ./ (1-rhohats)));
            Hsquare  = H * H';
            HsquareL = diag(max(Hsquare,[],2));
            stack{i}.w = ((eye(size(H,1)).*KLsum + HsquareL)*(1/C(i)) + Hsquare) \ (H * InputDataLayer');
            clear Hsquare HsquareL;
        end
    end

    % Propagate the data to the next layer
    tempH = stack{i}.w * InputDataLayer;
    clear InputDataLayer;
    if HN(i+1) == HN(i)
        InputDataLayer = tempH;                              % linear when sizes match
    else
        InputDataLayer = 1 ./ (1 + exp(-sigscale(i)*tempH)); % sigmoid otherwise
    end
    clear tempH H;
end
%%%% End of autoencoder layers

%% Last layer: (regularized) least-squares output weights
if C(no_Layers+1) == 0
    stack{no_Layers+1}.w = pinv(InputDataLayer') * T';
else
    stack{no_Layers+1}.w = (eye(size(InputDataLayer,1))/C(no_Layers+1) + InputDataLayer*InputDataLayer') \ (InputDataLayer * T');
end
clear InputDataLayer H;

model.no_Layers = no_Layers;
model.stack     = stack;
model.HN        = HN;
model.sigscale  = sigscale;
end
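% ---------------------------------------------------------------------
% Usage sketch (illustrative, not part of the original training code).
% The field names below are exactly the ones read from param above; the
% sizes and values are assumptions chosen only for demonstration.
%
%   X = rand(64, 500);                    % 64 features x 500 samples
%   param.numHiddenLayers  = 2;
%   param.numHiddenNeurons = [100, 64];   % one size per hidden layer
%   param.C        = [1e-1, 1e-1, 1e8];   % per hidden layer + last layer
%   param.sigpara  = [1, 1];              % sigmoid scale, layer outputs
%   param.sigpara1 = [1, 1];              % sigmoid scale, random mapping
%   param.rhoValue = 0.05;                % target mean activation (KL term)
%   model = ELM_AE(X, param);
% ---------------------------------------------------------------------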
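% Inference sketch (an assumption, not in the original file): applies a
% trained model to new data by mirroring the forward pass used in
% training: linear propagation where consecutive layer sizes match (the
% Procrustes branch), a scaled sigmoid otherwise, and the transposed
% last-layer weights for the output map. Shown as a local function for
% illustration; in practice it would live in its own .m file so it can
% be called from outside.
function Y = ELM_AE_predict(model, X)    % hypothetical helper
H = X;
for i = 1:model.no_Layers
    tempH = model.stack{i}.w * H;
    if model.HN(i+1) == model.HN(i)
        H = tempH;                                        % linear layer
    else
        H = 1 ./ (1 + exp(-model.sigscale(i)*tempH));     % sigmoid layer
    end
end
Y = (model.stack{model.no_Layers+1}.w)' * H;              % outputs x samples
end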