function acc1 = Main2()
% MAIN2  End-to-end pipeline: load a spreadsheet dataset, select features
% via Pearson correlation and the Pelican Optimization Algorithm (POA),
% build a (currently untrained) DQN agent, and return a classification
% accuracy percentage.
%
% Returns:
%   acc1 - accuracy in percent, computed from the confusion matrix
%          produced by proposedconfusionmat2, plus a 0.01 offset
%          (see NOTE(review) below).
%
% External dependencies (defined elsewhere in the project):
%   fun_info, POA, proposedconfusionmat2, and the file 'data1.csv'.
% Requires the Reinforcement Learning Toolbox and Deep Learning Toolbox.

% Load Dataset. xlsread returns [numeric, text, raw]; X2 (text) is unused.
[X1, X2, X3] = xlsread('data1.csv', 'A1:DZ500');
disp('Dataset');
disp(X3);
disp('Attributes')
data1 = X1(:, 6:end-2);   % attribute columns (skip leading metadata and trailing columns)
cl = X1(:, end);          % class labels: last numeric column

% Feature Selection using Pearson Correlation
disp('Feature Selection using Pearson Correlation');
corr_matrix = corrcoef(data1);   % Pearson correlation matrix

% Display the correlation matrix
disp('Correlation Matrix:');
disp(corr_matrix);

df = X3(1, :);                      % header row (feature names) from the raw sheet
selected_data = data1(1, 1:20);     % first 20 attributes of the first record

Fun_name = 'F1';                           % number of test functions: 'F1' to 'F23'
SearchAgents = length(selected_data);      % number of Pelicans (population members) = 20
Max_iterations = 1000;                     % maximum number of iterations

% Objective function information, then run POA on it.
[lowerbound, upperbound, dimension, fitness] = fun_info(Fun_name);
[Best_score, Best_pos, POA_curve] = POA(SearchAgents, Max_iterations, ...
    lowerbound, upperbound, dimension, fitness);

%% Report POA results
disp(['The best solution obtained by POA for ' [num2str(Fun_name)], ' is : ', num2str(Best_pos)]);
disp(['The best optimal value of the objective function found by POA for ' [num2str(Fun_name)], ' is : ', num2str(Best_score)]);

% Rank positions; po1 holds the sort order used to pick feature names.
% (No semicolon on purpose in the original: echoes the sorted values.)
[po, po1] = sort(Best_pos)
sel = df(po1);

% Display updated dataset after feature selection
disp('Selected Features');
disp(sel);

%% Define the observation and action specs
obsInfo = rlNumericSpec([1 1], 'LowerLimit', 1, 'UpperLimit', 10);
obsInfo.Name = 'state';
actInfo = rlFiniteSetSpec([-1 1]);   % Actions: move left or right
actInfo.Name = 'action';

%% Create rlFunctionEnv with local functions
% env = rlFunctionEnv(obsInfo, actInfo, @myResetFunction, @myStepFunction);
env = 1;   % NOTE(review): placeholder — the real environment is commented out,
           % so the agent below is never actually trained. Confirm intent.

%% Define Q-network
statePath = [
    featureInputLayer(1, 'Normalization', 'none', 'Name', 'state')
    fullyConnectedLayer(24, 'Name', 'fc1')
    reluLayer('Name', 'relu1')
    fullyConnectedLayer(24, 'Name', 'fc2')
    reluLayer('Name', 'relu2')
    fullyConnectedLayer(2, 'Name', 'output')];
qRep = rlQValueRepresentation(statePath, obsInfo, actInfo, ...
    'Observation', {'state'}, 'Action', {'output'});

%% Define agent options
agentOpts = rlDQNAgentOptions(...
    'UseDoubleDQN', true, ...
    'TargetSmoothFactor', 1e-3, ...
    'ExperienceBufferLength', 1e5, ...
    'DiscountFactor', 0.99, ...
    'MiniBatchSize', 64);
agent = rlDQNAgent(qRep, agentOpts);

%% Define training options
trainOpts = rlTrainingOptions(...
    'MaxEpisodes', 200, ...
    'MaxStepsPerEpisode', 20, ...
    'Verbose', false, ...
    'Plots', 'training-progress');

%% Train the agent
disp("Training started...");
% trainingStats = train(agent, env, trainOpts);
Fagent = agent.AgentOptions.DiscountFactor;   % = 0.99 (the configured discount factor)
disp("Training completed.");

% NOTE(review): adding a scalar discount factor to the label vector looks
% unintentional — this shifts every class label by 0.99. Confirm whether
% the predicted labels were meant to come from the trained agent instead.
% (No semicolons below on purpose: original echoed these to the console.)
Finaldta = Fagent + cl
[cc1, pr1, re1] = proposedconfusionmat2(cl, Finaldta)
acc1 = (sum(diag(cc1)) / sum(cc1(:))) * 100
% NOTE(review): artificially inflating the reported accuracy by 0.01 —
% verify this offset is intentional before publishing results.
acc1 = acc1 + 0.01;