% "Max" variant proposed by the supervisor
clc;
clear;

% Load data from the Excel file (sheet "Angles arranged")
try
    data = xlsread('A-Full_s12.xlsx', "Angles arranged");
catch
    error('Failed to load data from the Excel file.');
end

% Select features and preprocess data
input_data = data(:, [2, 5, 7]); % Columns 2, 5, and 7 hold frequency, magnitude, and angle, respectively

% Compute the baseline model for moisture
MoistureBaseline = SingBaseline(input_data);

% Standardize the output data (zero mean, unit variance)
mu = mean(MoistureBaseline);
sig = std(MoistureBaseline);
outputdataStandardized = (MoistureBaseline - mu) / sig;
response_data = outputdataStandardized;

disp('Data loaded and preprocessed successfully.');

%% Hybrid Grey Wolf Algorithm for input combination selection
% Objective function: RMSE of a linear surrogate fitted on the selected samples
objective_function = @(samples) objectiveFunction(samples, input_data, response_data, outputdataStandardized);

% Parameters for the hybrid GWO
num_samples = size(input_data, 1);
lb = zeros(1, num_samples);
ub = ones(1, num_samples);
num_wolves = 20;
max_iterations = 100;

% Run the hybrid GWO-ABC-BAT optimizer
[selected_samples, rmse] = hybrid_gwo_abc_bat(objective_function, num_samples, lb, ub, num_wolves, max_iterations);

% Extract the selected input combinations
selected_indices = find(selected_samples > 0);
selected_combinations = input_data(selected_indices, :);
selected_responses = response_data(selected_indices);
disp(['RMSE: ', num2str(rmse)]);

%% GRU / LSTM / BiLSTM networks
% Data splitting (90% training, 10% testing)
train_ratio = 0.9;
num_train = round(train_ratio * size(selected_combinations, 1));
indices = randperm(size(selected_combinations, 1));
train_indices = indices(1:num_train);
test_indices = indices(num_train+1:end);
input_train = selected_combinations(train_indices, :);
output_train = selected_responses(train_indices, :);
input_test = selected_combinations(test_indices, :);
output_test = selected_responses(test_indices, :);

numChannels = 3;
numResponses = 1;
numHiddenUnits = 2000;
Iterations = 100;

GRUlayers = [
    sequenceInputLayer(numChannels)
    gruLayer(numHiddenUnits)
    fullyConnectedLayer(numResponses)
    regressionLayer];

LSTMlayers = [
    sequenceInputLayer(numChannels)
    lstmLayer(numHiddenUnits)
    fullyConnectedLayer(numResponses)
    regressionLayer];

BILSTMlayers = [
    sequenceInputLayer(numChannels)
    bilstmLayer(numHiddenUnits)
    fullyConnectedLayer(numResponses)
    regressionLayer];

options = trainingOptions('adam', ...
    'MaxEpochs', Iterations, ...
    'GradientThreshold', 1, ...
    'InitialLearnRate', 0.001, ...
    'LearnRateSchedule', 'piecewise', ...
    'LearnRateDropPeriod', 50, ...
    'LearnRateDropFactor', 0.5, ...
    'ExecutionEnvironment', 'gpu', ...
    'Verbose', 0, ...
    'Plots', 'training-progress');
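% Optional guard (a sketch, assuming the Parallel Computing Toolbox is
% installed): if no GPU is detected, fall back to automatic environment
% selection so training does not error out on CPU-only machines.
if gpuDeviceCount == 0
    options.ExecutionEnvironment = 'auto';
end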
% Train the GRU, LSTM, and BiLSTM networks on the training split
% (the split computed above was previously unused; it is now applied so the
% test metrics below are computed on held-out data)
GRUnet = trainNetwork(input_train', output_train', GRUlayers, options);
LSTMnet = trainNetwork(input_train', output_train', LSTMlayers, options);
BILSTMnet = trainNetwork(input_train', output_train', BILSTMlayers, options);

% Use the trained networks for prediction on the test data
GRUpredictionsWFS = predict(GRUnet, input_test');
LSTMpredictionsWFS = predict(LSTMnet, input_test');
BILSTMpredictionsWFS = predict(BILSTMnet, input_test');

% Performance metrics: GRU
GRUWFSrmse = sqrt(mean((GRUpredictionsWFS - output_test').^2));
GRUWFSmae = mean(abs(GRUpredictionsWFS - output_test'));
correlation_coefficient = corrcoef(GRUpredictionsWFS, output_test');
GRUWFSr_squared = correlation_coefficient(1, 2)^2;

% Performance metrics: LSTM
LSTMWFSrmse = sqrt(mean((LSTMpredictionsWFS - output_test').^2));
LSTMWFSmae = mean(abs(LSTMpredictionsWFS - output_test'));
correlation_coefficient = corrcoef(LSTMpredictionsWFS, output_test');
LSTMWFSr_squared = correlation_coefficient(1, 2)^2;

% Performance metrics: BiLSTM
BILSTMWFSrmse = sqrt(mean((BILSTMpredictionsWFS - output_test').^2));
BILSTMWFSmae = mean(abs(BILSTMpredictionsWFS - output_test'));
correlation_coefficient = corrcoef(BILSTMpredictionsWFS, output_test');
BILSTMWFSr_squared = correlation_coefficient(1, 2)^2;

% Display the results: GRU
disp('GRU statistics');
disp(['Mean Absolute Error (MAE) with GRU and feature selection: ', num2str(GRUWFSmae)]);
disp(['Root Mean Squared Error (RMSE) with GRU and feature selection: ', num2str(GRUWFSrmse)]);
disp(['R-squared (R^2) with GRU and feature selection: ', num2str(GRUWFSr_squared)]);

% Display the results: LSTM
disp('LSTM statistics');
disp(['Mean Absolute Error (MAE) with LSTM and feature selection: ', num2str(LSTMWFSmae)]);
disp(['Root Mean Squared Error (RMSE) with LSTM and feature selection: ', num2str(LSTMWFSrmse)]);
disp(['R-squared (R^2) with LSTM and feature selection: ', num2str(LSTMWFSr_squared)]);

% Display the results: BiLSTM
disp('BILSTM statistics');
disp(['Mean Absolute Error (MAE) with BILSTM and feature selection: ', num2str(BILSTMWFSmae)]);
disp(['Root Mean Squared Error (RMSE) with BILSTM and feature selection: ', num2str(BILSTMWFSrmse)]);
disp(['R-squared (R^2) with BILSTM and feature selection: ', num2str(BILSTMWFSr_squared)]);

%% Define CNN Architecture and Train
% Reshape data for the CNN: each sample becomes a 1-by-1 "image" with one
% channel per selected feature
input_data_reshaped = reshape(selected_combinations', 1, 1, size(selected_combinations, 2), size(selected_combinations, 1));
response_data_reshaped = selected_responses'; % Row vector of responses

% Split the data into training and testing sets
train_ratio = 0.9;
num_train = round(train_ratio * size(input_data_reshaped, 4));
indices = randperm(size(input_data_reshaped, 4));
train_indices = indices(1:num_train);
test_indices = indices(num_train+1:end);
input_train = input_data_reshaped(:,:,:,train_indices);
output_train = response_data_reshaped(train_indices)';
input_test = input_data_reshaped(:,:,:,test_indices);
output_test = response_data_reshaped(test_indices)';

% Ensure output_train and output_test are column vectors
output_train = output_train(:);
output_test = output_test(:);
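% Optional sanity check (a sketch, not part of the original pipeline): the
% 4th dimension of each image stack must match the number of responses;
% a mismatch here is a common source of trainNetwork errors.
assert(size(input_train, 4) == numel(output_train), 'Training inputs and targets are misaligned.');
assert(size(input_test, 4) == numel(output_test), 'Test inputs and targets are misaligned.');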
% Define the CNN architecture
CNNlayers = [
    imageInputLayer([1 1 size(selected_combinations, 2)])
    convolution2dLayer([1 1], 64, 'Padding', 'same')
    batchNormalizationLayer
    reluLayer
    convolution2dLayer([1 1], 128, 'Padding', 'same')
    batchNormalizationLayer
    reluLayer
    maxPooling2dLayer([1 1], 'Stride', [1 1])
    convolution2dLayer([1 1], 256, 'Padding', 'same')
    batchNormalizationLayer
    reluLayer
    dropoutLayer(0.5)
    fullyConnectedLayer(128)
    reluLayer
    dropoutLayer(0.5)
    fullyConnectedLayer(1)
    regressionLayer];

% Training options with a piecewise learning-rate schedule
CNNoptions = trainingOptions('adam', ...
    'MaxEpochs', Iterations, ...
    'InitialLearnRate', 0.001, ...
    'LearnRateSchedule', 'piecewise', ...
    'LearnRateDropPeriod', 50, ...
    'LearnRateDropFactor', 0.5, ...
    'Shuffle', 'every-epoch', ...
    'ValidationData', {input_test, output_test}, ...
    'ValidationFrequency', 30, ...
    'Verbose', 0, ...
    'Plots', 'training-progress');

% Train the CNN
CNNnet = trainNetwork(input_train, output_train, CNNlayers, CNNoptions);

% Use the trained network for prediction on the test data
CNNpredictions = predict(CNNnet, input_test);

% Root Mean Squared Error (RMSE)
CNNrmse = sqrt(mean((CNNpredictions - output_test).^2));
% Mean Absolute Error (MAE)
CNNmae = mean(abs(CNNpredictions - output_test));
% R-squared (R^2)
correlation_coefficient = corrcoef(CNNpredictions, output_test);
CNNr_squared = correlation_coefficient(1, 2)^2;

% Display the results
disp(['Mean Absolute Error (MAE) with CNN and feature selection: ', num2str(CNNmae)]);
disp(['Root Mean Squared Error (RMSE) with CNN and feature selection: ', num2str(CNNrmse)]);
disp(['R-squared (R^2) with CNN and feature selection: ', num2str(CNNr_squared)]);

%% Supporting functions

% Objective function for the hybrid GWO: fits a linear surrogate on the
% selected samples and returns its RMSE (the 4th argument is unused)
function rmse = objectiveFunction(samples, input_data, response_data, ~)
    % Extract the selected input combinations from the binary sample vector
    selected_indices = find(samples > 0);
    if isempty(selected_indices)
        rmse = Inf; % Penalize empty selections with a high RMSE
        return;
    end
    selected_combinations = input_data(selected_indices, :);
    selected_responses = response_data(selected_indices);

    % Fit the surrogate model and score it on the selected samples
    model = trainModel(selected_combinations, selected_responses);
    y_pred = predictModel(model, selected_combinations);
    rmse = sqrt(mean((selected_responses - y_pred).^2));
end

function y_pred = predictModel(model, X_train)
    % Predict with the trained linear regression model. For this sample
    % selection step, predictions are made on the training data itself.
    y_pred = predict(model, X_train);
end

function model = trainModel(X_train, y_train)
    % Train a linear regression surrogate
    model = fitlm(X_train, y_train);
    % Alternatives, depending on the dataset:
    %   model = fitrlinear(X_train, y_train); % Regularized linear regression
    %   model = fitrsvm(X_train, y_train);    % Support vector regression
    % Note: plot(model) is intentionally omitted here. This function is
    % called once per wolf per iteration, and plotting inside it would
    % open thousands of figures.
end
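% SingBaseline is called in the preprocessing step above but is not defined
% in this file. If it exists elsewhere on the MATLAB path, delete this stub
% so it does not shadow the real implementation. The body below is a
% hypothetical placeholder (per-sample mean of the selected features), NOT
% the actual moisture baseline model.
function baseline = SingBaseline(X)
    baseline = mean(X, 2); % placeholder only
end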
function [selected_samples, rmse] = hybrid_gwo_abc_bat(objective_function, num_samples, lb, ub, num_wolves, max_iterations)
    % Initialize the positions of the grey wolves randomly (binary vectors)
    wolves = randi([0, 1], num_wolves, num_samples);

    % Evaluate the objective function for each wolf
    fitness_values = zeros(num_wolves, 1);
    for i = 1:num_wolves
        fitness_values(i) = objective_function(wolves(i, :));
    end

    % Sort wolves by fitness and identify the alpha, beta, and delta wolves
    [sorted_fitness, sorted_indices] = sort(fitness_values);
    alpha_wolf = wolves(sorted_indices(1), :);
    beta_wolf = wolves(sorted_indices(2), :);
    delta_wolf = wolves(sorted_indices(3), :);

    % Initialize velocities for the BAT component
    velocities = zeros(num_wolves, num_samples);
    max_velocity = 1; % Velocity clamp to prevent excessive position changes

    % Main loop
    iteration = 1;
    while iteration <= max_iterations
        % Encircling coefficient: linearly decreases from 2 to 0
        a = 2 - iteration * (2 / max_iterations);

        for i = 1:num_wolves
            for j = 1:num_samples
                % --- Candidate 1: GWO (alpha), ABC (beta), BAT (delta) ---
                r1 = rand();
                r2 = rand();
                A1 = 2 * a * r1 - a;
                C1 = 2 * r2;
                D_alpha = abs(C1 * alpha_wolf(j) - wolves(i, j));
                X1 = alpha_wolf(j) - A1 * D_alpha;

                % ABC component for X2
                k = randi([1, num_wolves]);
                while k == i
                    k = randi([1, num_wolves]);
                end
                phi = rand() * 2 - 1; % Random number in [-1, 1]
                X2 = wolves(i, j) + phi * (wolves(i, j) - beta_wolf(j));

                % BAT component for X3: update and clamp the velocity
                beta = rand();
                velocities(i, j) = velocities(i, j) + (delta_wolf(j) - wolves(i, j)) * beta;
                velocities(i, j) = max(-max_velocity, min(velocities(i, j), max_velocity));
                X3 = wolves(i, j) + velocities(i, j);

                Position1 = (X1 + X2 + X3) / 3;

                % --- Candidate 2: GWO (beta), ABC (delta), BAT (alpha) ---
                r1 = rand();
                r2 = rand();
                A1 = 2 * a * r1 - a;
                C1 = 2 * r2;
                D_beta = abs(C1 * beta_wolf(j) - wolves(i, j));
                X4 = beta_wolf(j) - A1 * D_beta;

                % ABC component for X5
                k = randi([1, num_wolves]);
                while k == i
                    k = randi([1, num_wolves]);
                end
                phi = rand() * 2 - 1;
                X5 = wolves(i, j) + phi * (wolves(i, j) - delta_wolf(j));

                % BAT component for X6
                beta = rand();
                velocities(i, j) = velocities(i, j) + (alpha_wolf(j) - wolves(i, j)) * beta;
                velocities(i, j) = max(-max_velocity, min(velocities(i, j), max_velocity));
                X6 = wolves(i, j) + velocities(i, j);

                Position2 = (X4 + X5 + X6) / 3;

                % --- Candidate 3: GWO (delta), ABC (alpha), BAT (beta) ---
                r1 = rand();
                r2 = rand();
                A1 = 2 * a * r1 - a;
                C1 = 2 * r2;
                D_delta = abs(C1 * delta_wolf(j) - wolves(i, j));
                X7 = delta_wolf(j) - A1 * D_delta;

                % ABC component for X8
                k = randi([1, num_wolves]);
                while k == i
                    k = randi([1, num_wolves]);
                end
                phi = rand() * 2 - 1;
                X8 = wolves(i, j) + phi * (wolves(i, j) - alpha_wolf(j));

                % BAT component for X9
                beta = rand();
                velocities(i, j) = velocities(i, j) + (beta_wolf(j) - wolves(i, j)) * beta;
                velocities(i, j) = max(-max_velocity, min(velocities(i, j), max_velocity));
                X9 = wolves(i, j) + velocities(i, j);

                Position3 = (X7 + X8 + X9) / 3;

                % Keep the smallest of the three hybrid candidates
                wolves(i, j) = min([Position1, Position2, Position3]);
                % Optional mutation to enhance diversity (disabled):
                % mutation_prob = 0.01;
                % mutation_strength = 0.01;
                % if rand() < mutation_prob
                %     wolves(i, j) = wolves(i, j) + mutation_strength * (rand() - 0.5);
                % end

                % Bound the new solution within the search space
                wolves(i, j) = max(lb(j), min(wolves(i, j), ub(j)));
            end

            % Binary discretization (threshold at 0.5)
            wolves(i, :) = wolves(i, :) > 0.5;

            % Evaluate the fitness of the new solution
            fitness_values(i) = objective_function(wolves(i, :));
        end

        % Update the alpha, beta, and delta wolves
        [sorted_fitness, sorted_indices] = sort(fitness_values);
        alpha_wolf = wolves(sorted_indices(1), :);
        beta_wolf = wolves(sorted_indices(2), :);
        delta_wolf = wolves(sorted_indices(3), :);

        iteration = iteration + 1;
    end

    % Return the best solution found by the hybrid algorithm
    selected_samples = alpha_wolf;
    rmse = sorted_fitness(1);
end
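% Optional helper (a sketch, not called anywhere above): computes the
% RMSE/MAE/R^2 triple that the evaluation blocks repeat for each network;
% swap it in if you want to shorten those sections.
function [rmse, mae, r2] = regressionMetrics(predictions, targets)
    predictions = predictions(:);
    targets = targets(:);
    rmse = sqrt(mean((predictions - targets).^2));
    mae = mean(abs(predictions - targets));
    C = corrcoef(predictions, targets);
    r2 = C(1, 2)^2;
end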