function [best_params, best_loss] = optimize_LSTM_with_TAVO_IMF(pop_size, bounds, max_iter, train_data, val_data)
% Optimize LSTM hyperparameters (hidden units, learning rate, mini-batch size)
% with a tent-chaotic, time-varying-weight metaheuristic (TAVOA). Both series
% are first decomposed into IMFs via REMD; a candidate's fitness is the mean
% one-step-ahead validation MSE across per-IMF LSTM models.

% Decompose the training and validation data into IMFs using REMD
[imfs_train, ~] = remd(train_data);
[imfs_val, ~] = remd(val_data);

% The two decompositions can yield different numbers of IMFs; use the
% common count so every trained model has a matching validation IMF
num_imfs = min(size(imfs_train, 2), size(imfs_val, 2));

% Initialize population and evaluate its fitness
population = initialize_population(pop_size, bounds);
fitness = evaluate_population(population, imfs_train, imfs_val, num_imfs);

% Best solution so far
[best_loss, best_idx] = min(fitness);
best_params = population(best_idx, :);

% One chaotic state per individual
chaotic_map = rand(pop_size, 1);

% Main loop
for t = 1:max_iter
    weight = time_varying_weight(t, max_iter);
    for i = 1:pop_size
        % Advance the tent chaotic map
        chaotic_map(i) = tent_map(chaotic_map(i));

        % Candidate position: a weighted, chaos-perturbed move toward the
        % best solution plus a chaotic jump sampled from the search range
        r1 = rand();
        r2 = rand();
        new_position = population(i, :) ...
            + r1 * weight * (best_params - chaotic_map(i) * population(i, :)) ...
            + r2 * (chaotic_map(i) * (bounds(:, 2) - bounds(:, 1))' + bounds(:, 1)');

        % Clamp to the search bounds
        new_position = max(min(new_position, bounds(:, 2)'), bounds(:, 1)');

        % Greedy selection: keep the candidate only if it improves
        new_loss = fitness_function(new_position, imfs_train, imfs_val, num_imfs);
        if new_loss < fitness(i)
            population(i, :) = new_position;
            fitness(i) = new_loss;

            % Update the global best
            if new_loss < best_loss
                best_params = new_position;
                best_loss = new_loss;
            end
        end
    end
end
end

function population = initialize_population(pop_size, bounds)
% Uniform random initialization inside [lower, upper] for each parameter
population = bounds(:, 1)' + (bounds(:, 2)' - bounds(:, 1)') .* rand(pop_size, size(bounds, 1));
end

function fitness = evaluate_population(population, imfs_train, imfs_val, num_imfs)
% Fitness of every individual in the population
pop_size = size(population, 1);
fitness = zeros(pop_size, 1);
for i = 1:pop_size
    fitness(i) = fitness_function(population(i, :), imfs_train, imfs_val, num_imfs);
end
end

function loss = fitness_function(params, imfs_train, imfs_val, num_imfs)
% Decode the candidate; integer hyperparameters are rounded
num_units = round(params(1));
learning_rate = params(2);
batch_size = round(params(3));

% Train one LSTM per IMF and average the one-step-ahead validation MSE
total_loss = 0;
for k = 1:num_imfs
    model = train_LSTM_model(num_units, learning_rate, batch_size, imfs_train(:, k));

    % One-step-ahead evaluation: inputs x(1..T-1) predict targets x(2..T)
    x_val = imfs_val(1:end-1, k)';
    y_val = imfs_val(2:end, k)';
    predictions = predict(model, x_val);
    total_loss = total_loss + mean((predictions - y_val).^2);
end
loss = total_loss / num_imfs;
end

function model = train_LSTM_model(num_units, learning_rate, batch_size, data)
% data is one IMF as a column vector; build a one-step-ahead input/target
% pair and transpose to the 1-by-T (features-by-timesteps) layout that
% sequenceInputLayer expects
X = data(1:end-1)';
Y = data(2:end)';

% Sequence-to-sequence regression network
layers = [ ...
    sequenceInputLayer(1)
    lstmLayer(num_units, 'OutputMode', 'sequence')
    fullyConnectedLayer(1)
    regressionLayer];

% Specify training options. With a single training sequence MiniBatchSize
% has little effect, but it is kept as a tunable for multi-sequence setups
options = trainingOptions('adam', ...
    'InitialLearnRate', learning_rate, ...
    'MaxEpochs', 150, ...
    'MiniBatchSize', batch_size, ...
    'Shuffle', 'every-epoch', ...
    'Plots', 'none', ...
    'Verbose', false);

% Train the LSTM model
model = trainNetwork(X, Y, layers, options);
end

function y = tent_map(x, a)
% Skew tent map on [0, 1]: chaotic for a in (0, 1), since both branch
% slopes, 1/a and 1/(1-a), exceed 1
if nargin < 2
    a = 0.7;
end
if x < a
    y = x / a;
else
    y = (1 - x) / (1 - a);
end
end

function weight = time_varying_weight(t, max_iter, initial_weight, final_weight)
% Linearly decreasing inertia weight: exploration early, exploitation late
if nargin < 3
    initial_weight = 0.9;
end
if nargin < 4
    final_weight = 0.4;
end
weight = initial_weight - (initial_weight - final_weight) * (t / max_iter);
end

% Example driver: save as a separate script, since a function file
% cannot contain top-level script code.

% Parameters for optimization
pop_size = 30;
bounds = [50,     200;    % num_units:     50 to 200
          0.0001, 0.01;   % learning_rate: 1e-4 to 1e-2
          16,     128];   % batch_size:    16 to 128
max_iter = 100;

% Load your training and validation data (column-vector time series)
% train_data = ...
% val_data = ...

% Run the TAVOA to optimize LSTM hyperparameters for the IMFs
[best_params, best_loss] = optimize_LSTM_with_TAVO_IMF(pop_size, bounds, max_iter, train_data, val_data);
disp('Best Parameters:');
disp(best_params);
disp('Best Loss:');
disp(best_loss);
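
% If no real data is at hand, a synthetic two-tone signal (hypothetical,
% for illustration only) can stand in for train_data and val_data in the
% driver above; both must be column-vector time series:
rng(0);                                 % reproducible noise
t_axis = (0:0.01:20)';
signal = sin(2*pi*0.5*t_axis) + 0.5*sin(2*pi*2*t_axis) + 0.1*randn(size(t_axis));
split = floor(0.8 * numel(signal));     % 80/20 chronological split
train_data = signal(1:split);
val_data = signal(split+1:end);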
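
% Note that remd is not a built-in MATLAB function; the listing assumes a
% robust EMD (REMD) implementation returning the IMFs as columns plus a
% residual. If none is available, a minimal stand-in (a sketch, not REMD
% itself) can delegate to the built-in emd from the Signal Processing
% Toolbox, which has the same output shape:
function [imfs, residual] = remd(x)
% Stand-in decomposition: built-in EMD returns each IMF as a column of
% imfs and the remaining trend as the residual column vector
[imfs, residual] = emd(x(:));
end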