# Packages (install once if missing, then load)
if (!requireNamespace('keras',quietly=TRUE)) install.packages('keras')
library(keras)
if (!requireNamespace('ggplot2',quietly=TRUE)) install.packages('ggplot2')
library(ggplot2)
if (!requireNamespace('pracma',quietly=TRUE)) install.packages('pracma')
library(pracma)

rm(list=ls())

#========================================================================
# Setting
#========================================================================
# Working directory
setwd("C:/Users/takumainai/OneDrive - 国立研究開発法人産業技術総合研究所/2023_0512_imac_analysis/Experiment")

#========================================================================
# Read data
#========================================================================
# Input and output
Mat_Input = read.csv('Mat_Type100_Input.csv',header=F)
#Mat_Input_Option = read.csv('Mat_Type100_Input_Option.csv',header=F)
Mat_Output = read.csv('Mat_Type100_Output.csv',header=F)

# k-fold validation
k_SubjectNumber = list()
k_DataNumber = list()
for (i in 1:5){
  k_SubjectNumber[[i]] = read.csv(paste('k',i,'_SubjectNumber.csv',sep=''),header=F)
  k_DataNumber[[i]] = read.csv(paste('k',i,'_Type100_DataNumber.csv',sep=''),header=F)
}

#========================================================================
# Function
#========================================================================
# Tile a matrix n_row x n_col times via the Kronecker product
f_rep = function(Mat,n_row,n_col){
  Mat_new = matrix(1,nrow=n_row,ncol=n_col) %x% Mat
  return(Mat_new)
}

#------------------------------------------------------------------------
# Fit PCA on Data_model and return standardized PC scores; optionally
# project Data_val using the model data's means/SDs/rotation
f_get_PCS_Scaled = function(Data_model,N=NULL,Data_val=NULL){
  # Data
  # Not scaled
  Data_NotScaled = as.matrix(Data_model)               # n x v
  Data_NotScaled_Mean = apply(Data_NotScaled,2,mean)   # 1 x v
  Data_NotScaled_SD = apply(Data_NotScaled,2,sd)       # 1 x v
  # Scaled
  Data_Scaled = apply(Data_NotScaled,2,scale)          # n x v

  # Result for PCA
  Res = prcomp(Data_NotScaled,scale=T)
  # Addition
  sqrt_EigenValue = Res$sdev      # square roots of the eigenvalues, n
  EigenValue = sqrt_EigenValue^2  # eigenvalues, n
  NumOfPC = sum(EigenValue>=1)    # number of extracted PCs (eigenvalue >= 1), scalar
  if (!is.null(N)) {
    NumOfPC = N
  }
  Rotation = Res$rotation[,1:NumOfPC]  # eigenvectors of the extracted PC axes, v x NumOfPC from v x v (or n)
  Center = Res$center                  # per-variable means used in the analysis (= Data_NotScaled_Mean), v
  Scale = Res$scale                    # per-variable SDs used in the analysis (= Data_NotScaled_SD), v
  CR = EigenValue/sum(EigenValue)      # contribution ratio [ratio]
  CCR = cumsum(CR)                     # cumulative contribution ratio [ratio]
  ComponentMatrix = t(t(Res$rotation)*sqrt_EigenValue)[,1:NumOfPC]  # component matrix (PC loadings), v x NumOfPC from v x v (or n)
  # Check
  # PCS_NotScaled == Data_Scaled %*% Rotation  # n x NumOfPC

  # Principal component score
  # Not scaled
  PCS_NotScaled = Res$x[,1:NumOfPC]                 # unstandardized PC scores, n x NumOfPC
  PCS_NotScaled_Mean = apply(PCS_NotScaled,2,mean)  # 1 x NumOfPC
  PCS_NotScaled_SD = apply(PCS_NotScaled,2,sd)      # 1 x NumOfPC
  # Scaled
  PCS_Scaled = apply(PCS_NotScaled,2,scale)         # standardized PC scores, n x NumOfPC
  PCS_Scaled_model = PCS_Scaled

  if (!is.null(Data_val)) {
    # For val: scale with the model data's statistics, then project
    Data_NotScaled_val = as.matrix(Data_val)
    Data_Scaled_val = (Data_NotScaled_val-f_rep(matrix(Data_NotScaled_Mean,1,length(Data_NotScaled_Mean)),dim(Data_NotScaled_val)[1],1))/
      f_rep(matrix(Data_NotScaled_SD,1,length(Data_NotScaled_SD)),dim(Data_NotScaled_val)[1],1)
    PCS_NotScaled_val = Data_Scaled_val %*% Rotation
    PCS_Scaled_val = (PCS_NotScaled_val-f_rep(matrix(PCS_NotScaled_Mean,1,length(PCS_NotScaled_Mean)),dim(PCS_NotScaled_val)[1],1))/
      f_rep(matrix(PCS_NotScaled_SD,1,length(PCS_NotScaled_SD)),dim(PCS_NotScaled_val)[1],1)
    # Return
    return(list(PCS_Scaled_model,PCS_Scaled_val,CCR[1:NumOfPC]))
  } else {
    # Return
    return(list(PCS_Scaled_model,0,CCR[1:NumOfPC]))
  }
}
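#------------------------------------------------------------------------
# Minimal sketch of the two helpers above (illustration only; the toy
# matrix and the choice of N=3 are assumptions, not project data).
if (1 == 0){
  set.seed(1)
  Toy = matrix(rnorm(100*6),100,6) # 100 observations x 6 variables
  # f_rep tiles a 1 x v matrix into an n x v matrix via the Kronecker product
  ColMeans = matrix(apply(Toy,2,mean),1,6)
  Tiled = f_rep(ColMeans,100,1) # 100 x 6, every row equals the column means
  # f_get_PCS_Scaled: fit PCA on rows 1:80, project rows 81:100 with the
  # model means/SDs/rotation, and return standardized scores plus the CCR
  Res = f_get_PCS_Scaled(Toy[1:80,],N=3,Toy[81:100,])
  dim(Res[[1]]) # 80 x 3, standardized PC scores of the model data
  dim(Res[[2]]) # 20 x 3, standardized PC scores of the held-out data
  Res[[3]]      # cumulative contribution ratio of the first 3 PCs
}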
#------------------------------------------------------------------------
# Inverse transform: map standardized PC scores back to the original
# (unscaled) variable space, using the same PCA bookkeeping as
# f_get_PCS_Scaled refitted on Data_model
f_get_Data_NotScaled = function(Data_model,N=NULL,PCS_Scaled_test){
  # Data
  # Not scaled
  Data_NotScaled = as.matrix(Data_model)               # n x v
  Data_NotScaled_Mean = apply(Data_NotScaled,2,mean)   # 1 x v
  Data_NotScaled_SD = apply(Data_NotScaled,2,sd)       # 1 x v
  # Scaled
  Data_Scaled = apply(Data_NotScaled,2,scale)          # n x v

  # Result for PCA
  Res = prcomp(Data_NotScaled,scale=T)
  # Addition
  sqrt_EigenValue = Res$sdev      # square roots of the eigenvalues, n
  EigenValue = sqrt_EigenValue^2  # eigenvalues, n
  NumOfPC = sum(EigenValue>=1)    # number of extracted PCs (eigenvalue >= 1), scalar
  if (!is.null(N)) {
    NumOfPC = N
  }
  Rotation = Res$rotation[,1:NumOfPC]  # eigenvectors of the extracted PC axes, v x NumOfPC from v x v (or n)
  Center = Res$center                  # per-variable means used in the analysis (= Data_NotScaled_Mean), v
  Scale = Res$scale                    # per-variable SDs used in the analysis (= Data_NotScaled_SD), v
  CR = EigenValue/sum(EigenValue)      # contribution ratio [ratio]
  CCR = cumsum(CR)                     # cumulative contribution ratio [ratio]
  ComponentMatrix = t(t(Res$rotation)*sqrt_EigenValue)[,1:NumOfPC]  # component matrix (PC loadings), v x NumOfPC from v x v (or n)
  # Check
  # PCS_NotScaled == Data_Scaled %*% Rotation  # n x NumOfPC

  # Principal component score
  # Not scaled
  PCS_NotScaled = Res$x[,1:NumOfPC]                 # unstandardized PC scores, n x NumOfPC
  PCS_NotScaled_Mean = apply(PCS_NotScaled,2,mean)  # 1 x NumOfPC
  PCS_NotScaled_SD = apply(PCS_NotScaled,2,sd)      # 1 x NumOfPC
  # Scaled
  #PCS_Scaled = apply(PCS_NotScaled,2,scale)        # standardized PC scores, n x NumOfPC
  #PCS_Scaled_model = PCS_Scaled

  # PCS_Scaled_test to Data_NotScaled_test
  PCS_NotScaled_test = (as.matrix(PCS_Scaled_test)*f_rep(matrix(PCS_NotScaled_SD,1,length(PCS_NotScaled_SD)),dim(PCS_Scaled_test)[1],1)) +
    f_rep(matrix(PCS_NotScaled_Mean,1,length(PCS_NotScaled_Mean)),dim(PCS_Scaled_test)[1],1)
  Data_Scaled_test = PCS_NotScaled_test %*% t(Rotation)
  Data_NotScaled_test = (Data_Scaled_test*f_rep(matrix(Data_NotScaled_SD,1,length(Data_NotScaled_SD)),dim(Data_Scaled_test)[1],1)) +
    f_rep(matrix(Data_NotScaled_Mean,1,length(Data_NotScaled_Mean)),dim(Data_Scaled_test)[1],1)

  # Return
  return(Data_NotScaled_test)
}
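#------------------------------------------------------------------------
# Round-trip sketch: f_get_Data_NotScaled refits the same PCA on Data_model,
# so feeding it the scores returned by f_get_PCS_Scaled should reproduce the
# input exactly when all PCs are kept (the toy data below is an assumption).
if (1 == 0){
  set.seed(2)
  Toy = matrix(rnorm(50*4),50,4)
  Scores = f_get_PCS_Scaled(Toy,N=4)[[1]]     # keep all 4 PCs
  Back = f_get_Data_NotScaled(Toy,N=4,Scores) # invert the transform
  max(abs(Back-Toy))                          # ~0 up to numerical error
}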
#------------------------------------------------------------------------
# Train on the training folds and monitor the validation fold
f_MachineLearning_TrainingForVal = function(n_I,n_O,Data_I,Data_O,ValDataNumber,TestDataNumber,HiddenLayer,HiddenUnit,NumEpoch,BatchSize,Verbose,Function,Preprocessing,drop_rate,lr_rate){
  # For training
  Data_I_train_BeforePCA = Data_I[-c(ValDataNumber[[1]],TestDataNumber[[1]]),]
  Data_O_train_BeforePCA = Data_O[-c(ValDataNumber[[1]],TestDataNumber[[1]]),]
  #Data_I_Option_train_BeforePCA = Data_I_Option[-c(ValDataNumber[[1]],TestDataNumber[[1]]),]
  # For validation
  Data_I_val_BeforePCA = Data_I[ValDataNumber[[1]],]
  Data_O_val_BeforePCA = Data_O[ValDataNumber[[1]],]
  #Data_I_Option_val_BeforePCA = Data_I_Option[ValDataNumber[[1]],]

  # PCA
  Data_I_PCA = f_get_PCS_Scaled(cbind(Data_I_train_BeforePCA),n_I,cbind(Data_I_val_BeforePCA))
  Data_O_PCA = f_get_PCS_Scaled(Data_O_train_BeforePCA,n_O,Data_O_val_BeforePCA)
  # For training
  Data_I_train = Data_I_PCA[[1]]
  Data_O_train = Data_O_PCA[[1]]
  # For validation
  Data_I_val = Data_I_PCA[[2]]
  Data_O_val = Data_O_PCA[[2]]

  # [1] Standardization or [2] Normalization
  if (Preprocessing == 1){
    # Data not scaled
    Data_I_train_NotScaled = as.matrix(Data_I_train)          # n x v
    Data_I_train_Mean = apply(Data_I_train_NotScaled,2,mean)  # 1 x v
    Data_I_train_SD = apply(Data_I_train_NotScaled,2,sd)      # 1 x v
    # Data scaled
    Data_I_train_Scaled = apply(Data_I_train_NotScaled,2,scale)  # n x v
  }else if (Preprocessing == 2){
    # Data not scaled
    Data_I_train_NotScaled = as.matrix(Data_I_train)          # n x v
    Data_I_train_Max = apply(Data_I_train_NotScaled,2,max)    # 1 x v
    Data_I_train_Min = apply(Data_I_train_NotScaled,2,min)    # 1 x v
    Max_train = f_rep(matrix(Data_I_train_Max,1,length(Data_I_train_Max)),dim(Data_I_train_NotScaled)[1],1)  # n x v
    Min_train = f_rep(matrix(Data_I_train_Min,1,length(Data_I_train_Min)),dim(Data_I_train_NotScaled)[1],1)  # n x v
    # Data scaled
    Data_I_train_Scaled = (Data_I_train_NotScaled-Min_train)/(Max_train-Min_train)  # n x v
  }

  # [1] Standardization or [2] Normalization
  # (validation data is scaled with the training statistics)
  if (Preprocessing == 1){
    Mean_val = f_rep(matrix(Data_I_train_Mean,1,length(Data_I_train_Mean)),dim(Data_I_val)[1],1)
    SD_val = f_rep(matrix(Data_I_train_SD,1,length(Data_I_train_SD)),dim(Data_I_val)[1],1)
    Data_I_val_Scaled = (Data_I_val-Mean_val)/SD_val
  } else if (Preprocessing == 2){
    Max_val = f_rep(matrix(Data_I_train_Max,1,length(Data_I_train_Max)),dim(Data_I_val)[1],1)
    Min_val = f_rep(matrix(Data_I_train_Min,1,length(Data_I_train_Min)),dim(Data_I_val)[1],1)
    Data_I_val_Scaled = (Data_I_val-Min_val)/(Max_val-Min_val)
  }

  #--------------------
  # Machine Learning
  #--------------------
  # Building of model
  if (HiddenLayer == 1){
    Model = keras_model_sequential() %>% # kernel_regularizer=regularizer_l2(0.001)
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 2){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 3){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 4){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[4],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 5){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[4],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[5],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }

  # Compiling
  # Model %>% compile(
  #   optimizer = 'adam',
  #   loss = 'mse',
  #   metrics = 'mae')
  Model %>% compile(
    optimizer = optimizer_adam(learning_rate=lr_rate),
    loss = 'mse',
    metrics = 'mae')

  # Fitting
  History = Model %>% fit(Data_I_train_Scaled,Data_O_train,epochs=NumEpoch,batch_size=BatchSize,verbose=Verbose,validation_data=list(Data_I_val_Scaled,Data_O_val))

  # Execution (use for test data)
  #Results = Model %>% evaluate(Data_I_val_Scaled,Data_O_val)
  # Extraction
  #LS = Results[1] # Loss
  #AE = Results[2] # Absolute error
  #PV = Model %>% predict(Data_I_val_Scaled) # Predicted values
  #AV = Data_O_val # Actual values

  # Return
  return(History)
}
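#------------------------------------------------------------------------
# The if/else chain in the function above (and in the one below) differs
# only in the number of hidden blocks. An equivalent loop-based builder
# (sketch only, not used anywhere; it relies on keras layers modifying the
# model in place when piped):
if (1 == 0){
  f_build_model = function(n_in,n_out,HiddenLayer,HiddenUnit,Function,drop_rate){
    Model = keras_model_sequential()
    Model %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=n_in) %>%
      layer_dropout(rate=drop_rate)
    if (HiddenLayer >= 2){
      for (i in 2:HiddenLayer){
        Model %>%
          layer_dense(units=HiddenUnit[i],activation=Function) %>%
          layer_dropout(rate=drop_rate)
      }
    }
    Model %>% layer_dense(units=n_out)
    return(Model)
  }
}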
#------------------------------------------------------------------------
# Train on all non-test data and evaluate on the test fold
f_MachineLearning_TrainingForTest = function(n_I,n_O,Data_I,Data_O,ValDataNumber,TestDataNumber,HiddenLayer,HiddenUnit,NumEpoch,BatchSize,Verbose,Function,Preprocessing,drop_rate,lr_rate){
  # For training
  Data_I_train_BeforePCA = Data_I[-TestDataNumber[[1]],]
  Data_O_train_BeforePCA = Data_O[-TestDataNumber[[1]],]
  #Data_I_Option_train_BeforePCA = Data_I_Option[-TestDataNumber[[1]],]
  # For test
  Data_I_test_BeforePCA = Data_I[TestDataNumber[[1]],]
  Data_O_test_BeforePCA = Data_O[TestDataNumber[[1]],]
  #Data_I_Option_test_BeforePCA = Data_I_Option[TestDataNumber[[1]],]

  # PCA
  Data_I_PCA = f_get_PCS_Scaled(cbind(Data_I_train_BeforePCA),n_I,cbind(Data_I_test_BeforePCA))
  Data_O_PCA = f_get_PCS_Scaled(Data_O_train_BeforePCA,n_O,Data_O_test_BeforePCA)
  # For training
  Data_I_train = Data_I_PCA[[1]]
  Data_O_train = Data_O_PCA[[1]]
  # For test
  Data_I_test = Data_I_PCA[[2]]
  Data_O_test = Data_O_PCA[[2]]

  # [1] Standardization or [2] Normalization
  if (Preprocessing == 1){
    # Data not scaled
    Data_I_train_NotScaled = as.matrix(Data_I_train)          # n x v
    Data_I_train_Mean = apply(Data_I_train_NotScaled,2,mean)  # 1 x v
    Data_I_train_SD = apply(Data_I_train_NotScaled,2,sd)      # 1 x v
    # Data scaled
    Data_I_train_Scaled = apply(Data_I_train_NotScaled,2,scale)  # n x v
  }else if (Preprocessing == 2){
    # Data not scaled
    Data_I_train_NotScaled = as.matrix(Data_I_train)          # n x v
    Data_I_train_Max = apply(Data_I_train_NotScaled,2,max)    # 1 x v
    Data_I_train_Min = apply(Data_I_train_NotScaled,2,min)    # 1 x v
    Max_train = f_rep(matrix(Data_I_train_Max,1,length(Data_I_train_Max)),dim(Data_I_train_NotScaled)[1],1)  # n x v
    Min_train = f_rep(matrix(Data_I_train_Min,1,length(Data_I_train_Min)),dim(Data_I_train_NotScaled)[1],1)  # n x v
    # Data scaled
    Data_I_train_Scaled = (Data_I_train_NotScaled-Min_train)/(Max_train-Min_train)  # n x v
  }

  # [1] Standardization or [2] Normalization
  # (test data is scaled with the training statistics)
  if (Preprocessing == 1){
    Mean_test = f_rep(matrix(Data_I_train_Mean,1,length(Data_I_train_Mean)),dim(Data_I_test)[1],1)
    SD_test = f_rep(matrix(Data_I_train_SD,1,length(Data_I_train_SD)),dim(Data_I_test)[1],1)
    Data_I_test_Scaled = (Data_I_test-Mean_test)/SD_test
  } else if (Preprocessing == 2){
    Max_test = f_rep(matrix(Data_I_train_Max,1,length(Data_I_train_Max)),dim(Data_I_test)[1],1)
    Min_test = f_rep(matrix(Data_I_train_Min,1,length(Data_I_train_Min)),dim(Data_I_test)[1],1)
    Data_I_test_Scaled = (Data_I_test-Min_test)/(Max_test-Min_test)
  }

  #--------------------
  # Machine Learning
  #--------------------
  # Building of model
  if (HiddenLayer == 1){
    Model = keras_model_sequential() %>% # kernel_regularizer=regularizer_l2(0.001)
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 2){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 3){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 4){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[4],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }else if (HiddenLayer == 5){
    Model = keras_model_sequential() %>%
      layer_dense(units=HiddenUnit[1],activation=Function,input_shape=dim(Data_I_train_Scaled)[2]) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[2],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[3],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[4],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=HiddenUnit[5],activation=Function) %>%
      layer_dropout(rate=drop_rate) %>%
      layer_dense(units=dim(Data_O_train)[2])
  }

  # Compiling
  # Model %>% compile(
  #   optimizer = 'adam',
  #   loss = 'mse',
  #   metrics = 'mae')
  Model %>% compile(
    optimizer = optimizer_adam(learning_rate=lr_rate),
    loss = 'mse',
    metrics = 'mae')

  # Fitting
  History = Model %>% fit(Data_I_train_Scaled,Data_O_train,epochs=NumEpoch,batch_size=BatchSize,verbose=Verbose)

  # Execution (use for test data)
  Results = Model %>% evaluate(Data_I_test_Scaled,Data_O_test)
  # Extraction
  LS = Results[1] # Loss
  AE = Results[2] # Absolute error
  PV = Model %>% predict(Data_I_test_Scaled) # Predicted values
  AV = Data_O_test # Actual values

  # Return
  return(list(History,LS,AE,PV,AV))
}
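#------------------------------------------------------------------------
# Both training functions scale the held-out data with the training
# statistics on purpose; calling scale() on the held-out set itself would
# leak its own mean/SD. Minimal illustration (the toy vectors are assumptions):
if (1 == 0){
  x_train = c(1,2,3,4,5)
  x_test = c(6,7)
  (x_test-mean(x_train))/sd(x_train) # correct: training statistics
  scale(x_test)                      # not equivalent: uses the test mean/SD
}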
#========================================================================
# Main
#========================================================================
#========#
# Check  #
#========#
if (1 == 0){
  L_ValDataNumber = k_DataNumber[1:4]
  TestDataNumber = k_DataNumber[[5]]
  NumOfPC_I = matrix(0,4,1)
  NumOfPC_O = matrix(0,4,1)
  for (i_ValDataNumber in 1:4){
    ValDataNumber = L_ValDataNumber[[i_ValDataNumber]]
    TempData_I = cbind(Mat_Input[-c(ValDataNumber[[1]],TestDataNumber[[1]]),])
    TempData_O = Mat_Output[-c(ValDataNumber[[1]],TestDataNumber[[1]]),]
    TempRes_I = f_get_PCS_Scaled(TempData_I,N=NULL)
    TempRes_O = f_get_PCS_Scaled(TempData_O,N=NULL)
    # Return
    NumOfPC_I[i_ValDataNumber,] = dim(TempRes_I[[1]])[2]
    NumOfPC_O[i_ValDataNumber,] = dim(TempRes_O[[1]])[2]
  }
}

# Setting NumOfPC
n_I = 65 # 2023_0512
n_O = 17 # 2023_0512

if (1 == 0){ # check of main
  TempData_I = cbind(Mat_Input[-c(TestDataNumber[[1]]),])
  TempData_O = Mat_Output[-c(TestDataNumber[[1]]),]
  TempRes_I = f_get_PCS_Scaled(TempData_I,N=NULL)
  TempRes_O = f_get_PCS_Scaled(TempData_O,N=NULL)
  # Return
  main_NumOfPC_I = dim(TempRes_I[[1]])[2]
  main_NumOfPC_O = dim(TempRes_O[[1]])[2]
  main_NumOfMaxCCR_I = max(TempRes_I[[3]])
  main_NumOfMaxCCR_O = max(TempRes_O[[3]])
}

#========#
# Step 1 #
#========#
# Not fixed parameters
L_ValDataNumber = k_DataNumber[1:4]
L_HiddenUnit = list(c(160,80,40),c(120,60,30),c(80,40,20),c(40,20,20),
                    c(80,40),c(60,30),c(40,20),c(20,20),
                    c(80,0),c(60,0),c(40,0),c(20,0))
L_HiddenLayer = numeric(length(L_HiddenUnit))
for (i in 1:length(L_HiddenUnit)){
  L_HiddenLayer[i] = sum(L_HiddenUnit[[i]] != 0) # number of nonzero hidden layers per architecture
}
L_BatchSize = c(128,256,512)
L_drop_rate = c(0,0.25,0.50)
L_lr_rate = c(0.01,0.001,0.0001)

# Fixed parameters
TestDataNumber = k_DataNumber[[5]]
NumEpoch = 300
Verbose = 1
Function = 'swish'
Preprocessing = 1 # [1] Standardization or [2] Normalization
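#------------------------------------------------------------------------
# Sanity check of the Step-1 search space (sketch): 4 folds x 12
# architectures x 3 batch sizes x 3 dropout rates x 3 learning rates
if (1 == 0){
  prod(c(length(L_ValDataNumber),length(L_HiddenLayer),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate))) # 1296 training runs
}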
if (1 == 0){
  #---------------------------------
  # Time AC_OM
  #---------------------------------
  # Data
  Data_I = as.matrix(Mat_Input)
  Data_O = as.matrix(Mat_Output)
  #Data_I_Option = as.matrix(Mat_Input_Option)
  n = 1

  # Machine learning
  Time_Start = Sys.time()
  for (i_ValDataNumber in 1:length(L_ValDataNumber)){
    for (i_layer in 1:length(L_HiddenLayer)){
      for (i_BatchSize in 1:length(L_BatchSize)){
        for (i_drop_rate in 1:length(L_drop_rate)){
          for (i_lr_rate in 1:length(L_lr_rate)){
            print(paste0('k = ',i_ValDataNumber,'/',length(L_ValDataNumber)))
            print(paste0('Layer = ',i_layer,'/',length(L_HiddenLayer)))
            print(paste0('Batch size = ',i_BatchSize,'/',length(L_BatchSize)))
            print(paste0('Dropout rate = ',i_drop_rate,'/',length(L_drop_rate)))
            print(paste0('Learning rate = ',i_lr_rate,'/',length(L_lr_rate)))
            print(paste0('Total => ',n,'/',prod(c(length(L_ValDataNumber),length(L_HiddenLayer),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate)))))
            # Not fixed parameters
            ValDataNumber = L_ValDataNumber[[i_ValDataNumber]]
            HiddenLayer = L_HiddenLayer[i_layer]
            HiddenUnit = L_HiddenUnit[[i_layer]]
            BatchSize = L_BatchSize[[i_BatchSize]]
            drop_rate = L_drop_rate[[i_drop_rate]]
            lr_rate = L_lr_rate[[i_lr_rate]]
            # Machine learning
            History = f_MachineLearning_TrainingForVal(n_I,n_O,Data_I,Data_O,ValDataNumber,TestDataNumber,HiddenLayer,HiddenUnit,NumEpoch,BatchSize,Verbose,Function,Preprocessing,drop_rate,lr_rate)
            # Save
            saveRDS(History,file=paste('Output_ForML_Type100_AC_OM/History1_k',i_ValDataNumber,'_Layer',i_layer,'_BatchSize',i_BatchSize,'_DropoutRate',i_drop_rate,'_LearningRate',i_lr_rate,'.obj',sep=''))
            n = n + 1
          }
        }
      }
    }
  }
  Time_End = Sys.time()
}
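#------------------------------------------------------------------------
# The five nested loops above could equivalently iterate over a precomputed
# grid, which makes resuming a partially finished search easier. Sketch only;
# expand.grid varies its first column fastest, matching the innermost loop:
if (1 == 0){
  Grid = expand.grid(i_lr_rate=1:length(L_lr_rate),
                     i_drop_rate=1:length(L_drop_rate),
                     i_BatchSize=1:length(L_BatchSize),
                     i_layer=1:length(L_HiddenLayer),
                     i_ValDataNumber=1:length(L_ValDataNumber))
  nrow(Grid) # 1296, one row of hyperparameter indices per training run
  Grid[1,]   # indices of run n = 1
}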
#------------------------------------------------------------------------
# After training: collect the saved histories into arrays
# (dimensions: layer x epoch x fold x condition x batch size x dropout x learning rate)
L_Condition = list('AC_OM')
Arr_LS_train = array(0,c(length(L_HiddenLayer),NumEpoch,length(L_ValDataNumber),length(L_Condition),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate)))
Arr_AE_train = array(0,c(length(L_HiddenLayer),NumEpoch,length(L_ValDataNumber),length(L_Condition),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate)))
Arr_LS_val = array(0,c(length(L_HiddenLayer),NumEpoch,length(L_ValDataNumber),length(L_Condition),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate)))
Arr_AE_val = array(0,c(length(L_HiddenLayer),NumEpoch,length(L_ValDataNumber),length(L_Condition),length(L_BatchSize),length(L_drop_rate),length(L_lr_rate)))
i_Condition = 1
for (i_ValDataNumber in 1:length(L_ValDataNumber)){
  for (i_layer in 1:length(L_HiddenLayer)){
    for (i_BatchSize in 1:length(L_BatchSize)){
      for (i_drop_rate in 1:length(L_drop_rate)){
        for (i_lr_rate in 1:length(L_lr_rate)){
          History = readRDS(paste('Output_ForML_Type100_AC_OM/History1_k',i_ValDataNumber,'_Layer',i_layer,'_BatchSize',i_BatchSize,'_DropoutRate',i_drop_rate,'_LearningRate',i_lr_rate,'.obj',sep=''))
          Arr_LS_train[i_layer,,i_ValDataNumber,i_Condition,i_BatchSize,i_drop_rate,i_lr_rate] = as.array(matrix(History$metrics$loss,1,NumEpoch))
          Arr_AE_train[i_layer,,i_ValDataNumber,i_Condition,i_BatchSize,i_drop_rate,i_lr_rate] = as.array(matrix(History$metrics$mae,1,NumEpoch))
          Arr_LS_val[i_layer,,i_ValDataNumber,i_Condition,i_BatchSize,i_drop_rate,i_lr_rate] = as.array(matrix(History$metrics$val_loss,1,NumEpoch))
          Arr_AE_val[i_layer,,i_ValDataNumber,i_Condition,i_BatchSize,i_drop_rate,i_lr_rate] = as.array(matrix(History$metrics$val_mae,1,NumEpoch))
        }
      }
    }
  }
}

# Column headers for the flattened hyperparameter grid
Header = c()
for (i_lr_rate in 1:length(L_lr_rate)){
  for (i_drop_rate in 1:length(L_drop_rate)){
    for (i_BatchSize in 1:length(L_BatchSize)){
      for (i_layer in 1:length(L_HiddenLayer)){
        temp = paste0('HL',i_layer,'_BS',i_BatchSize,'_DR',i_drop_rate,'_LR',i_lr_rate)
        Header = c(Header,temp)
      }
    }
  }
}

# Average over folds, then flatten to epoch x grid and export
i_Condition = 1
Arr_Mean_LS_train = apply(Arr_LS_train,c(1,2,4,5,6,7),mean)
Arr_Mean_AE_train = apply(Arr_AE_train,c(1,2,4,5,6,7),mean)
Arr_Mean_LS_val = apply(Arr_LS_val,c(1,2,4,5,6,7),mean)
Arr_Mean_AE_val = apply(Arr_AE_val,c(1,2,4,5,6,7),mean)

temp1 = aperm(Arr_Mean_LS_train,c(2,1,4,5,6,3))
temp2 = array(temp1,c(NumEpoch,length(L_HiddenLayer)*length(L_BatchSize)*length(L_drop_rate)*length(L_lr_rate)))
temp3 = as.data.frame(temp2)
names(temp3) = Header
write.csv(temp3,paste('Output_ForML_Type100_LS_AE/History1_LS_train_',L_Condition[[i_Condition]],'.csv',sep=''))

temp1 = aperm(Arr_Mean_AE_train,c(2,1,4,5,6,3))
temp2 = array(temp1,c(NumEpoch,length(L_HiddenLayer)*length(L_BatchSize)*length(L_drop_rate)*length(L_lr_rate)))
temp3 = as.data.frame(temp2)
names(temp3) = Header
write.csv(temp3,paste('Output_ForML_Type100_LS_AE/History1_AE_train_',L_Condition[[i_Condition]],'.csv',sep=''))

temp1 = aperm(Arr_Mean_LS_val,c(2,1,4,5,6,3))
temp2 = array(temp1,c(NumEpoch,length(L_HiddenLayer)*length(L_BatchSize)*length(L_drop_rate)*length(L_lr_rate)))
temp3 = as.data.frame(temp2)
names(temp3) = Header
write.csv(temp3,paste('Output_ForML_Type100_LS_AE/History1_LS_val_',L_Condition[[i_Condition]],'.csv',sep=''))

temp1 = aperm(Arr_Mean_AE_val,c(2,1,4,5,6,3))
temp2 = array(temp1,c(NumEpoch,length(L_HiddenLayer)*length(L_BatchSize)*length(L_drop_rate)*length(L_lr_rate)))
temp3 = as.data.frame(temp2)
names(temp3) = Header
write.csv(temp3,paste('Output_ForML_Type100_LS_AE/History1_AE_val_',L_Condition[[i_Condition]],'.csv',sep=''))

# Check: grid point with the lowest validation MAE at the final epoch
which.min(temp3[NumEpoch,])
# 2023 0513: HL9_BS3_DR3_LR2
# Hidden layers: 1 (architecture 9 = 80 units)
# Batch size: 512
# Dropout rate: 0.50
# Learning rate: 0.001
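#------------------------------------------------------------------------
# The exported column names encode grid indices (e.g. 'HL9_BS3_DR3_LR2').
# A small decoder (sketch; f_decode_header is a hypothetical helper that
# assumes the HL/BS/DR/LR naming built into Header above):
if (1 == 0){
  f_decode_header = function(h){
    idx = as.numeric(regmatches(h,gregexpr('[0-9]+',h))[[1]])
    list(HiddenUnit=L_HiddenUnit[[idx[1]]],
         BatchSize=L_BatchSize[idx[2]],
         drop_rate=L_drop_rate[idx[3]],
         lr_rate=L_lr_rate[idx[4]])
  }
  f_decode_header('HL9_BS3_DR3_LR2')
  # -> units c(80,0), batch size 512, dropout 0.50, learning rate 0.001
}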
#========================================================================
#========#
# Main   #
#========#
# Not fixed parameters (best Step-1 configuration)
L_ValDataNumber = 0
L_HiddenLayer = c(1)
L_HiddenUnit = list(c(80,0))

# Fixed parameters
TestDataNumber = k_DataNumber[[5]]
NumEpoch = 300
Verbose = 1
Function = 'swish'
Preprocessing = 1 # [1] Standardization or [2] Normalization

# Fixed parameters (from Step 1)
BatchSize = L_BatchSize[[3]] # 512
drop_rate = L_drop_rate[[3]] # 0.50
lr_rate = L_lr_rate[[2]]     # 0.001

if (1 == 0){
  #---------------------------------
  # Time AC_OM
  #---------------------------------
  # Data
  Data_I = as.matrix(Mat_Input)
  Data_O = as.matrix(Mat_Output)
  #Data_I_Option = as.matrix(Mat_Input_Option)

  # Machine learning
  Time_Start = Sys.time()
  # Not fixed parameters
  ValDataNumber = L_ValDataNumber
  HiddenLayer = L_HiddenLayer
  HiddenUnit = L_HiddenUnit[[1]]
  # Machine learning
  Return = f_MachineLearning_TrainingForTest(n_I,n_O,Data_I,Data_O,ValDataNumber,TestDataNumber,HiddenLayer,HiddenUnit,NumEpoch,BatchSize,Verbose,Function,Preprocessing,drop_rate,lr_rate)
  History = Return[[1]]
  LS = Return[[2]]
  AE = Return[[3]]
  PV = Return[[4]]
  AV = Return[[5]]
  # Save
  saveRDS(History,file=paste('Output_ForML_Type100_AC_OM/HistoryMain.obj',sep=''))
  write.csv(cbind(AV,PV),file=paste('Output_ForML_Type100_AC_OM/AV_PV_Main.csv',sep=''))
  Time_End = Sys.time()
}

if (1 == 0){
  # Re-read the saved actual/predicted PC scores (17 + 17 columns)
  AV_PV = read.csv(paste('Output_ForML_Type100_AC_OM/AV_PV_Main.csv',sep=''))[,2:35]
  AV = AV_PV[,c(1:17)]
  PV = AV_PV[,c(18:34)]
}

Diff = apply(abs(PV-AV),2,mean)

# Reconstruct waveforms from the predicted PC scores
temp_PV = PV
Data_model = as.matrix(Mat_Output[-TestDataNumber[[1]],])
Data_PV = f_get_Data_NotScaled(Data_model,N=n_O,temp_PV)
Data_AV = as.matrix(Mat_Output[TestDataNumber[[1]],])
Wave1_PV = array(t(Data_PV),c(200,3,dim(TestDataNumber)[1])) # 200 samples x 3 joints x test curves
Wave2_PV = aperm(Wave1_PV,c(1,3,2))
Wave1_AV = array(t(Data_AV),c(200,3,dim(TestDataNumber)[1]))
Wave2_AV = aperm(Wave1_AV,c(1,3,2))

#------------------------------------------------------------------------
# RMSE, NRMSE, and MAE per test curve and joint
RMSE = sqrt(apply((Wave2_PV-Wave2_AV)^2,c(2,3),mean))
RMSE_AVE = round(apply(RMSE,2,mean),2)
RMSE_SD = round(apply(RMSE,2,sd),2)

Wave_Max_AV = apply(Wave2_AV,c(2,3),max)
Wave_Min_AV = apply(Wave2_AV,c(2,3),min)
Wave_Max_PV = apply(Wave2_PV,c(2,3),max)
Wave_Min_PV = apply(Wave2_PV,c(2,3),min)
Wave_MaxMin = 0.5*((Wave_Max_AV-Wave_Min_AV)+(Wave_Max_PV-Wave_Min_PV)) # based on references
NRMSE = RMSE/(Wave_MaxMin)
NRMSE_AVE = round(apply(NRMSE,2,mean)*100,1)
NRMSE_SD = round(apply(NRMSE,2,sd)*100,1)

MAE = apply(abs(Wave2_PV-Wave2_AV),c(2,3),mean)
MAE_AVE = round(apply(MAE,2,mean),2)
MAE_SD = round(apply(MAE,2,sd),2)
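#------------------------------------------------------------------------
# The six blocks below each extract one discrete gait parameter (the value,
# maximum, or minimum over a % range of the cycle) and report the mean and
# SD of the absolute AV-PV difference. A helper covering all six (sketch
# only; f_peak_mae and its 'stat' argument are assumptions, not the
# original code):
if (1 == 0){
  f_peak_mae = function(var_AV,var_PV,range,stat=max){
    v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
    v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
    x = seq(1,101,length=200)
    xi = seq(1,101,length=101)
    for (i in 1:dim(var_AV)[2]){
      for (j in 1:dim(var_AV)[3]){
        v_mat_AV[i,j] = stat(interp1(x,var_AV[,i,j],xi)[range])
        v_mat_PV[i,j] = stat(interp1(x,var_PV[,i,j],xi)[range])
      }
    }
    c(round(mean(abs(v_mat_AV-v_mat_PV)),1),round(sd(abs(v_mat_AV-v_mat_PV)),1))
  }
  # e.g. hip extension at 25-75%: f_peak_mae(Wave3_AV[,,,1],Wave3_PV[,,,1],26:76,min)
}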
#------------------------------------------------------------------------
# 2023 0513: discrete gait parameters (peak errors)
Wave3_AV = array(Wave2_AV,c(200,10,40,3)) # test curves reshaped to 10 x 40
Wave3_PV = array(Wave2_PV,c(200,10,40,3))
temp_box = c()

#########
# hip, hip flexion at 0%
var_AV = Wave3_AV[,,,1] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,1]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 1 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = max(temp_AV[range])
    v_temp_PV = max(temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

#########
# hip, hip extension at 25-75%
var_AV = Wave3_AV[,,,1] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,1]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 26:76 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = min(temp_AV[range])
    v_temp_PV = min(temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

#########
# knee, knee flexion at 50-100%
var_AV = Wave3_AV[,,,2] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,2]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 51:101 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = min(temp_AV[range])
    v_temp_PV = min(temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

#########
# ankle, ankle at 0%
var_AV = Wave3_AV[,,,3] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,3]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 1 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = (temp_AV[range])
    v_temp_PV = (temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

#########
# ankle, ankle at 25-50%
var_AV = Wave3_AV[,,,3] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,3]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 26:51 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = max(temp_AV[range])
    v_temp_PV = max(temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

#########
# ankle, ankle pf at 50-75%
var_AV = Wave3_AV[,,,3] # 1:hip 2:knee 3:ankle
var_PV = Wave3_PV[,,,3]
v_mat_AV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
v_mat_PV = matrix(0,dim(var_AV)[2],dim(var_AV)[3])
range = 51:76 # of 0-100%
for (i in 1:dim(var_AV)[2]){
  for (j in 1:dim(var_AV)[3]){
    x = seq(1,101,length=200)
    y_AV = var_AV[,i,j]
    y_PV = var_PV[,i,j]
    xi = seq(1,101,length=101)
    temp_AV = interp1(x,y_AV,xi)
    temp_PV = interp1(x,y_PV,xi)
    v_temp_AV = min(temp_AV[range])
    v_temp_PV = min(temp_PV[range])
    v_mat_AV[i,j] = v_temp_AV
    v_mat_PV[i,j] = v_temp_PV
  }
}
temp_mean = round(mean(abs(v_mat_AV-v_mat_PV)),1)
temp_sd = round(sd(abs(v_mat_AV-v_mat_PV)),1)
temp_box = rbind(temp_box,c(temp_mean,temp_sd))

write.csv(temp_box,file=paste('Output_ForML_Type100_AC_OM/Res_Table_peak_mae.csv',sep=''))

#------------------------------------------------------------------------
# r: Pearson correlation between predicted and actual waveforms
Mat_r = matrix(-999,dim(TestDataNumber)[1],3)
for (i in 1:dim(TestDataNumber)[1]){
  for (j in 1:3){
    temp_PV = Wave2_PV[,i,j]
    temp_AV = Wave2_AV[,i,j]
    Mat_r[i,j] = cor.test(temp_PV,temp_AV)$estimate
  }
}
r = Mat_r
r_AVE = round(apply(r,2,mean),2)
r_SD = round(apply(r,2,sd),2)

#------------------------------------------------------------------------
# Table
Res_Table = cbind(RMSE_AVE,RMSE_SD,NRMSE_AVE,NRMSE_SD,MAE_AVE,MAE_SD,r_AVE,r_SD) # joint x 8
write.csv(Res_Table,file=paste('Output_ForML_Type100_AC_OM/Res_Table.csv',sep=''))
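#------------------------------------------------------------------------
# The 'For plot' blocks below resample each 200-sample waveform to 101
# points (0-100% of the gait cycle) with pracma::interp1, the same pattern
# used in the peak analysis above. Minimal illustration (toy sine wave):
if (1 == 0){
  x = seq(1,101,length=200)  # original 200 samples mapped onto 1..101
  y = sin(seq(0,2*pi,length=200))
  xi = seq(1,101,length=101) # target grid: one point per % of the cycle
  y101 = interp1(x,y,xi)     # linear interpolation (the default method)
  length(y101)               # 101
}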
#------------------------------------------------------------------------
# After training: re-read the final-training history
if (1 == 0){
  L_Condition = list('AC_OM')
  Arr_Main_LS_train = matrix(0,NumEpoch,length(L_Condition))
  Arr_Main_AE_train = matrix(0,NumEpoch,length(L_Condition))
  for (i_Condition in 1:length(L_Condition)){
    for (i_HyperParameter in 1){
      History = readRDS(paste('Output_ForML_Type100_',L_Condition[[i_Condition]],'/HistoryMain_HL',L_HiddenLayer[i_HyperParameter],'_HU',L_HiddenUnit[[i_HyperParameter]][1],'_',L_HiddenUnit[[i_HyperParameter]][2],'.obj',sep=''))
      Arr_Main_LS_train[,i_Condition] = as.matrix(matrix(History$metrics$loss,NumEpoch,1))
      Arr_Main_AE_train[,i_Condition] = as.matrix(matrix(History$metrics$mae,NumEpoch,1))
    }
  }
  for (i_Condition in 1:length(L_Condition)){
    write.csv(Arr_Main_LS_train[,i_Condition],paste('Output_ForML_Type100_LS_AE/HistoryMain_LS_train_',L_Condition[[i_Condition]],'.csv',sep=''))
    write.csv(Arr_Main_AE_train[,i_Condition],paste('Output_ForML_Type100_LS_AE/HistoryMain_AE_train_',L_Condition[[i_Condition]],'.csv',sep=''))
  }
}

#------------------------------------------------------------------------
# For plot: mean and SD waveforms (actual and predicted), resampled to 101 points
ForWave_AV_AVE = apply(Wave1_AV,c(1,2),mean)
ForWave_AV_SD = apply(Wave1_AV,c(1,2),sd)
ForWave_PV_AVE = apply(Wave1_PV,c(1,2),mean)
ForWave_PV_SD = apply(Wave1_PV,c(1,2),sd)
ForWave1 = cbind(ForWave_AV_AVE,ForWave_AV_SD,ForWave_PV_AVE,ForWave_PV_SD)
ForWave2 = matrix(0,101,dim(ForWave1)[2])
for (i in 1:dim(ForWave1)[2]){
  x = seq(1,101,length=200)
  y = ForWave1[,i]
  xi = seq(1,101,length=101)
  ForWave2[,i] = interp1(x,y,xi)
}
write.csv(ForWave2,file='Output_ForML_Type100_LS_AE/ForWave.csv')

#------------------------------------------------------------------------
# For plot: mean and SD of the absolute error waveforms
ForWave1 = cbind(apply(abs(Wave1_AV-Wave1_PV),c(1,2),mean),apply(abs(Wave1_AV-Wave1_PV),c(1,2),sd))
ForWave2 = matrix(0,101,dim(ForWave1)[2])
for (i in 1:dim(ForWave1)[2]){
  x = seq(1,101,length=200)
  y = ForWave1[,i]
  xi = seq(1,101,length=101)
  ForWave2[,i] = interp1(x,y,xi)
}
write.csv(ForWave2,file='Output_ForML_Type100_LS_AE/ForWave_Error.csv')

#========================================================================
# Plot
#========================================================================
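# A minimal plotting sketch for the exported waveforms. The column layout of
# ForWave.csv follows the cbind() above: columns 1-3 actual means, 4-6 actual
# SDs, 7-9 predicted means, 10-12 predicted SDs (the joint order and the
# angle unit are assumptions; adjust to the real layout).
if (1 == 0){
  ForWave = read.csv('Output_ForML_Type100_LS_AE/ForWave.csv')[,-1] # drop the index column
  df = data.frame(Cycle=0:100,
                  AV=ForWave[,1],  # actual mean, joint 1
                  PV=ForWave[,7])  # predicted mean, joint 1
  ggplot(df,aes(x=Cycle)) +
    geom_line(aes(y=AV),linetype='solid') +
    geom_line(aes(y=PV),linetype='dashed') +
    labs(x='Gait cycle [%]',y='Joint angle [deg]')
}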