##################################################################################
# R code to construct confidence intervals for the ratio of medians of
# three-parameter lognormal distributions containing zero values, and their
# application to wind speed data from northern Thailand.
##################################################################################
rm(list=ls())
library(EnvStats)   # rlnorm3 / dlnorm3 (three-parameter lognormal)
library(zipfR)
library(MASS)
library(lattice)
library(torch)      # autograd + Adam optimizer for the threshold estimate

# Generate random samples from the posterior of gamma (the threshold parameter)
# via an independence Metropolis-Hastings sampler.
#
# Arguments:
#   mrep         - total chain length (number of draws returned, incl. burn-in)
#   a            - current threshold estimate; also the threshold of the
#                  three-parameter lognormal proposal distribution
#   mu, sigmaSq  - log-scale mean and variance estimates of the nonzero data
#   size.nonzero - number of nonzero observations
#
# Returns: numeric vector of length mrep containing the MH chain.
#
# NOTE(review): the chain state x.a is initialized at exp(mu + sigmaSq/2) and
# candidates are drawn independently from lnorm3(mu, sqrt(sigmaSq), a); the
# acceptance ratio is target(can)/target(x) * proposal(x)/proposal(can), the
# standard independence-MH form. The target kernel below is assumed to be the
# (unnormalized) posterior of the threshold from the companion paper — confirm
# against the derivation there.
post_a = function(mrep, a, mu, sigmaSq, size.nonzero){
  vec.a = vector("numeric", mrep)
  sigma = sqrt(sigmaSq)
  # Initial state of the chain.
  x.a = exp(mu + sigmaSq/2)
  vec.a[1] = x.a
  i = 2
  while (i <= mrep) {
    # Independence proposal: three-parameter lognormal with threshold a.
    Can.a = rlnorm3(1, meanlog = mu, sdlog = sigma, threshold = a)
    # Guard: rlnorm3 should always exceed its threshold, so this is a
    # defensive retry (candidate at/below the threshold has zero density).
    if(Can.a <= a){next}
    # Unnormalized target density evaluated at the candidate...
    den.can.a = function(can) sigmaSq^(-size.nonzero/2)*exp(-size.nonzero*(log(can - a)) - ((2*sigmaSq)^(-1)*size.nonzero*((log(can - a)-mu)^2)))
    den.can.a = den.can.a(can = Can.a)
    # ...and at the current state (same kernel, reevaluated).
    den.x.a = function(X) sigmaSq^(-size.nonzero/2)*exp(-size.nonzero*(log(X - a)) - ((2*sigmaSq)^(-1)*size.nonzero*((log(X - a)-mu)^2)))
    den.x.a = den.x.a(X = x.a)
    # Target ratio pi(candidate)/pi(current).
    den.target.a = den.can.a/den.x.a
    # Proposal ratio q(current)/q(candidate) for independence MH.
    den.proposed.a = dlnorm3(x.a, meanlog = mu, sdlog = sigma, threshold = a)/ dlnorm3(Can.a, meanlog = mu, sdlog = sigma, threshold = a)
    den.tar.pro.a = den.target.a * den.proposed.a
    # Numerically degenerate ratio (e.g. 0/0): discard this candidate and retry
    # without advancing the chain index.
    if(is.na(den.tar.pro.a)){
      print(a)
      print("den.tar.pro.a|Nan")
      next}
    prob.a = min(1, den.tar.pro.a)
    u = runif(1)
    if (u > prob.a){
      # Reject: record the current state again.
      vec.a[i] = x.a
      #print("rejected.a")
    }else{
      # Accept: move the chain to the candidate.
      x.a = Can.a
      vec.a[i] = x.a
      print("accepted.a")
    }
    i = i+1
    # Progress indicator every 5000 iterations.
    if(i%%5000== 0){
      print(i)
    }
  }
  return(vec.a)
}

###################################################################################
# R codes for FGPQ, MOVER-FGPQ (MF), NA and Bayesian intervals
##################################################################################

# Monte Carlo study of four interval estimators for the log-ratio of medians of
# two delta-three-parameter-lognormal (TPLNZ) populations.
#
# Arguments:
#   M                       - number of simulation replications
#   m                       - number of GPQ / posterior draws per replication
#   N                       - (unused in the body; kept for interface stability)
#   mrep, burnin            - MH chain length and burn-in for post_a()
#   n1, n2                  - sample sizes of the two groups
#   mulog1, mulog2          - true log-scale means
#   threshold1, threshold2  - true thresholds (gamma)
#   sigmaSqlog1, sigmaSqlog2- true log-scale variances
#   d1, d2                  - true proportions of zero observations
#   alpha                   - nominal significance level
#
# Side effects: prints progress and a summary (via cat) of average coverage
# probability (ACP) and expected length for each of the four methods.
CI.rmed.TPLNZ = function(M=5000, m=2500, N=2500, mrep=5000, burnin = 2500,
                         n1=25, n2=25, mulog1=3, mulog2=3, threshold1=3, threshold2=5,
                         sigmaSqlog1=3, sigmaSqlog2=3, d1=0.1, d2=0.3,
                         alpha=0.05){
  # Pack per-group parameters into length-2 vectors (index 1 = group 1).
  n = c(n1,n2)
  mulog = c(mulog1,mulog2)
  sigmaSqlog = c(sigmaSqlog1,sigmaSqlog2)
  d = c(d1,d2)
  threshold = c(threshold1,threshold2)
  sdlog = sqrt(sigmaSqlog)
  # True median of each TPLNZ population and the true log-ratio of medians
  # (the parameter every interval below is meant to cover).
  median.TPLNZ = (1-d)*(threshold + exp(mulog))
  rmed.TPLNZ = log(median.TPLNZ[1]) - log(median.TPLNZ[2])
  #FGPQ interval: preallocate per-draw and per-replication storage.
  mu.FGPQ.U1 = rep(0,m)
  sigmaSq.FGPQ.U1 = rep(0,m)
  a.FGPQ.U1 = rep(0,m)
  median.FGPQ.U1 = rep(0,m)
  mu.FGPQ.U2 = rep(0,m)
  sigmaSq.FGPQ.U2 = rep(0,m)
  a.FGPQ.U2 = rep(0,m)
  median.FGPQ.U2 = rep(0,m)
  rmed.TPLNZ.FGPQ.U = rep(0,m)
  CP.FGPQ = rep(0,M)
  Length.FGPQ= rep(0,M)
  #Normal approximation interval based on McKean and Schrader (1984) (NAMS)
  CP.NAMS = rep(0,M)
  Length.NAMS = rep(0,M)
  #MOVER-FGPQ (MF) interval
  mu.FGPQ.U1 = rep(0,m)
  sigmaSq.FGPQ.U1 = rep(0,m)
  thres.FGPQ.U1 = rep(0,m)
  mu.FGPQ.U2 = rep(0,m)
  sigmaSq.FGPQ.U2 = rep(0,m)
  thres.FGPQ.U2 = rep(0,m)
  rmed.FGPQ.U.MF = rep(0,m)
  CP.MF = rep(0,M)
  Length.MF = rep(0,M)
  #Bayesian interval
  post.d.U1 = rep(0,m)
  post.thres.U1 = rep(0,m)
  post.mulog.U1 = rep(0,m)
  Temp.post.mulog.U1 = rep(0,m)
  post.sigmaSqlog.U1 = rep(0,m)
  post.medianTPLNZ.U1 = rep(0,m)
  post.d.U2 = rep(0,m)
  post.thres.U2 = rep(0,m)
  post.mulog.U2 = rep(0,m)
  Temp.post.mulog.U2 = rep(0,m)
  post.sigmaSqlog.U2 = rep(0,m)
  post.medianTPLNZ.U2 = rep(0,m)
  post.rmed.U = rep(0,m)
  CP.Baye = rep(0,M)
  Length.Baye = rep(0,M)
  i = 1
  # Main simulation loop: one synthetic two-group dataset per replication.
  for(i in 1:M){
    # Number of zero observations per group is binomial with probability d.
    size.x.zero1 = rbinom(1, n[1], d[1])
    size.x.zero2 = rbinom(1, n[2], d[2])
    size.nonzero1 = n[1] - size.x.zero1
    size.nonzero2 = n[2] - size.x.zero2
    size.x.zero = c(size.x.zero1, size.x.zero2)
    size.nonzero = c(size.nonzero1, size.nonzero2)
    # Nonzero part of each sample: three-parameter lognormal draws.
    x.TPLN1 = rlnorm3(size.nonzero[1], mulog[1], sdlog[1], threshold[1])
    x.TPLN2 = rlnorm3(size.nonzero[2], mulog[2], sdlog[2], threshold[2])
    x.TPLN = c(list(x.TPLN1), list(x.TPLN2))
    ########################################################################
    # Algorithm 1 : Adam optimization algorithm
    # Estimate the threshold of each group by driving the profile
    # log-likelihood score of the threshold toward zero with torch's Adam.
    ########################################################################
    #Set initial parameter
    startstep = 1e-3
    # NOTE(review): learning_rate is assigned but not used below; the
    # optimizers are constructed with lr = 0.01.
    learning_rate = 1e-09
    #Group1
    xtensor1 = torch_tensor(c(x.TPLN[[1]]))
    minvalue1 = min(x.TPLN[[1]])
    checkValue = 1e20
    # Start just below the sample minimum (threshold must be < min(x)).
    weight1 = torch_tensor(minvalue1-(startstep),requires_grad = TRUE)
    optimizer = optim_adam(weight1, lr = 0.01)
    for (t in 1:1000) {
      #The profile log-likelihood of threshold (score terms P11..P41)
      P11 = torch_sum(torch_log(xtensor1-weight1)/(xtensor1-weight1))
      P21 = torch_sum(1/(xtensor1-weight1))
      P31 = torch_sum(torch_log(xtensor1-weight1)^2)
      P41 = torch_sum(torch_log(xtensor1-weight1))^2
      y1 = (size.nonzero[1]-1)*P11 + (P21*(P31 - (P41/size.nonzero[1])))
      # Minimize |score|: its root is the profile-likelihood threshold estimate.
      loss = torch_abs(y1-0)
      optimizer$zero_grad()
      loss$backward()
      optimizer$step()
      #print(c(loss$item()))
      # Skip bookkeeping on numerically broken steps.
      if (is.infinite(loss$item())){
        print("Loss inf")
        next }
      if (is.na(loss$item())){
        print("Loss NA")
        next }
      if (is.nan(loss$item())){
        print("Loss NAN")
        next }
      #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
      # Track the best (smallest) loss seen; report when loss increases.
      if (checkValue > loss$item()){
        checkValue = loss$item()
      }else{
        print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
        #print(c("Problem in Group1"))
        next
      }
    }
    #print(c("final checkValue = ",checkValue,"loss$item()=",loss$item()))
    # Plug-in estimates for group 1 given the fitted threshold.
    thresHat.GD1 = weight1$item()
    LogxLN.GD1 = log(x.TPLN[[1]] - thresHat.GD1)
    mulog.hat1 = mean(LogxLN.GD1)
    # Biased (divide-by-n) log-scale SD estimate.
    sdlog.hat1 = sqrt((sum(LogxLN.GD1^2) - (size.nonzero[1]*mulog.hat1^2))/size.nonzero[1])
    sigmaSq.hat.unbi1= var(LogxLN.GD1)
    sigmaSq.hat.bias1 = sdlog.hat1^2
    d.hat1 = size.x.zero[1]/n[1]
    #Group2 (same procedure as group 1)
    xtensor2 = torch_tensor(c(x.TPLN[[2]]))
    minvalue2 = min(x.TPLN[[2]])
    checkValue = 1e20
    weight2 = torch_tensor(minvalue2-(startstep),requires_grad = TRUE)
    optimizer = optim_adam(weight2, lr = 0.01)
    for (t in 1:1000) {
      #The profile log-likelihood of threshold
      P12 = torch_sum(torch_log(xtensor2-weight2)/(xtensor2-weight2))
      P22 = torch_sum(1/(xtensor2-weight2))
      P32 = torch_sum(torch_log(xtensor2-weight2)^2)
      P42 = torch_sum(torch_log(xtensor2-weight2))^2
      y2 = (size.nonzero[2]-1)*P12 + (P22*(P32 - (P42/size.nonzero[2])))
      loss = torch_abs(y2-0)
      optimizer$zero_grad()
      loss$backward()
      optimizer$step()
      #print(c(loss$item()))
      if (is.infinite(loss$item())){
        print("Loss inf")
        next }
      if (is.na(loss$item())){
        print("Loss NA")
        next }
      if (is.nan(loss$item())){
        print("Loss NAN")
        next }
      #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
      if (checkValue > loss$item()){
        checkValue = loss$item()
      }else{
        print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
        #print(c("Problem in Group2"))
        next
      }
    }
    print(c("final checkValue = ",checkValue,"loss$item()=",loss$item()))
    # Plug-in estimates for group 2.
    thresHat.GD2 = weight2$item()
    LogxLN.GD2 = log(x.TPLN[[2]] - thresHat.GD2)
    mulog.hat2 = mean(LogxLN.GD2)
    sdlog.hat2 = sqrt((sum(LogxLN.GD2^2) - (size.nonzero[2]*mulog.hat2^2))/size.nonzero[2])
    sigmaSq.hat.unbi2= var(LogxLN.GD2)
    sigmaSq.hat.bias2 = sdlog.hat2^2
    d.hat2 = size.x.zero[2]/n[2]
    # Collect both groups' estimates into length-2 vectors.
    LogxLN.GD = c(list(LogxLN.GD1), list(LogxLN.GD2))
    thresHat.GD = c(thresHat.GD1, thresHat.GD2)
    mulog.hat = c(mulog.hat1, mulog.hat2)
    sdlog.hat = c(sdlog.hat1, sdlog.hat2)
    sigmaSq.hat.unbi = c(sigmaSq.hat.unbi1, sigmaSq.hat.unbi2)
    sigmaSq.hat.bias = c(sigmaSq.hat.bias1, sigmaSq.hat.bias2)
    d.hat = c(d.hat1, d.hat2)
    # Estimated TPLNZ medians (delta-adjusted).
    medianDTPLN.hat = (1-d.hat)*(thresHat.GD + exp(mulog.hat))
    print(c("thresHat.GD", thresHat.GD))
    #####################################################################################################################################
    # Algorithm 2 : FGCI interval
    # Fiducial generalized pivotal quantities for each component, combined
    # into m draws of the log-ratio of medians; take empirical quantiles.
    #####################################################################################################################################
    df = size.nonzero-1
    # GPQ for sigma^2 (chi-square pivot), then mu given sigma^2.
    sigmaSq.FGPQ.U1 = df[1]*sdlog.hat[1]^2/rchisq(m, df[1])
    mu.FGPQ.U1 = mulog.hat[1]-(rnorm(m)*sqrt(sigmaSq.FGPQ.U1/size.nonzero[1]))
    # Threshold pivot: uniform on (0, sample minimum).
    thres.FGPQ.U1 = runif(m, min = 0, max = min(x.TPLN[[1]]))
    # Beta posterior-style pivot for the nonzero proportion (1 - d).
    dnon.FGPQ.U1 = rbeta(m, size.nonzero[1]+1, size.x.zero[1]+1)
    median.FGPQ.U1 = dnon.FGPQ.U1*(thres.FGPQ.U1 + exp(mu.FGPQ.U1))
    sigmaSq.FGPQ.U2 = df[2]*sdlog.hat[2]^2/rchisq(m, df[2])
    mu.FGPQ.U2 = mulog.hat[2]-(rnorm(m)*sqrt(sigmaSq.FGPQ.U2/size.nonzero[2]))
    thres.FGPQ.U2 = runif(m, min = 0, max = min(x.TPLN[[2]]))
    dnon.FGPQ.U2 = rbeta(m, size.nonzero[2]+1, size.x.zero[2]+1)
    median.FGPQ.U2 = dnon.FGPQ.U2*(thres.FGPQ.U2 + exp(mu.FGPQ.U2))
    rmed.TPLNZ.FGPQ.U = log(median.FGPQ.U1) - log(median.FGPQ.U2)
    L.FGPQ = quantile(rmed.TPLNZ.FGPQ.U, alpha/2) #compute lower based on GCI
    U.FGPQ = quantile(rmed.TPLNZ.FGPQ.U, 1-alpha/2) #compute upper based on GCI
    # Coverage indicator and length for this replication.
    CP.FGPQ[i] = ifelse(L.FGPQ < rmed.TPLNZ && rmed.TPLNZ < U.FGPQ, 1, 0)
    Length.FGPQ[i] = U.FGPQ - L.FGPQ
    #####################################################################################################################################
    # Algorithm 3 : MOVER-FGPQ (MF) interval
    # Method-of-variance-estimates-recovery: combine marginal FGPQ limits of
    # each component into limits for the log-ratio of medians.
    #####################################################################################################################################
    df = size.nonzero-1
    #Group1: marginal FGPQ limits for mu, threshold and log(1-d).
    sigmaSq.FGPQ.U1 = df[1]*sdlog.hat[1]^2/rchisq(m, df[1])
    mu.FGPQ.U1 = mulog.hat[1]-(rnorm(m)*sqrt(sigmaSq.FGPQ.U1/size.nonzero[1]))
    l.mu.MF1 = quantile(mu.FGPQ.U1, alpha/2)
    u.mu.MF1 = quantile(mu.FGPQ.U1, 1- (alpha/2))
    thres.FGPQ.U1 = runif(m, min = 0, max = min(x.TPLN[[1]]))
    l.thres.MF1 = quantile(thres.FGPQ.U1, alpha/2)
    u.thres.MF1 = quantile(thres.FGPQ.U1, 1- (alpha/2))
    log.dnon.hat = log(1-d.hat)
    dnon.FGPQ.U1 = rbeta(m, size.nonzero[1]+1, size.x.zero[1]+1)
    l.log.dnon1 = log(quantile(dnon.FGPQ.U1, alpha/2))
    u.log.dnon1 = log(quantile(dnon.FGPQ.U1, 1-(alpha/2)))
    # MOVER recovery for the (threshold + exp(mu)) part...
    medTPLN.hat = thresHat.GD +exp(mulog.hat)
    l.medTPLN1 = log(medTPLN.hat[1] - sqrt((thresHat.GD[1]-l.thres.MF1)^2 + (exp(mulog.hat[1])-exp(l.mu.MF1))^2))
    u.medTPLN1 = log(medTPLN.hat[1] + sqrt((thresHat.GD[1]-u.thres.MF1)^2 + (exp(mulog.hat[1])-exp(u.mu.MF1))^2))
    # ...then combine with the log(1-d) part for the group-1 log-median.
    l.medDTPLN1 = log.dnon.hat[1] + log(medTPLN.hat[1]) - sqrt((log.dnon.hat[1]-l.log.dnon1)^2 + (log(medTPLN.hat[1])-l.medTPLN1)^2)
    u.medDTPLN1 = log.dnon.hat[1] + log(medTPLN.hat[1]) + sqrt((log.dnon.hat[1]-u.log.dnon1)^2 + (log(medTPLN.hat[1])-u.medTPLN1)^2)
    #Group2: (same recovery as group 1)
    sigmaSq.FGPQ.U2 = df[2]*sdlog.hat[2]^2/rchisq(m, df[2])
    mu.FGPQ.U2 = mulog.hat[2]-(rnorm(m)*sqrt(sigmaSq.FGPQ.U2/size.nonzero[2]))
    l.mu.MF2 = quantile(mu.FGPQ.U2, alpha/2)
    u.mu.MF2 = quantile(mu.FGPQ.U2, 1- (alpha/2))
    thres.FGPQ.U2 = runif(m, min = 0, max = min(x.TPLN[[2]]))
    l.thres.MF2 = quantile(thres.FGPQ.U2, alpha/2)
    u.thres.MF2 = quantile(thres.FGPQ.U2, 1- (alpha/2))
    log.dnon.hat = log(1-d.hat)
    dnon.FGPQ.U2 = rbeta(m, size.nonzero[2]+1, size.x.zero[2]+1)
    l.log.dnon2 = log(quantile(dnon.FGPQ.U2, alpha/2))
    u.log.dnon2 = log(quantile(dnon.FGPQ.U2, 1-(alpha/2)))
    medTPLN.hat = thresHat.GD +exp(mulog.hat)
    l.medTPLN2 = log(medTPLN.hat[2] - sqrt((thresHat.GD[2]-l.thres.MF2)^2 + (exp(mulog.hat[2])-exp(l.mu.MF2))^2))
    u.medTPLN2 = log(medTPLN.hat[2] + sqrt((thresHat.GD[2]-u.thres.MF2)^2 + (exp(mulog.hat[2])-exp(u.mu.MF2))^2))
    l.medDTPLN2 = log.dnon.hat[2] + log(medTPLN.hat[2]) - sqrt((log.dnon.hat[2]-l.log.dnon2)^2 + (log(medTPLN.hat[2])-l.medTPLN2)^2)
    u.medDTPLN2 = log.dnon.hat[2] + log(medTPLN.hat[2]) + sqrt((log.dnon.hat[2]-u.log.dnon2)^2 + (log(medTPLN.hat[2])-u.medTPLN2)^2)
    # Final MOVER combination across the two groups for the log-ratio.
    L.MF = (log(medianDTPLN.hat[1]) - log(medianDTPLN.hat[2])) - sqrt((log(medianDTPLN.hat[1]) - l.medDTPLN1)^2 + (log(medianDTPLN.hat[2]) - l.medDTPLN2)^2)
    U.MF = (log(medianDTPLN.hat[1]) - log(medianDTPLN.hat[2])) + sqrt((log(medianDTPLN.hat[1]) - u.medDTPLN1)^2 + (log(medianDTPLN.hat[2]) - u.medDTPLN2)^2)
    CP.MF[i] = ifelse(L.MF < rmed.TPLNZ && rmed.TPLNZ < U.MF, 1, 0)
    Length.MF[i] = U.MF - L.MF
    #####################################################################################################################################
    # Algorithm 4 : NAMS interval
    # Wald-type interval with the median variance estimated via the
    # McKean-Schrader order-statistic method.
    #####################################################################################################################################
    log.medianDTPLN.hat = log(1-d.hat) + log(thresHat.GD + exp(mulog.hat))
    Var.logdnon = d.hat/size.nonzero
    #Variance of median based on McKean and Schrader (1984) (MS):
    # order-statistic ranks c and n-c+1 bracket the sample median
    # (1.96 here is the z value for the 95% bracket).
    c = round(((size.nonzero+1)/2) - (1.96*sqrt(size.nonzero/4)), digits = 0)
    c_nc = size.nonzero-c+1
    #Group1 :
    x_c1 = sort(x.TPLN[[1]])[c[1]]
    x_nc1 = sort(x.TPLN[[1]])[c_nc[1]]
    V.MS1 = ((x_nc1 - x_c1)/(2*1.96))^2
    # Delta-method variance of the log-median (zero part + nonzero part).
    Var.logmed.MS1 = Var.logdnon[1] + (V.MS1/(thresHat.GD[1] + exp(mulog.hat[1]+(sigmaSq.hat.unbi[1]/(2*size.nonzero[1]))))^2)
    #Group2 :
    x_c2 = sort(x.TPLN[[2]])[c[2]]
    x_nc2 = sort(x.TPLN[[2]])[c_nc[2]]
    V.MS2 = ((x_nc2 - x_c2)/(2*1.96))^2
    Var.logmed.MS2 = Var.logdnon[2] + (V.MS2/(thresHat.GD[2] + exp(mulog.hat[2]+(sigmaSq.hat.unbi[2]/(2*size.nonzero[2]))))^2)
    L.NAMS = (log.medianDTPLN.hat[1]-log.medianDTPLN.hat[2]) - (qnorm(1-alpha/2)*sqrt(Var.logmed.MS1 + Var.logmed.MS2))
    U.NAMS = (log.medianDTPLN.hat[1]-log.medianDTPLN.hat[2]) + (qnorm(1-alpha/2)*sqrt(Var.logmed.MS1 + Var.logmed.MS2))
    CP.NAMS[i] = ifelse(L.NAMS < rmed.TPLNZ && rmed.TPLNZ < U.NAMS,1,0)
    Length.NAMS[i] = U.NAMS - L.NAMS
    #####################################################################################################################################
    # Algorithm 5 : Bayesian interval
    # Threshold drawn by MCMC (post_a), other parameters from closed-form
    # posteriors; equal-tail credible interval on the log-ratio of medians.
    #####################################################################################################################################
    #The posterior of gamma via the MH sampler; drop the burn-in so that
    # mrep - burnin = m draws remain.
    post_fa.temp1 = post_a(mrep, a = thresHat.GD[1], mu = mulog.hat[1], sigmaSq = sigmaSq.hat.unbi[1], size.nonzero[1])
    post_fa.temp2 = post_a(mrep, a = thresHat.GD[2], mu = mulog.hat[2], sigmaSq = sigmaSq.hat.unbi[2], size.nonzero[2])
    post.thres.U1 = post_fa.temp1[-(1:burnin)]
    post.thres.U2 = post_fa.temp2[-(1:burnin)]
    # Normal approximation to the posterior of sigma^2.
    Sig.U = size.nonzero*sigmaSq.hat.unbi/(size.nonzero+2)
    sd.sigmaSqlog.U = sqrt(2*Sig.U^2/(size.nonzero+2))
    post.sigmaSqlog.U1 = rnorm(m, Sig.U[1], sd.sigmaSqlog.U[1])
    post.sigmaSqlog.U2 = rnorm(m, Sig.U[2], sd.sigmaSqlog.U[2])
    post.mulog.U1 = rnorm(m, mulog.hat[1], sqrt(sigmaSq.hat.unbi[1]/size.nonzero[1]))
    post.mulog.U2 = rnorm(m, mulog.hat[2], sqrt(sigmaSq.hat.unbi[2]/size.nonzero[2]))
    # Beta posterior for the nonzero proportion of each group.
    post.dnon1 = rbeta(m, size.nonzero[1]+1, size.x.zero[1]+1)
    post.dnon2 = rbeta(m, size.nonzero[2]+1, size.x.zero[2]+1)
    post.medianTPLNZ.U1 = post.dnon1*(post.thres.U1 + exp(post.mulog.U1))
    post.medianTPLNZ.U2 = post.dnon2*(post.thres.U2 + exp(post.mulog.U2))
    post.rmed.U = log(post.medianTPLNZ.U1) - log(post.medianTPLNZ.U2)
    # NOTE(review): quantile levels are hard-coded at 0.025/0.975 rather than
    # alpha/2 and 1-alpha/2 — only equivalent at the default alpha = 0.05.
    L.Baye = quantile(post.rmed.U, 0.025)
    U.Baye = quantile(post.rmed.U, 0.975)
    CP.Baye[i] = ifelse(L.Baye < rmed.TPLNZ && rmed.TPLNZ < U.Baye, 1,0)
    Length.Baye[i] = U.Baye - L.Baye
    # Progress indicator every 10 replications.
    if(i%%10==0){
      print(i)
    }
  }
  # Average coverage probability and expected length over the M replications.
  ACP.FGPQ= mean(CP.FGPQ)
  EX.Length.FGPQ= mean(Length.FGPQ)
  ACP.MF = mean(CP.MF)
  EX.Length.MF = mean(Length.MF)
  ACP.NAMS = mean(CP.NAMS)
  EX.Length.NAMS = mean(Length.NAMS)
  ACP.Baye = mean(CP.Baye)
  EX.Length.Baye = mean(Length.Baye)
  # Report simulation settings and results.
  cat("M=", M, "\n",
      "n=", n, "\n",
      "sigmaSqlog=", sigmaSqlog, "\n",
      "mulog=", mulog, "\n",
      "threshold=", threshold, "\n",
      "d=", d, "\n",
      "rmed.TPLNZ=", rmed.TPLNZ, "\n",
      "ACP.FGPQ=",ACP.FGPQ,"EX.Length.FGPQ=", EX.Length.FGPQ, "\n",
      "ACP.MF=",ACP.MF,"EX.Length.MF=", EX.Length.MF, "\n",
      "ACP.NAMS=",ACP.NAMS,"EX.Length.NAMS=", EX.Length.NAMS, "\n",
      "ACP.Baye=",ACP.Baye,"EX.Length.Baye=", EX.Length.Baye)
}

##############################################################################
# R codes to apply with wind speed data
##############################################################################
rm(list=ls())
library(readxl)
library(EnvStats)
library(MASS)
library(FAdist)
library(torch)
# Fix both R's and torch's RNG for reproducible fits.
set.seed(9)
torch_manual_seed(9)
# Wind speed observations; PHI and PY are the two station columns.
# NOTE(review): assumes Data.xlsx sits in the working directory.
data = read_excel("Data.xlsx")
PHI = na.omit(data$PHI)
PY = na.omit(data$PY)
#Dataset1 : PHI — split into zero and nonzero parts.
x.TPLN1 = PHI[which(PHI!=0)]
n1 = length(PHI)
size.nonzero1 = length(x.TPLN1)
size.x.zero1 = n1 - size.nonzero1
#Dataset2 : PY
x.TPLN2 = PY[which(PY!=0)]
n2 = length(PY)
size.nonzero2 = length(x.TPLN2)
size.x.zero2 = n2 - size.nonzero2
x.TPLN = c(list(x.TPLN1), list(x.TPLN2))
n = c(n1,n2)
size.x.zero = c(size.x.zero1, size.x.zero2)
size.nonzero = c(size.nonzero1, size.nonzero2)
########################################################################
# Algorithm 1 : Adam optimization algorithm
########################################################################
#Dataset 1
# Fit the three-parameter lognormal threshold for each real dataset with the
# same Adam-based profile-likelihood procedure used in the simulation, then
# compare candidate distributions by AIC.
#Set initial parameter
startstep = 1e-3
# NOTE(review): learning_rate is assigned but not used; both optimizers are
# constructed with lr = 0.01.
learning_rate = 1e-09
#Group1 (PHI)
xtensor1 = torch_tensor(c(x.TPLN[[1]]))
minvalue1 = min(x.TPLN[[1]])
checkValue = 1e20
# Start the threshold just below the sample minimum (it must stay below it).
weight1 = torch_tensor(minvalue1-(startstep),requires_grad = TRUE)
optimizer = optim_adam(weight1, lr = 0.01)
for (t in 1:1000) {
  #The profile log-likelihood of threshold (score terms P11..P41)
  P11 = torch_sum(torch_log(xtensor1-weight1)/(xtensor1-weight1))
  P21 = torch_sum(1/(xtensor1-weight1))
  P31 = torch_sum(torch_log(xtensor1-weight1)^2)
  P41 = torch_sum(torch_log(xtensor1-weight1))^2
  y1 = (size.nonzero[1]-1)*P11 + (P21*(P31 - (P41/size.nonzero[1])))
  # Minimize |score|: its root is the profile-likelihood threshold estimate.
  loss = torch_abs(y1-0)
  optimizer$zero_grad()
  loss$backward()
  optimizer$step()
  #print(c(loss$item()))
  # Skip bookkeeping on numerically broken steps.
  if (is.infinite(loss$item())){
    print("Loss inf")
    next }
  if (is.na(loss$item())){
    print("Loss NA")
    next }
  if (is.nan(loss$item())){
    print("Loss NAN")
    next }
  #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
  # Track the best (smallest) loss seen so far.
  if (checkValue > loss$item()){
    checkValue = loss$item()
  }else{
    #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
    #print(c("Problem in Group1"))
    next
  }
}
#print(c("final checkValue = ",checkValue,"loss$item()=",loss$item()))
# Plug-in estimates for dataset 1 given the fitted threshold.
thresHat.GD1 = weight1$item()
LogxLN.GD1 = log(x.TPLN[[1]] - thresHat.GD1)
mulog.hat1 = mean(LogxLN.GD1)
# Biased (divide-by-n) log-scale SD estimate.
sdlog.hat1 = sqrt((sum(LogxLN.GD1^2) - (size.nonzero[1]*mulog.hat1^2))/size.nonzero[1])
sigmaSq.hat.unbi1= var(LogxLN.GD1)
sigmaSq.hat.bias1 = sdlog.hat1^2
d.hat1 = size.x.zero[1]/n[1]
#Dataset2 (PY) — same procedure as dataset 1.
xtensor2 = torch_tensor(c(x.TPLN[[2]]))
minvalue2 = min(x.TPLN[[2]])
checkValue = 1e20
weight2 = torch_tensor(minvalue2-(startstep),requires_grad = TRUE)
optimizer = optim_adam(weight2, lr = 0.01)
for (t in 1:1000) {
  #The profile log-likelihood of threshold
  P12 = torch_sum(torch_log(xtensor2-weight2)/(xtensor2-weight2))
  P22 = torch_sum(1/(xtensor2-weight2))
  P32 = torch_sum(torch_log(xtensor2-weight2)^2)
  P42 = torch_sum(torch_log(xtensor2-weight2))^2
  y2 = (size.nonzero[2]-1)*P12 + (P22*(P32 - (P42/size.nonzero[2])))
  loss = torch_abs(y2-0)
  optimizer$zero_grad()
  loss$backward()
  optimizer$step()
  #print(c(loss$item()))
  if (is.infinite(loss$item())){
    print("Loss inf")
    next }
  if (is.na(loss$item())){
    print("Loss NA")
    next }
  if (is.nan(loss$item())){
    print("Loss NAN")
    next }
  #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
  if (checkValue > loss$item()){
    checkValue = loss$item()
  }else{
    #print(c("checkValue = ",checkValue,"loss$item()=",loss$item()))
    #print(c("Problem in Group2"))
    next
  }
}
print(c("final checkValue = ",checkValue,"loss$item()=",loss$item()))
# Plug-in estimates for dataset 2.
thresHat.GD2 = weight2$item()
LogxLN.GD2 = log(x.TPLN[[2]] - thresHat.GD2)
mulog.hat2 = mean(LogxLN.GD2)
sdlog.hat2 = sqrt((sum(LogxLN.GD2^2) - (size.nonzero[2]*mulog.hat2^2))/size.nonzero[2])
sigmaSq.hat.unbi2= var(LogxLN.GD2)
sigmaSq.hat.bias2 = sdlog.hat2^2
d.hat2 = size.x.zero[2]/n[2]
##############################################################################
# R code to estimate AIC results
# Fit candidate distributions to the nonzero observations of each dataset by
# maximum likelihood (MASS::fitdistr) and compare via AIC.
##############################################################################
#Dataset1
cauchy.PHI= fitdistr(x.TPLN1,"cauchy")
# Chi-squared df estimated by 1-D Brent search on [0.1, 100].
chi.PHI = fitdistr(x.TPLN1, "chi-squared", start=list(df=size.nonzero[1]-1), method="Brent",lower=0.1,upper=100)
expo.PHI = fitdistr(x.TPLN1,"exponential")
lognorm.PHI= fitdistr(x.TPLN1, "lognormal")
# Three-parameter lognormal: uses the Adam-based estimates as start values.
lognorm3.PHI= fitdistr(x.TPLN1, dlnorm3, start=list(shape = mulog.hat1, scale = sqrt(sigmaSq.hat.unbi1), thres = thresHat.GD1))
logis.PHI = fitdistr(x.TPLN1,"logistic")
norm.PHI = fitdistr(x.TPLN1,"normal")
t.PHI = fitdistr(x.TPLN1, "t", df=size.nonzero[1]-1)
# Print individual AIC values for dataset 1.
AIC(cauchy.PHI)
AIC(chi.PHI)
AIC(expo.PHI)
AIC(lognorm.PHI)
AIC(lognorm3.PHI)
AIC(logis.PHI)
AIC(norm.PHI)
AIC(t.PHI)
#Dataset2
cauchy.PY= fitdistr(x.TPLN2,"cauchy")
chi.PY = fitdistr(x.TPLN2, "chi-squared", start=list(df=size.nonzero[2]-1), method="Brent",lower=0.1,upper=100)
expo.PY = fitdistr(x.TPLN2,"exponential")
lognorm.PY= fitdistr(x.TPLN2, "lognormal")
lognorm3.PY= fitdistr(x.TPLN2, dlnorm3, start=list(shape = mulog.hat2, scale = sqrt(sigmaSq.hat.unbi2), thres = thresHat.GD2))
logis.PY = fitdistr(x.TPLN2,"logistic")
norm.PY = fitdistr(x.TPLN2,"normal")
t.PY = fitdistr(x.TPLN2, "t", df=size.nonzero[2]-1)
AIC(cauchy.PY)
AIC(chi.PY)
AIC(expo.PY)
AIC(lognorm.PY)
AIC(lognorm3.PY)
AIC(logis.PY)
AIC(norm.PY)
AIC(t.PY)
# Assemble the AIC comparison table (one row per dataset).
AIC.PHI = data.frame()
AIC.PY = data.frame()
AIC.PHI = c(AIC(cauchy.PHI), AIC(chi.PHI), AIC(expo.PHI), AIC(lognorm.PHI), AIC(lognorm3.PHI), AIC(logis.PHI), AIC(norm.PHI), AIC(t.PHI))
AIC.PY = c(AIC(cauchy.PY), AIC(chi.PY), AIC(expo.PY), AIC(lognorm.PY), AIC(lognorm3.PY), AIC(logis.PY), AIC(norm.PY), AIC(t.PY))
# NOTE(review): this assignment shadows stats::AIC for the rest of the
# session; all AIC() calls above run before the shadowing takes effect.
AIC = rbind(AIC.PHI,AIC.PY)
colnames(AIC)= c("Cauchy", "Chi-Square", "Exponential", "Lognormal", "TPLN", "Logistic", "Normal", "T")
AIC