####################################################################
### R code: CIs for the single variance of delta-BS distributions ###
####################################################################
# Clear the workspace before the simulation run.
rm(list = ls())
library(stats)
library(MASS)
library(HDInterval)
library(TeachingDemos)
library(LearnBayes)
library(tictoc)          # tic()/toc() wall-clock timing
library(writexl)
library(dlm)             # provides arms() used by the FGCI sampler
library(LaplacesDemon)   # provides Thin() used by the FGCI sampler
library(gsubfn)
library(bsgof)           # provides bs.mme() used by the NA method
tic()
set.seed(95)
# Single-sample case; begin process timing
starttime <- proc.time()
# Glossary of symbols used below:
# x   = data
# M   = number of simulation runs
# m   = number of GCI replications
# n0  = number of zero observations in the sample
# n1  = number of non-zero observations in the sample
# alpha = shape parameter; beta = scale parameter (BS part)
# a1 = a2 = b1 = b2 = hyperparameters
# r   = parameter of the generalized ratio-of-uniforms method
# tau = variance of the delta-BS distribution (target parameter)
# alpha_sig = level of significance
# L.GCI / U.GCI = lower / upper CI bound from the GCI method
# AV.length.GCI = average CI length based on the GCI method
# CP.GCI = coverage probability of the CI based on the GCI method
M <- 2000
m <- 5000
a1 <- b1 <- a2 <- b2 <- 10^(-4)  # hyperparameters
r <- 2       # parameter of the generalized ratio-of-uniforms method
M_Bay <- 1000  # number of generated samples
n=15  # sample size; other settings: 20,30,50,70,100
for (delta in c(0.1,0.3))  # proportion of zeros; full grid: 0.1,0.3,0.5
{
for (beta in c(1,2))  # scale parameter settings
{
for(alpha in c(0.25,0.5,0.75,1))  # shape parameter settings
{
alpha_sig <-0.05
deltaprime <- 1-delta
# True variance of the delta-BS distribution (the parameter of interest).
tau<-(1-delta)*(beta^2)*((alpha^2)*(1+(5*(alpha^2))/4) + (delta)*(1+(alpha^2)/2)^(2))
#---------- GCI bookkeeping vectors ----------#
delta_GPQ <- c()
Z.VST <- c()
L.GCI1 <- c()
U.GCI1 <- c()
AV.GCI1 <- c()
CP.GCI1 <- c()
L.GCI2 <- c()
U.GCI2 <- c()
AV.GCI2 <- c()
CP.GCI2 <- c()
L.GCI3 <- c()
U.GCI3 <- c()
AV.GCI3 <- c()
CP.GCI3 <- c()
AverLengthGCI1 <- c()
CoveragePGCI1 <- c()
AverLengthGCI2 <- c()
CoveragePGCI2 <- c()
AverLengthGCI3 <- c()
CoveragePGCI3 <- c()
AVVar <- c()
CPVar <- c()
TB1 <- c()
V <- c()
Talpha <- c()
GCIVar <- c()
I <- c()
J <- c()
K <- c()
L <- c()
S1 <- c()
S2 <- c()
GCI <- c()
GCIVar1 <- c()
GCIVar2 <- c()
GCIVar3 <- c()
#---------- FGCI bookkeeping vectors ----------#
L.FGCI1 <- c()
U.FGCI1 <- c()
AV.FGCI1 <- c()
CP.FGCI1 <- c()
L.FGCI2 <- c()
U.FGCI2 <- c()
AV.FGCI2 <- c()
CP.FGCI2 <- c()
L.FGCI3 <- c()
U.FGCI3 <- c()
AV.FGCI3 <- c()
CP.FGCI3 <- c()
Ax <- c()
Bx <- c()
Axhat <- c()
delta_FGPQ <- c()
beta0 <- c()
beta1 <- c()
FGCIVar <- c()
FGCIVar1 <- c()
FGCIVar2 <- c()
FGCIVar3 <- c()
#---------- BCI bookkeeping vectors ----------#
Betad=c()
Alad=c()
Betastar=c()
alphastar=c()
delta_BCI=c()
L.BCI1=c()
U.BCI1=c()
AV.BCI1=c()
CP.BCI1=c()
L.BCI2=c()
U.BCI2=c()
AV.BCI2=c()
CP.BCI2=c()
L.BCI3=c()
U.BCI3=c()
AV.BCI3=c()
CP.BCI3=c()
alphahat1=c()
Betahat1=c()
#---------- NA (normal approximation) bookkeeping vectors ----------#
L.NA=c()
U.NA=c()
AV.NA=c()
CP.NA=c()
AverLengthNAVar=c()
CoveragePNAVar=c()
mme=c()
mme_alpha=c()
mme_bata=c()
mme_delta=c()
tau_NA=c()
se_NA=c()
options(digits = 4)
########################################
###          Start Iteration         ###
########################################
### GCI Method ###
# GPQ sampler for the GCI method.
# n1 = number of non-zero observations; I, J, K, L, S1, S2 = sample summaries;
# delta_1..delta_3 = three alternative GPQs for delta (VST, Wilson, Hannig).
# Returns a data frame of GPQ draws for the delta-BS variance, one column
# per delta GPQ.  Fills the global vectors TB1, V, Talpha, GCIVar1..3.
GCIFunc<-function(n1, I, J, K, L, S1, S2,delta_1,delta_2,delta_3) {Num=5000
j=0
# NOTE(review): the next line is corrupted in this copy of the source.
# The while-loop condition (presumably "while (j<Num) {"), the generation
# of the t-distributed pivot T, and the quadratic coefficients a, b, cc of
# the beta GPQ equation are missing — recover them from the original
# script before running this file.
while (j0 then we have to check before sloving for B1 and B2
# Solve the quadratic a*B^2 + b*B + cc = 0 for the beta GPQ, guarding
# against a negative discriminant (no real root -> B1 = B2 = 0).
if (((b^2)-(4*a*cc)) > 0) { B1=((-b)+(sqrt((b^2)-(4*a*cc))))/(2*a)
B2=((-b)-(sqrt((b^2)-(4*a*cc))))/(2*a) } else { B1=0
B2=0}
# Choose the root that serves as the GPQ for beta (TB), depending on the
# sign of the pivot T.
if (T<=0) { TB=max(B1,B2) } else { TB=min(B1,B2) }
# Since beta > 0, only keep the draw when TB > 0; otherwise redraw.
if (TB>0) { j=j+1
TB1[j]=TB
# Chi-squared pivot used in the GPQ for alpha.
V[j]=rchisq(1,df=n1)
# GPQ for alpha, built from the beta GPQ and the sample summaries S1, S2.
Talpha[j]=((((S2*(TB1[j]^2))-(2*n1*TB1[j])+S1)/(TB1[j]*V[j]))^(1/2))
# GPQ draws for the delta-BS variance under each of the three delta GPQs
# (same functional form as the true tau above).
GCIVar1[j] = (1-delta_1)*((TB1[j])^2)*((Talpha[j]^2)*(1+(5*((Talpha[j])^2))/4) + (delta_1)*(1+((Talpha[j])^2)/2)^(2))
GCIVar2[j] = (1-delta_2)*((TB1[j])^2)*((Talpha[j]^2)*(1+(5*((Talpha[j])^2))/4) + (delta_2)*(1+((Talpha[j])^2)/2)^(2))
GCIVar3[j] = (1-delta_3)*((TB1[j])^2)*((Talpha[j]^2)*(1+(5*((Talpha[j])^2))/4) + (delta_3)*(1+((Talpha[j])^2)/2)^(2))
} else { j=j }
}
# Return the GPQ samples for the variance (one column per delta GPQ).
return(data.frame(GCIVar1,GCIVar2,GCIVar3)) }
### FGCI Method ###
# Fiducial GCI: samples (alpha, beta) from an approximate fiducial
# distribution via adaptive rejection Metropolis sampling (dlm::arms) and
# maps the draws to the delta-BS variance.
# s = non-zero observations; delta_1..delta_3 = GPQs for delta.
# NOTE(review): relies on the globals n_nonzero, Ax, Bx, Axhat initialised
# outside this function — confirm this is intentional before refactoring.
FuncFGCI <- function(s,delta_1,delta_2,delta_3) { #nFGCI=length(s)
# Pairwise combinations of the data, used in the Jacobian-like term of the
# fiducial density for beta.
aa1= t(combn(s, 2))
bb1=aa1[,1]
cc1=aa1[,2]
# Moment-based initial values for alpha and beta.
AlphaIN=(2*(((mean(s)*(sum(s^(-1))/n_nonzero))^(1/2))-1))^(1/2)
BetaIN=(mean(s)*((sum(s^(-1))/n_nonzero)^(-1)))^(1/2)
# Log target used to sample the alpha-related quantity (aFGCI is on the
# 1/alpha^2 scale; see the sqrt(1/Ax) back-transform below).
FuncAlpha <- function(aFGCI,BeFGCI) { ((n_nonzero-1)/2)*log(aFGCI)-(((1/(2))*sum((s/BeFGCI)+(BeFGCI/s)-2))*aFGCI) }
# Log target used to sample beta.
betaLog <- function(bFGCI,ALp) { qq1=(abs(bb1-cc1))/((1+(bFGCI/bb1))*(1+(bFGCI/cc1)))
-((n_nonzero) * log(bFGCI)) + sum(log((bFGCI/s)^(1/2) + (bFGCI/s)^(3/2))) - (((1/(2*ALp^2))*sum((s/bFGCI)+(bFGCI/s)))) + log(sum(qq1)) }
# Alternate arms() draws for beta and alpha; the indicator functions
# restrict the support to (1e-4, 1000).
Bx[1]=arms(BetaIN, betaLog , function(bFGCI, ALp) (bFGCI>1e-4)*(bFGCI<1000), 1, ALp=AlphaIN)
Ax[1]=arms(AlphaIN, FuncAlpha , function(aFGCI,BeFGCI) (aFGCI>1e-4)*(aFGCI<1000), 1, BeFGCI=Bx[1])
Axhat[1]=sqrt(1/Ax[1])
for (j in 2:3000) { Bx[j]=arms(Bx[j-1], betaLog , function(bFGCI, ALp) (bFGCI>1e-4)*(bFGCI<1000), 1, ALp=Axhat[j-1])
Ax[j]=arms(Ax[j-1], FuncAlpha , function(aFGCI,BeFGCI) (aFGCI>1e-4)*(aFGCI<1000), 1, BeFGCI=Bx[j])
Axhat[j]=sqrt(1/Ax[j]) }
# Back-transform the chain to the alpha scale: alpha = sqrt(1/Ax).
AxR=sqrt(1/Ax)
# Burn-in: discard the first 1000 draws of each chain.
BetahatF=Bx[-1:-1000]
AlphahatF=AxR[-1:-1000]
# Thin the chains with lag L = 2 (LaplacesDemon::Thin).
BBetaF=Thin(BetahatF, 2)
AAlplaF=Thin(AlphahatF, 2)
# Fiducial draws of the delta-BS variance under each delta GPQ
# (same functional form as the true tau above).
Var.hatFGCI1=(1-delta_1)*((BBetaF)^2)*((AAlplaF^2)*(1+(5*((AAlplaF)^2))/4) + (delta_1)*(1+((AAlplaF)^2)/2)^(2))
Var.hatFGCI2=(1-delta_2)*((BBetaF)^2)*((AAlplaF^2)*(1+(5*((AAlplaF)^2))/4) + (delta_2)*(1+((AAlplaF)^2)/2)^(2))
Var.hatFGCI3=(1-delta_3)*((BBetaF)^2)*((AAlplaF^2)*(1+(5*((AAlplaF)^2))/4) + (delta_3)*(1+((AAlplaF)^2)/2)^(2))
# Return the fiducial variance samples (one column per delta GPQ).
return(data.frame(Var.hatFGCI1,Var.hatFGCI2,Var.hatFGCI3)) }
### BCI Method ###
# Negative log-likelihood of the Birnbaum-Saunders distribution, to be
# minimised by optim() below.  pars = c(alpha, beta); w = data vector.
Bir.lik1<-function(pars,w){ alpha<-pars[1]
bata<-pars[2]
nb<-length(w)
logl<- ((-nb)*log(alpha))- (nb*log(bata))+sum(log(((bata/w)^(1/2)) + ((bata/w)^(3/2))))-((1/(2*(alpha^2)))*sum((w/bata) + (bata/w)-2))
# optim() minimises, so return the NEGATIVE log-likelihood.
return(-logl) }
# Gradient of the negative log-likelihood (supplied to optim as gr=,
# for the BFGS method).  p = c(alpha, beta); w = data vector.
nll_grad<-function(p,w) { alpha<-p[1]
bata<-p[2]
# Per-observation partial derivatives w.r.t. alpha and beta.
logal<- (-1/alpha)-(-(((w/bata)+(bata/w)-2)/ (alpha^3)))
logbe<- (-(1/bata))+ ((w+(3*bata))/ ((2*w*bata)+(2*bata^2))) - (((1/w)-(w/(bata^2)))/ (2*(alpha^2)))
return(c(-sum(logal),-sum(logbe))) }
# Parametric bootstrap helper for the BCI method: draws 500 bootstrap
# samples from BS(alphahatBCI, batahatBCI), refits the MLE for each, and
# returns the constant-bias-correcting (CBC) bootstrap estimates as
# c(alpha* draws (500), beta* draws (500)).
# NOTE(review): fills the global vectors alphahat1 / Betahat1, and the
# loop index m shadows the global m — confirm this is intentional.
BCIFUNC<-function(alphahatBCI,batahatBCI,n1,AAlph,BBata){ for (m in 1:500) {
# Bootstrap sample from the BS distribution via its normal representation.
x11<-rnorm(n1, mean = 0 , sd = alphahatBCI/2)
y11<-batahatBCI*(1+(2*(x11^2))+((2*x11)*sqrt(1+(x11^2))))
# Bootstrap MLE via the BFGS quasi-Newton method.
aa=optim(c(AAlph,BBata),Bir.lik1, gr = nll_grad, method = "BFGS", w=y11)
# Bootstrap MLEs of alpha and beta.
alphahat1[m]=aa[[1]][1]
Betahat1[m]=aa[[1]][2] }
# Bootstrap bias estimates for alpha and beta.
bias=mean(alphahat1)-alphahatBCI
biasBeta1=mean(Betahat1)-batahatBCI
# CBC parametric bootstrap estimators of alpha and beta.
alphastarwith0=alphahat1-(2*bias)
Betastar1=Betahat1-(2*biasBeta1)
return(c(alphastarwith0,Betastar1)) }
### NA (normal approximation) Method ###
# Wald-type CI for the delta-BS variance based on MME estimates and a
# delta-method standard error.
# NOTE(review): reads the globals n_zero, n, n1 and alpha_sig set inside
# the simulation loop — confirm before reusing outside this script.
NAfunc=function(x2){
# MME estimates of alpha and beta from the non-zero observations (bsgof).
mme=bs.mme(x2)
mme_alpha=mme$alpha
mme_bata=mme$beta
mme_delta=n_zero/n
z.alpha=qnorm(1-alpha_sig/2)
# Plug-in estimate of the delta-BS variance.
tau_NA=(1-mme_delta)*(mme_bata^2)*((mme_alpha^2)*(1+(5/4)*(mme_alpha^2))+(mme_delta)*(1+(1/2)*(mme_alpha^2))^2)
# Delta-method standard error: the three summands correspond to the squared
# partial derivatives of tau w.r.t. alpha, beta and delta times the
# asymptotic variances of the respective estimators.
se_NA=sqrt((mme_bata^2*(mme_delta - 1)*(2*mme_alpha*((5*mme_alpha^2)/4 + 1) + (5*mme_alpha^3)/2 + 2*mme_alpha*mme_delta*(mme_alpha^2/2 + 1)))^(2) *((0.5*mme_alpha^2)/n1) +(2*mme_bata*(mme_alpha^2*((5*mme_alpha^2)/4 + 1) + mme_delta*(mme_alpha^2/2 + 1)^2)*(mme_delta - 1))^(2)*((mme_alpha*mme_bata)^(2)*(1+0.75*mme_alpha^2)/(n1*(1+0.5*mme_alpha^2)^2)) +(mme_bata^2*(mme_alpha^2*((5*mme_alpha^2)/4 + 1) + mme_delta*(mme_alpha^2/2 + 1)^2) - mme_bata^2*(mme_alpha^2/2 + 1)^2*(mme_delta - 1))^(2)*(mme_delta*(1-mme_delta)/n))
# Wald interval bounds.
L.NA=tau_NA-z.alpha*se_NA
U.NA=tau_NA+z.alpha*se_NA
return(data.frame(L.NA,U.NA)) }
for (i in 1:M) { #begin loop i
######################################
###          Generate x            ###
######################################
# Zero-indicator part: draw n Bernoulli(delta) indicators; the 1s become
# the zero observations of the delta-BS sample.
x_0 <- rbinom(n, size=1 , prob=delta)
x0 <- x_0[which(x_0!=0)]
n_zero <- length(x0)
n_nonzero <- n-n_zero
# Non-zero part: BS(alpha, beta) draws via the normal representation.
x1 <- rnorm(n_nonzero, mean = 0 , sd = alpha/2)
x2 <- beta*(1+(2*(x1^2))+((2*x1)*sqrt(1+x1^2)))
# Combined sample: x0-1 turns the indicators into zeros.
x <- c(x0-1,x2)
n0 <- n_zero
n1 <- n_nonzero
nonzero_value <- x[which(x!=0)]
delta_hat <- n_zero/n
# delta GPQ 1: variance-stabilising transformation (VST).
# NOTE(review): Z.VST is computed from the TRUE delta, so delta_1 reduces
# algebraically to the true delta — a GPQ would normally use a standard
# normal draw here; verify against the original method.
Z.VST = 2*(sqrt(n))*(asin(sqrt(delta_hat))-asin(sqrt(delta)))
delta_1= (sin(asin(sqrt(delta_hat))-(Z.VST/(2*sqrt(n)))))^2
# delta GPQ 2: Wilson score-interval pivot (vector of M draws).
zw =rnorm(M,0,1)
q1 = (n0+(zw^2)/2)/(n+zw^2)
q2 = (zw)/(n+(zw^2))
q3 = (n0*(1-n0/n)+(zw^2)/4)^(1/2)
delta_2 = q1-q2*q3
# delta GPQ 3: Hannig's fiducial quantity, averaged from two beta draws.
delta_3 = 0.5*(rbeta(M,n0,n1+1))+0.5*(rbeta(M,n0+1,n1)) #rbeta(1,n0,n1+1)
#--------------- GCI -----------------#
# Sample summaries I, J, K, L, S1, S2 required by GCIFunc.
I=sum((sqrt(nonzero_value)/n_nonzero))
J=(sum((1/sqrt(nonzero_value))))/n_nonzero
K=sum((sqrt(nonzero_value)-I)^2)
L=sum(((1/sqrt(nonzero_value))-J)^2)
S1=sum(nonzero_value)
S2=sum(1/nonzero_value)
#Call GCIFunc
# --------------- GCI: obtain GPQ samples for the variance --------------- #
GCIVar<-GCIFunc(n_nonzero, I, J, K, L, S1, S2,delta_1,delta_2,delta_3)
GCIVar_1<-GCIVar[,1]
GCIVar_2<-GCIVar[,2]
GCIVar_3<-GCIVar[,3]
#---------------End GCI--------------#
#--------------- FGCI: fiducial samples for the variance ---------------#
FGCIVar<-FuncFGCI(nonzero_value,delta_1,delta_2,delta_3)
FGCIVar_1<-FGCIVar[,1]
FGCIVar_2<-FGCIVar[,2]
FGCIVar_3<-FGCIVar[,3]
#---------------End FGCI-------------#
#----------------- BCI -----------------#
# MME estimates of alpha and beta, used as starting values for optim().
AAlpha=(2*(((mean(x2)*(sum(x2^(-1))/n1))^(1/2))-1))^(1/2)
BBataa=(mean(x2)*((sum((x2)^(-1))/n1)^(-1)))^(1/2)
# MLE of alpha and beta from the original sample via BFGS.
AA=optim(c(AAlpha,BBataa),Bir.lik1,gr = nll_grad, method = "BFGS", w=x2)
alphahat=AA[[1]][1]
batahat=AA[[1]][2]
# CBC parametric bootstrap estimates: first 500 entries are alpha* draws,
# last 500 are beta* draws (see BCIFUNC).
BCIG1<-BCIFUNC(alphahat,batahat,n1,AAlpha,BBataa)
alphastar11<-BCIG1[1:500]
Betastar<-BCIG1[501:1000]
# The variance is only defined for alpha, beta > 0: zero out any bootstrap
# pair with a non-positive component, then drop the zeroed pairs.
for (v in 1:500) { if ( (Betastar[v]<=0) | (alphastar11[v]<=0) ) { Betad[v]=0
Alad[v]=0 } else { Betad[v]=Betastar[v]
Alad[v]=alphastar11[v] } }
AApha <- Alad[!Alad %in% 0]
BBeta <- Betad[!Betad %in% 0]
# Bootstrap draws of the delta-BS variance (same functional form as tau).
# BUG FIX: the delta term previously used the scalar MME estimate AAlpha
# instead of the bootstrap vector AApha, inconsistent with every other
# variance formula in this file (tau, GCIVar, FGCIVar, tau_NA), which use
# one alpha throughout.
# NOTE(review): BBeta/AApha (length <= 500) are combined with delta_2 and
# delta_3 of length M = 2000, so R recycles the shorter vectors — confirm
# this recycling is intended by the original method.
BCIVar_1 = (1-delta_1)*(BBeta^2)*((AApha^2)*(1+(5*(AApha^2))/4) + (delta_1)*(1+(AApha^2)/2)^(2))
BCIVar_2 = (1-delta_2)*(BBeta^2)*((AApha^2)*(1+(5*(AApha^2))/4) + (delta_2)*(1+(AApha^2)/2)^(2))
BCIVar_3 = (1-delta_3)*(BBeta^2)*((AApha^2)*(1+(5*(AApha^2))/4) + (delta_3)*(1+(AApha^2)/2)^(2))
#----------------End BCI---------------#
#---------------- NA: Wald-type interval from NAfunc ---------------#
NAVar<-NAfunc(x2)
L.NA[i]=NAVar[,1]
U.NA[i]=NAVar[,2]
#----------------END NA---------------#
# 95% equal-tailed percentile bounds and interval length for GCI
# (one set per delta GPQ).
L.GCI1[i]=quantile(GCIVar_1,prob = 0.025)
U.GCI1[i]=quantile(GCIVar_1,prob = 0.975)
AV.GCI1[i]=U.GCI1[i]-L.GCI1[i]
L.GCI2[i]=quantile(GCIVar_2,prob = 0.025)
U.GCI2[i]=quantile(GCIVar_2,prob = 0.975)
AV.GCI2[i]=U.GCI2[i]-L.GCI2[i]
L.GCI3[i]=quantile(GCIVar_3,prob = 0.025)
U.GCI3[i]=quantile(GCIVar_3,prob = 0.975)
AV.GCI3[i]=U.GCI3[i]-L.GCI3[i]
# Percentile bounds and lengths for FGCI.
L.FGCI1[i]=quantile(FGCIVar_1,prob = 0.025)
U.FGCI1[i]=quantile(FGCIVar_1,prob = 0.975)
AV.FGCI1[i]=U.FGCI1[i]-L.FGCI1[i]
L.FGCI2[i]=quantile(FGCIVar_2,prob = 0.025)
U.FGCI2[i]=quantile(FGCIVar_2,prob = 0.975)
AV.FGCI2[i]=U.FGCI2[i]-L.FGCI2[i]
L.FGCI3[i]=quantile(FGCIVar_3,prob = 0.025)
U.FGCI3[i]=quantile(FGCIVar_3,prob = 0.975)
AV.FGCI3[i]=U.FGCI3[i]-L.FGCI3[i]
# Percentile bounds and lengths for BCI.
L.BCI1[i]=quantile(BCIVar_1,prob = 0.025)
U.BCI1[i]=quantile(BCIVar_1,prob = 0.975)
AV.BCI1[i]=U.BCI1[i]-L.BCI1[i]
L.BCI2[i]=quantile(BCIVar_2,prob = 0.025)
U.BCI2[i]=quantile(BCIVar_2,prob = 0.975)
AV.BCI2[i]=U.BCI2[i]-L.BCI2[i]
L.BCI3[i]=quantile(BCIVar_3,prob = 0.025)
U.BCI3[i]=quantile(BCIVar_3,prob = 0.975)
AV.BCI3[i]=U.BCI3[i]-L.BCI3[i]
# Interval length for NA (bounds come from NAfunc).
AV.NA[i]=U.NA[i]-L.NA[i]
# Coverage indicators: 1 when the interval contains the true tau, else 0.
if ( L.GCI1[i]<=tau && tau<=U.GCI1[i] ) { #for GCI1
CP.GCI1[i]=1 }
else {CP.GCI1[i]=0}
if ( L.GCI2[i]<=tau && tau<=U.GCI2[i] ) { #for GCI2
CP.GCI2[i]=1 }
else {CP.GCI2[i]=0}
if ( L.GCI3[i]<=tau && tau<=U.GCI3[i] ) { #for GCI3
CP.GCI3[i]=1 }
else {CP.GCI3[i]=0}
if ( L.FGCI1[i]<=tau && tau<=U.FGCI1[i] ) { #for FGCI1
CP.FGCI1[i]=1 }
else {CP.FGCI1[i]=0}
if ( L.FGCI2[i]<=tau && tau<=U.FGCI2[i] ) { #for FGCI2
CP.FGCI2[i]=1 }
else {CP.FGCI2[i]=0}
if ( L.FGCI3[i]<=tau && tau<=U.FGCI3[i] ) { #for FGCI3
CP.FGCI3[i]=1 }
else {CP.FGCI3[i]=0}
if ( L.BCI1[i]<=tau && tau<=U.BCI1[i] ) { #for BCI1
CP.BCI1[i]=1 }
else {CP.BCI1[i]=0}
if ( L.BCI2[i]<=tau && tau<=U.BCI2[i] ) { #for BCI2
CP.BCI2[i]=1 }
else {CP.BCI2[i]=0}
if ( L.BCI3[i]<=tau && tau<=U.BCI3[i] ) { #for BCI3
CP.BCI3[i]=1 }
else {CP.BCI3[i]=0}
if ( L.NA[i]<=tau && tau<=U.NA[i] ) { #for NA
CP.NA[i]=1 }
else {CP.NA[i]=0}
}
# --- Monte Carlo summaries over the M runs ---
# Average length and coverage probability for each method / delta GPQ.
AverLengthGCIVar1=mean(AV.GCI1) # GCI1
CoveragePGCIVar1=mean(CP.GCI1)
AverLengthGCIVar2=mean(AV.GCI2) # GCI2
CoveragePGCIVar2=mean(CP.GCI2)
AverLengthGCIVar3=mean(AV.GCI3) # GCI3
CoveragePGCIVar3=mean(CP.GCI3)
AverLengthFGCIVar1=mean(AV.FGCI1) # FGCI1
CoveragePFGCIVar1=mean(CP.FGCI1)
AverLengthFGCIVar2=mean(AV.FGCI2) # FGCI2
CoveragePFGCIVar2=mean(CP.FGCI2)
AverLengthFGCIVar3=mean(AV.FGCI3) # FGCI3
CoveragePFGCIVar3=mean(CP.FGCI3)
AverLengthBCIVar1=mean(AV.BCI1) # BCI1
CoveragePBCIVar1=mean(CP.BCI1)
AverLengthBCIVar2=mean(AV.BCI2) # BCI2
CoveragePBCIVar2=mean(CP.BCI2)
AverLengthBCIVar3=mean(AV.BCI3) # BCI3
CoveragePBCIVar3=mean(CP.BCI3)
AverLengthNAVar=mean(AV.NA) # NA
CoveragePNAVar=mean(CP.NA)
# Collect the per-method summaries for printing.
AV_GCI=c(AverLengthGCIVar1,AverLengthGCIVar2,AverLengthGCIVar3)
CP_GCI=c(CoveragePGCIVar1,CoveragePGCIVar2,CoveragePGCIVar3)
AV_FGCI=c(AverLengthFGCIVar1,AverLengthFGCIVar2,AverLengthFGCIVar3)
CP_FGCI=c(CoveragePFGCIVar1,CoveragePFGCIVar2,CoveragePFGCIVar3)
AV_BCI=c(AverLengthBCIVar1,AverLengthBCIVar2,AverLengthBCIVar3)
CP_BCI=c(CoveragePBCIVar1,CoveragePBCIVar2,CoveragePBCIVar3)
# NOTE(review): a bare `tau` inside a for-loop is evaluated but NOT
# auto-printed; use print(tau) if the value should appear in the output.
tau
alpbe=c(beta,alpha)
# Report the current simulation setting.
print(n)
print(delta)
print(alpbe)
# Print coverage probabilities and average lengths for each method.
print("GCI")
print(CP_GCI)
print(AV_GCI)
print("FGCI")
print(CP_FGCI)
print(AV_FGCI)
print("BCI")
print(CP_BCI)
print(AV_BCI)
print("NA")
print(CoveragePNAVar)
print(AverLengthNAVar)
print("-----------------------------------------------")
}
}
}
# Report total elapsed time for the full parameter grid.
toc()