#################################################################################
# R code to construct confidence intervals for percentiles of the              #
# Birnbaum-Saunders distribution: GCI, BCI, BayCI and HPD                      #
#################################################################################
library(MASS)
library(HDInterval)
library(TeachingDemos)
library(LearnBayes)
library(tictoc)
tic()
set.seed(3)

#initialize storage vectors
I=c(); J=c(); K=c(); L=c(); S1=c(); S2=c(); V=c(); Talpha=c(); GCI=c(); TB1=c()
Alpha.gibbs=c(); Alpha.Bays=c(); CV.hat=c(); alphahat=c(); batahat=c(); alphahat1=c()
bias=c(); alphastar=c(); CVhat=c(); LBCI=c(); UBCI=c(); AVBCI=c(); cpBCI=c()
Alpha1=c(); Beta1=c(); Alpha2=c(); R1=c(); RR1=c(); U1=c(); GCICVPER=c(); GCIPer=c()
LGCIPer=c(); UGCIPer=c(); AVGCIPer=c(); cpGCIPer=c(); Betahat1=c()
LBCIPer=c(); UBCIPer=c(); AVBCIPer=c(); cpBCIPer=c()
LBayPer=c(); UBayPer=c(); AVBayPer=c(); cpBayPer=c()
LHPDPer=c(); UHPDPer=c(); AVHPDPer=c(); cpHPDPer=c()
Alphabays=c(); Betad=c(); Alad=c(); Betastar=c()

#set parameters
n=10                              #sample size
betaa=1                           #true value of the scale parameter beta
alpha=0.10                        #true value of the shape parameter alpha
a1 <- b1 <- a2 <- b2 <- 10^(-4)   #hyperparameters
r <- 2                            #the parameter of the generalized ratio-of-uniforms method
M <- 1000                         #the number of generated posterior samples
rept=5000                         #the number of simulation replications in the main for loop

#calculate the true percentile; p = 0.5 (z.p = 0), so the target percentile is the median
z.p <- qnorm(0.5)
PerR=(betaa/4)*((alpha*z.p+sqrt((alpha^2)*(z.p^2)+4))^2)

#Declaring the log-likelihood function for BCI ---------------------------------
Bir.lik1<-function(pars,w){
  alpha<-pars[1]
  bata<-pars[2]
  n<-length(w)
  #log-likelihood of the Birnbaum-Saunders distribution (additive constants dropped)
  logl<- ((-n)*log(alpha))-(n*log(bata))+sum(log(((bata/w)^(1/2))+((bata/w)^(3/2))))-((1/(2*(alpha^2)))*sum((w/bata)+(bata/w)-2))
  #return the negative log-likelihood for minimization
  return(-logl)
}

#Declaring the gradient function for BCI
nll_grad<-function(p,w){
  alpha<-p[1]
  bata<-p[2]
  #partial derivatives of the log-likelihood with respect to alpha and beta
  logal<- (-1/alpha)-(-(((w/bata)+(bata/w)-2)/(alpha^3)))
  logbe<- (-(1/bata))+((w+(3*bata))/((2*w*bata)+(2*bata^2)))-(((1/w)-(w/(bata^2)))/(2*(alpha^2)))
  #return the gradient of the negative log-likelihood
  return(c(-sum(logal),-sum(logbe)))
}
#--------------------------------------------------------------------------------

#BCIFUNC for BCI
BCIFUNC<-function(alphahatBCI,batahatBCI,n,AAlph,BBata){
  alphahat1=c(); Betahat1=c()
  for (m in 1:500) {
    #Generate a bootstrap sample from the BS distribution with parameters alpha_hat and beta_hat
    x11<-rnorm(n, mean = 0, sd = alphahatBCI/2)
    y11<-batahatBCI*(1+(2*(x11^2))+((2*x11)*sqrt(1+(x11^2))))
    #Compute the bootstrap MLE by using BFGS
    aa=optim(c(AAlph,BBata),Bir.lik1, gr = nll_grad, method = "BFGS", w=y11)
    #bootstrap MLE of alpha
    alphahat1[m]=aa[[1]][1]
    #bootstrap MLE of beta
    Betahat1[m]=aa[[1]][2]
  }
  #calculate the bootstrap bias estimate for alpha
  bias=mean(alphahat1)-alphahatBCI
  #calculate the bootstrap bias estimate for beta
  biasBeta1=mean(Betahat1)-batahatBCI
  #calculate the CBC parametric bootstrap estimates of alpha
  alphastarwith0=alphahat1-(2*bias)
  #calculate the CBC parametric bootstrap estimates of beta
  Betastar1=Betahat1-(2*biasBeta1)
  #return the CBC parametric bootstrap estimates of alpha (first 500) and beta (last 500)
  return(c(alphastarwith0,Betastar1))
}
#--------------------------------------------------------------------------------
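#--------------------------------------------------------------------------------
# Illustrative check (a small sketch, not part of the original simulation).
# The sampler used in BCIFUNC and in the main loop maps y ~ N(0, alpha/2) to
#   x = beta*(1 + 2*y^2 + 2*y*sqrt(1 + y^2)),
# which is algebraically the usual Birnbaum-Saunders generator
#   x = (beta/4)*(alpha*Z + sqrt(alpha^2*Z^2 + 4))^2 with Z ~ N(0,1),
# so x follows BS(alpha, beta). The check below uses a fixed, arbitrary value
# z.check (introduced only for this illustration) so the random-number stream
# of the simulation is not affected.
z.check <- 1.5
y.check <- (alpha/2)*z.check
all.equal(betaa*(1 + 2*y.check^2 + 2*y.check*sqrt(1 + y.check^2)),
          (betaa/4)*(alpha*z.check + sqrt(alpha^2*z.check^2 + 4))^2)   # TRUE
#--------------------------------------------------------------------------------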
#the generalized ratio-of-uniforms method for BayCI and the HPD interval
RatioOfUniformsBS <- function(t, a1, b1, a2, b2, r, M) {
  n <- length(t)
  #Declaring the function for calculating a(r) (on the log scale)
  betaLog <- function(b) {
    1/(r + 1) * (-(n + a1 + 1) * log(b) - b1/b + sum(log((b/t)^(1/2) + (b/t)^(3/2))) -
      ((n + 1)/2 + a2) * log(sum(1/2 * (t/b + b/t - 2)) + b2))
  }
  #Declaring the function for calculating b(r+) (on the log scale)
  betafLog <- function(b) {
    log(b) + r/(r + 1) * (-(n + a1 + 1) * log(b) - b1/b + sum(log((b/t)^(1/2) + (b/t)^(3/2))) -
      ((n + 1)/2 + a2) * log(sum(1/2 * (t/b + b/t - 2)) + b2))
  }
  #compute a(r) on the log scale
  a.max <- optimize(betaLog, lower = 0, upper = 20000, maximum = TRUE)$objective
  #compute b(r+) on the log scale
  b.max <- optimize(betafLog, lower = 0, upper = 20000, maximum = TRUE)$objective
  a.val <- b.val <- rep(0, M)
  Alphabays <- rep(0, M)
  #for j = 1 to M: generate U from Uniform(0, a(r)) and V from Uniform(0, b(r+)) and compute rho
  for (j in 1:M) {
    U <- runif(1, 0, exp(a.max))
    V <- runif(1, 0, exp(b.max))
    rho <- V/U^r
    #If log U > betaLog(rho), regenerate U and V and recompute rho
    while (log(U) > betaLog(rho)) {
      U <- runif(1, 0, exp(a.max))
      V <- runif(1, 0, exp(b.max))
      rho <- V/U^r
    }
    #If log U <= betaLog(rho), accept: set beta[j] = rho
    b.val[j] <- rho
    #Generate alpha^2 from its conditional inverse-gamma posterior
    a.val[j] <- rigamma(1, n/2 + a2, sum(t/rho + rho/t - 2)/2 + b2)
    #alpha = sqrt(alpha^2)
    Alphabays[j] = sqrt(a.val[j])
  }
  #Compute the posterior sample of the percentile
  PerBays = (b.val/4)*((Alphabays*z.p+sqrt((Alphabays^2)*(z.p^2)+4))^2)
  #return the posterior sample of the percentile
  return(PerBays)
}
#--------------------------------------------------------------------------------

GCIFunc<-function(n, I, J, K, L, S1, S2){
  Num=5000
  j=0
  TB1=c(); V=c(); Talpha=c(); GCIPer=c()
  while (j < Num) {
    #Generate T from the Student t distribution with n-1 degrees of freedom
    T=rt(1, df = n-1)
    #coefficients of the quadratic equation in beta induced by the pivotal quantity
    #(see the derivation sketch after this function)
    a=(n*(n-1)*(J^2))-((T^2)*L)
    b=(2*n*(T^2)*(1-(I*J)))-(2*n*(n-1)*I*J)
    cc=(n*(n-1)*(I^2))-((T^2)*K)
    #since the discriminant b^2-4*a*cc must be >0, check before solving for B1 and B2
    if (((b^2)-(4*a*cc))>0) {
      B1=((-b)+(sqrt((b^2)-(4*a*cc))))/(2*a)
      B2=((-b)-(sqrt((b^2)-(4*a*cc))))/(2*a)
    } else {
      B1=0
      B2=0
    }
    #find TB; TB is the GPQ for beta, chosen to match the sign of T
    if (T<=0) {
      TB=max(B1,B2)
    } else {
      TB=min(B1,B2)
    }
    #since beta>0, check whether TB>0; if not, generate T again
    if (TB>0) {
      j=j+1
      TB1[j]=TB
      #Generate a chi-squared random variable with n degrees of freedom
      V[j]=rchisq(1,df=n)
      #calculate the GPQ for alpha
      Talpha[j]=((((S2*(TB1[j]^2))-(2*n*TB1[j])+S1)/(TB1[j]*V[j]))^(1/2))
      #calculate the GPQ for the percentile
      GCIPer[j] = (TB1[j]/4)*((Talpha[j]*z.p+sqrt((Talpha[j]^2)*(z.p^2)+4))^2)
    }
  }
  #return the GPQ sample for the percentile
  return(GCIPer)
}
#--------------------------------------------------------------------------------
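#--------------------------------------------------------------------------------
# Derivation sketch for the quadratic solved in GCIFunc (the coefficient form
# used above is the standard pivotal construction assumed here).
# For BS(alpha, beta) data, sqrt(x_i) - beta/sqrt(x_i) ~ N(0, alpha^2*beta) i.i.d.
# With I = mean(sqrt(x)), J = mean(1/sqrt(x)), K = sum((sqrt(x)-I)^2),
# L = sum(((1/sqrt(x))-J)^2) and the exact identity
#   sum((sqrt(x_i)-I)*(1/sqrt(x_i)-J)) = n*(1 - I*J)   (since sqrt(x)*(1/sqrt(x)) = 1),
# the statistic
#   T = sqrt(n)*(I - beta*J) / sqrt((K - 2*beta*n*(1-I*J) + beta^2*L)/(n-1))
# follows a t distribution with n-1 degrees of freedom. Setting T equal to a
# generated t value and clearing the denominator gives
#   a*beta^2 + b*beta + cc = 0
# with a, b and cc as computed in GCIFunc; the root consistent with the sign of
# T is the generalized pivotal quantity TB for beta.
#--------------------------------------------------------------------------------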
for (i in 1:rept) {
  #Generate data from BS(alpha, betaa)
  y <- rnorm(n, mean = 0, sd = alpha/2)
  x <- betaa*(1+(2*(y^2))+((2*y)*sqrt(1+y^2)))

  #---------------------------GCI------------------------------------------------
  #Calculate I, J, K, L, S1, S2 for the GCI method
  I=sum((sqrt(x)/n))
  J=sum((1/sqrt(x)))/n
  K=sum((sqrt(x)-I)^2)
  L=sum(((1/sqrt(x))-J)^2)
  S1=sum(x)
  S2=sum(1/x)
  #Call GCIFunc
  GCIPercentile <- GCIFunc(n, I, J, K, L, S1, S2)
  #---------------------------End GCI---------------------------------------------

  #---------------------------BCI-------------------------------------------------
  #Compute the MME estimators of alpha and beta as initial values
  AAlpha=(2*(((mean(x)*(sum(x^(-1))/n))^(1/2))-1))^(1/2)
  BBataa=(mean(x)*((sum(x^(-1))/n)^(-1)))^(1/2)
  #using the Broyden-Fletcher-Goldfarb-Shanno (BFGS) quasi-Newton nonlinear optimization
  AA=optim(c(AAlpha,BBataa),Bir.lik1, gr = nll_grad, method = "BFGS", w=x)
  #MLEs of alpha and beta from the original sample
  alphahat=AA[[1]][1]
  batahat=AA[[1]][2]
  #Call BCIFUNC
  BCIG1<-BCIFUNC(alphahat,batahat,n,AAlpha,BBataa)
  #CBC parametric bootstrap estimates of alpha and beta
  alphastar11<-BCIG1[1:500]
  Betastar<-BCIG1[501:1000]
  #For the percentile: since the percentile is a function of alpha and beta, and
  #both must be positive, check the CBC bootstrap estimates; if alpha <= 0 or
  #beta <= 0, set both to 0
  for (v in 1:500) {
    if ( (Betastar[v]<=0) | (alphastar11[v]<=0) ) {
      Betad[v]=0
      Alad[v]=0
    } else {
      Betad[v]=Betastar[v]
      Alad[v]=alphastar11[v]
    }
  }
  #delete alpha = 0 and beta = 0; we only consider alpha > 0 and beta > 0
  AApha <- Alad[!Alad %in% 0]
  BBeta <- Betad[!Betad %in% 0]
  #Compute the bootstrap sample of the percentile
  Perhat=(BBeta/4)*((AApha*z.p+sqrt((AApha^2)*(z.p^2)+4))^2)
  #---------------------------End BCI---------------------------------------------

  #---------------------------BayCI and HPD----------------------------------------
  #Call RatioOfUniformsBS
  PerBays<-RatioOfUniformsBS(x, a1, b1, a2, b2, r, M)
  #compute the HPD interval
  HPDPer=hdi(PerBays, 0.95)
  #---------------------------End BayCI and HPD-------------------------------------

  #calculate lower bound, upper bound and length for GCI
  LGCIPer[i]=quantile(GCIPercentile,prob = 0.025)
  UGCIPer[i]=quantile(GCIPercentile,prob = 0.975)
  AVGCIPer[i]=UGCIPer[i]-LGCIPer[i]
  #calculate lower bound, upper bound and length for BCI
  LBCIPer[i]=quantile(Perhat,prob = 0.025)
  UBCIPer[i]=quantile(Perhat,prob = 0.975)
  AVBCIPer[i]=UBCIPer[i]-LBCIPer[i]
  #calculate lower bound, upper bound and length for BayCI
  LBayPer[i]=quantile(PerBays,prob = 0.025)
  UBayPer[i]=quantile(PerBays,prob = 0.975)
  AVBayPer[i]=UBayPer[i]-LBayPer[i]
  #calculate lower bound, upper bound and length for HPD
  LHPDPer[i]=HPDPer[1]
  UHPDPer[i]=HPDPer[2]
  AVHPDPer[i]=UHPDPer[i]-LHPDPer[i]

  #coverage indicators: 1 if the true percentile PerR is covered, 0 otherwise
  if ( LGCIPer[i]<=PerR & PerR<=UGCIPer[i] ) {     #for GCI
    cpGCIPer[i]=1
  } else {cpGCIPer[i]=0}
  if ( LBCIPer[i]<=PerR & PerR<=UBCIPer[i] ) {     #for BCI
    cpBCIPer[i]=1
  } else {cpBCIPer[i]=0}
  if ( LBayPer[i]<=PerR & PerR<=UBayPer[i] ) {     #for BayCI
    cpBayPer[i]=1
  } else {cpBayPer[i]=0}
  if ( LHPDPer[i]<=PerR & PerR<=UHPDPer[i] ) {     #for HPD
    cpHPDPer[i]=1
  } else {cpHPDPer[i]=0}
}

#calculate the average length and coverage probability for GCI
AverLengthGCIPer=mean(AVGCIPer)
CoveragePGCIPer=mean(cpGCIPer)
#calculate the average length and coverage probability for BCI
AverLengthBCIPer=mean(AVBCIPer)
CoveragePBCIPer=mean(cpBCIPer)
#calculate the average length and coverage probability for BayCI
AverLengthBayPer=mean(AVBayPer)
CoveragePBayPer=mean(cpBayPer)
#calculate the average length and coverage probability for HPD
AverLengthHPDPer=mean(AVHPDPer)
CoveragePHPDPer=mean(cpHPDPer)

AVPer=c(AverLengthGCIPer, AverLengthBCIPer, AverLengthBayPer, AverLengthHPDPer)
CPPer=c(CoveragePGCIPer, CoveragePBCIPer, CoveragePBayPer, CoveragePHPDPer)
print(AVPer)
print(CPPer)
toc()
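#--------------------------------------------------------------------------------
# Optional summary (a small sketch, not part of the original output): collect the
# average lengths and coverage probabilities in a labelled table for easier reading.
results <- data.frame(Method = c("GCI", "BCI", "BayCI", "HPD"),
                      AverageLength = AVPer,
                      CoverageProbability = CPPer)
print(results)
#--------------------------------------------------------------------------------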