count=read.csv("C:/MacLaren_PowerAnalysis_Count_Data.csv") #import occurrence dataset
str(count)
# date = day and year (YYYYMMDD)
# minutes = start time of 5-minute interval within the day (range 0-1440)
# HT = count of Houston Toad detections within each 5-minute interval
# HT_bino = binomial Houston Toad occurrence
# day = month and day (MDD, e.g. 215 = February 15)

weather=read.csv(file.choose()) #import environmental dataset
str(weather)
# date = day and year (YYYYMMDD)
# hour = hour of day (range 1-24)
# temp = temperature (degrees Celsius)
# hum = percent humidity
# wind = wind speed (km/h)
# pressure = barometric pressure (mmHg)
# precip = accumulated rainfall (mm)
# moon = percent of moon illuminated
# d_p24 = change in barometric pressure since 24 hours previous

#####Trim Dataset#####
febapr=count[count$day>200 & count$day<500,] #February through April dates only
dates=unique(febapr$date) #dates in the febapr dataset; ensures no date is drawn repeatedly

#RANDOM PROTOCOL#####################################################################################################################################
#Generate 1000 Seasons of Calling Activity
N=1000
ran.mat=matrix(NA,nrow=length(dates),ncol=N) #matrix used to track samples and increase survey duration
HT.mat=matrix(NA,nrow=length(dates),ncol=N)  #matrix of presence/absence
for(j in 1:N){
  ran.sam=NULL
  for(i in 1:length(dates)){
    x=sample(which(count$date==dates[i]),1)
    ran.sam=c(ran.sam,x)} #vector of row numbers to draw from the occurrence data
  ran.mat[,j]=ran.sam
  HT.mat[,j]=count[ran.mat[,j],"HT_bino"]}

#Sample Simulated Seasons at different lengths of survey duration
out.test=array(dim=c(length(dates),N,12))
out.test[,,1]=HT.mat
for(k in 2:12){
  x=ran.mat+(k-1) #bump the sampled row indices forward by (k-1) 5-min intervals
  y=matrix(NA,nrow=length(dates),ncol=N)
  for(i in 1:N){
    y[,i]=count[x[,i],"HT_bino"]}
  out.test[,,k]=y}
#out.test is an array of 1s and 0s whose slices can be summed to reflect longer surveys

out.test2=array(dim=c(length(dates),N,12)) #cumulative detections over the first k intervals
out.test2[,,1]=out.test[,,1]
for(k in 2:12){
  out.test2[,,k]=out.test2[,,k-1]+out.test[,,k]}
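#Sanity check: the windowing above assumes rows of "count" within a date are
#consecutive 5-min intervals, so shifting the sampled row by (k-1) and summing
#gives detections over a 5*k-minute survey. A minimal sketch on a toy
#indicator vector (values and starting index below are made up for illustration):
ht=c(0,0,1,0,1,0) #hypothetical HT_bino values for six consecutive 5-min intervals
start=2           #hypothetical sampled starting interval
windows=sapply(1:4,function(k) sum(ht[start:(start+k-1)])) #detections in 5,10,15,20-min windows
windows>0 #FALSE TRUE TRUE TRUE: the toad is detected once the survey reaches 10 min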
#calculate detection probability
out=matrix(NA,nrow=N,ncol=12)
for(i in 1:12){
  for(j in 1:N){
    y=sum(out.test2[,j,i]>0)/length(dates) #proportion of dates with at least one detection
    out[j,i]=y}}

#calculate confidence bounds
low_p=rep(NA,12)
for(i in 1:12){low_p[i]=as.numeric(quantile(out[,i],0.025,na.rm=T))}
mean_p=rep(NA,12)
for(i in 1:12){mean_p[i]=mean(out[,i],na.rm=T)}
high_p=rep(NA,12)
for(i in 1:12){high_p[i]=as.numeric(quantile(out[,i],0.975,na.rm=T))}
p_det=data.frame(mins=seq(5,60,5),low_p,mean_p,high_p)

#number of surveys required to be 95% confident in absence: solve (1-p)^n <= 0.05 for n
n.surveys=data.frame(mins=seq(5,60,5),low=rep(NA,12),mean=rep(NA,12),high=rep(NA,12))
for(i in 1:12){
  n.surveys[i,"low"]=log(0.05)/log(1-p_det$low_p[i])
  n.surveys[i,"high"]=log(0.05)/log(1-p_det$high_p[i])
  n.surveys[i,"mean"]=log(0.05)/log(1-p_det$mean_p[i])}
p_random=p_det #detection probability for randomized surveys
n_surveys_random=n.surveys #number of surveys required to be 95% confident in absence
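#The survey-number formula requires the chance of missing toads on every one
#of n independent surveys to fall below 0.05: (1-p)^n <= 0.05, hence
#n >= log(0.05)/log(1-p). Quick check with a made-up detection probability
#(p = 0.5 is illustrative, not a value from the data):
p=0.5
log(0.05)/log(1-p)          #~4.32, so 5 surveys give >95% confidence in absence
ceiling(log(0.05)/log(1-p)) #round up to whole surveys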
#USFWS 2007 PROTOCOL#################################################################################################################################
#Restrict environmental data to reflect USFWS 2007 guidelines
USFWS<-weather[weather$temp > 14 & weather$hum > 70 & weather$wind < 24 & weather$moon > 0.5,]
USFWS$year=as.numeric(substr(USFWS$date,1,4)) #create column of "year"
USFWS$day=as.numeric(substr(USFWS$date,5,8))  #create column of "day" (MMDD)
USFWS=USFWS[USFWS$day>200 & USFWS$day<500,]   #restrict to February-April
USFWS_dates=na.exclude(unique(USFWS$date))
USFWS_toads=NULL
for(i in 1:length(USFWS_dates)){
  x=febapr[which(febapr$date == USFWS_dates[i]),]
  USFWS_toads=rbind(USFWS_toads,x)}

#UPDATE "dates" to reflect new survey dates
dates=unique(USFWS_toads$date)

#Generate 1000 Seasons of Calling Activity
N=1000
ran.mat=matrix(NA,nrow=length(dates),ncol=N) #matrix used to track samples and increase survey duration
HT.mat=matrix(NA,nrow=length(dates),ncol=N)  #matrix of presence/absence
for(j in 1:N){
  ran.sam=NULL
  for(i in 1:length(dates)){
    x=sample(which(count$date==dates[i]),1)
    ran.sam=c(ran.sam,x)} #vector of row numbers to draw from the occurrence data
  ran.mat[,j]=ran.sam
  HT.mat[,j]=count[ran.mat[,j],"HT_bino"]}

#Sample Simulated Seasons at different lengths of survey duration
out.test=array(dim=c(length(dates),N,12))
out.test[,,1]=HT.mat
for(k in 2:12){
  x=ran.mat+(k-1) #bump the sampled row indices forward by (k-1) 5-min intervals
  y=matrix(NA,nrow=length(dates),ncol=N)
  for(i in 1:N){
    y[,i]=count[x[,i],"HT_bino"]}
  out.test[,,k]=y}

out.test2=array(dim=c(length(dates),N,12)) #cumulative detections over the first k intervals
out.test2[,,1]=out.test[,,1]
for(k in 2:12){
  out.test2[,,k]=out.test2[,,k-1]+out.test[,,k]}

#calculate detection probability
out=matrix(NA,nrow=N,ncol=12)
for(i in 1:12){
  for(j in 1:N){
    y=sum(out.test2[,j,i]>0)/length(dates)
    out[j,i]=y}}

#calculate confidence bounds
low_p=rep(NA,12)
for(i in 1:12){low_p[i]=as.numeric(quantile(out[,i],0.025,na.rm=T))}
mean_p=rep(NA,12)
for(i in 1:12){mean_p[i]=mean(out[,i],na.rm=T)}
high_p=rep(NA,12)
for(i in 1:12){high_p[i]=as.numeric(quantile(out[,i],0.975,na.rm=T))}
p_det=data.frame(mins=seq(5,60,5),low_p,mean_p,high_p)

n.surveys=data.frame(mins=seq(5,60,5),low=rep(NA,12),mean=rep(NA,12),high=rep(NA,12))
for(i in 1:12){
  n.surveys[i,"low"]=log(0.05)/log(1-p_det$low_p[i])
  n.surveys[i,"high"]=log(0.05)/log(1-p_det$high_p[i])
  n.surveys[i,"mean"]=log(0.05)/log(1-p_det$mean_p[i])}
p_USFWS=p_det #detection probability
n_surveys_USFWS=n.surveys #number of surveys required

#NEW PROTOCOL########################################################################################################################################
NEW<-weather[weather$temp > 16 & weather$precip > 0 & weather$d_p24 < -0.07,] #enact new protocol
NEW$year=as.numeric(substr(NEW$date,1,4)) #create column of "year"
NEW$day=as.numeric(substr(NEW$date,5,8))  #create column of "day" (MMDD)
NEW=NEW[NEW$day>200 & NEW$day<500,]       #restrict to February-April
NEW_dates=na.exclude(unique(NEW$date))
NEW_toads=NULL
for(i in 1:length(NEW_dates)){
  x=febapr[which(febapr$date == NEW_dates[i]),]
  NEW_toads=rbind(NEW_toads,x)}
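#The new protocol keys on d_p24, the pressure change relative to 24 hours
#earlier, which the weather file already provides. For readers rebuilding it,
#a minimal sketch, assuming one row per hour ordered by date then hour with no
#gaps (that layout is an assumption, not documented above):
w=weather[order(weather$date,weather$hour),]
w$d_p24=c(rep(NA,24),diff(w$pressure,lag=24)) #pressure now minus pressure 24 rows (hours) earlier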
#UPDATE code to draw from the new dates
dates=unique(NEW_toads$date)

#Generate 1000 Seasons of Calling Activity
N=1000
ran.mat=matrix(NA,nrow=length(dates),ncol=N) #matrix used to track samples and increase survey duration
HT.mat=matrix(NA,nrow=length(dates),ncol=N)  #matrix of presence/absence
for(j in 1:N){
  ran.sam=NULL
  for(i in 1:length(dates)){
    x=sample(which(count$date==dates[i]),1)
    ran.sam=c(ran.sam,x)} #vector of row numbers to draw from the occurrence data
  ran.mat[,j]=ran.sam
  HT.mat[,j]=count[ran.mat[,j],"HT_bino"]}

#Sample Simulated Seasons at different lengths of survey duration
out.test=array(dim=c(length(dates),N,12))
out.test[,,1]=HT.mat
for(k in 2:12){
  x=ran.mat+(k-1) #bump the sampled row indices forward by (k-1) 5-min intervals
  y=matrix(NA,nrow=length(dates),ncol=N)
  for(i in 1:N){
    y[,i]=count[x[,i],"HT_bino"]}
  out.test[,,k]=y}

out.test2=array(dim=c(length(dates),N,12)) #cumulative detections over the first k intervals
out.test2[,,1]=out.test[,,1]
for(k in 2:12){
  out.test2[,,k]=out.test2[,,k-1]+out.test[,,k]}

#calculate detection probability
out=matrix(NA,nrow=N,ncol=12)
for(i in 1:12){
  for(j in 1:N){
    y=sum(out.test2[,j,i]>0)/length(dates)
    out[j,i]=y}}

#calculate confidence bounds
low_p=rep(NA,12)
for(i in 1:12){low_p[i]=as.numeric(quantile(out[,i],0.025,na.rm=T))}
mean_p=rep(NA,12)
for(i in 1:12){mean_p[i]=mean(out[,i],na.rm=T)}
high_p=rep(NA,12)
for(i in 1:12){high_p[i]=as.numeric(quantile(out[,i],0.975,na.rm=T))}
p_det=data.frame(mins=seq(5,60,5),low_p,mean_p,high_p)

n.surveys=data.frame(mins=seq(5,60,5),low=rep(NA,12),mean=rep(NA,12),high=rep(NA,12))
for(i in 1:12){
  n.surveys[i,"low"]=log(0.05)/log(1-p_det$low_p[i])
  n.surveys[i,"high"]=log(0.05)/log(1-p_det$high_p[i])
  n.surveys[i,"mean"]=log(0.05)/log(1-p_det$mean_p[i])}
p_NEW=p_det #detection probability
n_surveys_NEW=n.surveys #number of surveys required

#results#
p_random
p_USFWS
p_NEW
n_surveys_random
n_surveys_USFWS
n_surveys_NEW
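#To compare the protocols at a glance, a minimal base-R plotting sketch; it
#uses only the p_* data frames produced above (line types and labels are
#arbitrary choices, not part of the original analysis):
plot(p_random$mins,p_random$mean_p,type="l",ylim=c(0,1),
     xlab="Survey duration (min)",ylab="Detection probability")
lines(p_USFWS$mins,p_USFWS$mean_p,lty=2)
lines(p_NEW$mins,p_NEW$mean_p,lty=3)
legend("bottomright",legend=c("Random","USFWS 2007","New"),lty=1:3)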