#'=================================================================================================#
#'                                    "Creating Home ranges"                                        #
#'=================================================================================================#

#' NB!!!! Set the working directory
setwd("C:/Shambala/data/")

#' MAKE SURE THE WORKING DIRECTORY IS CORRECT, i.e. [1] "C:/Shambala/data"
getwd()

#'------------------------------------- Load Packages ----------------------------------------------#
library(adehabitatLT)
library(lattice)
library(ggplot2)
library(extrafont)
library(plyr)
library(colorspace)
library(lubridate)
library(rgdal)
library(stringr)
library(adehabitatHR)
library(move)

#'----------------------------------- 1- Import Data -----------------------------------------------#
#' Import .csv data (columns: animal, lon, lat, timestamp (or date and time))
#' thanda_cheetah_clean.csv or kruger_buffalo_clean.csv
data_tracking <- read.csv("data_clean.csv")

#' Import the study area boundary .shp file
#' The dsn must not end in '/'
#' Thanda_boundary or kruger
study_area <- readOGR(dsn = '.', layer = 'kruger')

#'----------------------------- 2- Prepare the Imported Data ---------------------------------------#
#' Set timestamps to the correct timezone
data_tracking$timestamp <- as.POSIXct(data_tracking$timestamp, tz = "Africa/Johannesburg")
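
#' A quick sanity check (a minimal sketch): as.POSIXct() can silently return NA for
#' timestamps it cannot parse, so confirm that nothing was lost and that the date range
#' spans your collaring period before going any further.
sum(is.na(data_tracking$timestamp))           # should be 0
range(data_tracking$timestamp, na.rm = TRUE)  # should match the collaring period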
#'------------------------------ 3- Choose Temporal Scale ------------------------------------------#
#' Choose a temporal scale to calculate the home ranges, based on the question/s you want to answer #
#' and your data, i.e. do you want annual, seasonal, monthly, weekly or daily ranges?               #
#'                                                                                                  #
#' 1) The code adds the year to each of the time frames, because we don't want to calculate a       #
#'    range for months/days/weeks using locations from different years;                             #
#'                                                                                                  #
#' 2) Change/update the months you want to assign to a particular season;                           #
#'                                                                                                  #
#'------------------------------------------ RUN AS IS ---------------------------------------------#
#' All
data_tracking$Total <- paste(data_tracking$animal, sep = "")

#' WEEK
data_tracking$Lweek <- paste(data_tracking$animal, year(data_tracking$timestamp),
                             str_pad(week(data_tracking$timestamp), 2, pad = "0"), sep = "_")

#' DAY
data_tracking$Lday <- paste(data_tracking$animal, year(data_tracking$timestamp),
                            str_pad(yday(data_tracking$timestamp), 3, pad = "0"), sep = "_")

#' SEASON is a bit more complicated, as the wet season falls over two years, so Jan and Feb need
#' to be assigned year - 1.
#' Add a season column to the data frame
data_tracking$Season <- as.character(NA)

#' Update the season column with the correct season (change the values to the correct months if
#' necessary). NB: this is another reason why it is important to choose the correct collaring dates
data_tracking$Lmonth <- month(data_tracking$timestamp)
data_tracking$Lyear  <- year(data_tracking$timestamp)

#' WET
data_tracking$Season[data_tracking$Lmonth %in% c(11, 12)] <-
  paste(data_tracking$animal[data_tracking$Lmonth %in% c(11, 12)],
        data_tracking$Lyear[data_tracking$Lmonth %in% c(11, 12)], "w", sep = "_")
data_tracking$Season[data_tracking$Lmonth %in% c(1, 2)] <-
  paste(data_tracking$animal[data_tracking$Lmonth %in% c(1, 2)],
        as.numeric(data_tracking$Lyear[data_tracking$Lmonth %in% c(1, 2)]) - 1, "w", sep = "_")

#' DRY
data_tracking$Season[data_tracking$Lmonth %in% c(5, 6, 7, 8)] <-
  paste(data_tracking$animal[data_tracking$Lmonth %in% c(5, 6, 7, 8)],
        data_tracking$Lyear[data_tracking$Lmonth %in% c(5, 6, 7, 8)], "d", sep = "_")

#' FALL
data_tracking$Season[data_tracking$Lmonth %in% c(3, 4)] <-
  paste(data_tracking$animal[data_tracking$Lmonth %in% c(3, 4)],
        data_tracking$Lyear[data_tracking$Lmonth %in% c(3, 4)], "f", sep = "_")

#' SPRING
data_tracking$Season[data_tracking$Lmonth %in% c(9, 10)] <-
  paste(data_tracking$animal[data_tracking$Lmonth %in% c(9, 10)],
        data_tracking$Lyear[data_tracking$Lmonth %in% c(9, 10)], "s", sep = "_")

#' MONTH, extract year_month and save to the data frame
data_tracking$Lmonth <- paste(data_tracking$animal, data_tracking$Lyear,
                              str_pad(month(data_tracking$timestamp), 2, pad = "0"), sep = "_")

#' ANNUAL, here we extract the year of each location and save it to the data frame
data_tracking$Lyear <- paste(data_tracking$animal, year(data_tracking$timestamp), sep = "_")

#'-------------------------------------- END RUN AS IS ---------------------------------------------#
#' The above code added extra columns to the data_tracking table so that it can be divided temporally.
#' Make sure it is correct, example below
head(data_tracking)
#'   Total   Lyear        Lweek           Lday             Season         Lmonth
#'   Cilla   Cilla_2005   Cilla_2005_28   Cilla_2005_195   Cilla_2005_d   Cilla_2005_07
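
#' Before choosing a temporal scale below, it is worth checking how many fixes fall into
#' each temporal label: ranges built from only a handful of locations are unreliable. A
#' minimal sketch in base R, using the label columns created above; the sort puts the
#' undersampled periods first.
head(sort(table(data_tracking$Lmonth)))
head(sort(table(data_tracking$Season)))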
#'------------------------------------ + User Input ------------------------------------------------#
#' Specify temporal scale - NB!! after running this line you need to type in an option below
user <- menu(c("Year", "Season", "Month", "Week", "Day", "Total"),
             title = "What temporal scale do you want to create ranges for?")

id <- NA
#' Creates unique labels to subset the data and create ranges with
if(user == 1){
  subset <- unique(data_tracking$Lyear)
  id <- "Lyear"
}else if(user == 2){
  subset <- unique(data_tracking$Season)
  id <- "Season"
}else if(user == 3){
  subset <- unique(data_tracking$Lmonth)
  id <- "Lmonth"
}else if(user == 4){
  subset <- unique(data_tracking$Lweek)
  id <- "Lweek"
}else if(user == 5){
  subset <- unique(data_tracking$Lday)
  id <- "Lday"
}else if(user == 6){
  subset <- unique(data_tracking$Total)
  id <- "Total"
}

#' Summary of the home ranges that will be made, make sure these data are correct before continuing
paste(length(subset), "ranges will be calculated")

#'----------------------------- 4- Create SpatialPointsDataFrame -----------------------------------#
#' For all the methods, we need the GPS locations stored in an object of class SpatialPointsDataFrame
#' Take the longitude and latitude from the data and save them as SpatialPoints with a projection
sp_latlong <- SpatialPoints(data_tracking[, c("lon", "lat")],
                            proj4string = CRS("+proj=longlat +ellps=WGS84"))

#' Project to real-world coordinates (UTM zone 36S, metres)
sp_proj <- spTransform(sp_latlong, CRS("+init=epsg:32736"))

#' Add the ids to the SpatialPointsDataFrame depending on the temporal scale you chose
sp_proj$id <- data_tracking[, colnames(data_tracking) == id]

#' Add the original animal ID for later reference
sp_proj$name <- data_tracking$animal

#' Add the date to the data frame
sp_proj$Date <- data_tracking$timestamp

#' Double check the data frame
head(sp_proj)
summary(sp_proj)

#' Transform study_area, or any other shapefiles you may want to plot, to the same projection
#' (needed for the boundary plots in section 5.2)
study_area_proj <- spTransform(study_area, proj4string(sp_proj))
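
#' adehabitatHR refuses to fit a range for any id with fewer than 5 relocations, and fine
#' temporal scales (e.g. Day) easily produce such ids. A minimal sketch to find them and,
#' if you decide to, drop them before estimating; note that 5 is the package's hard
#' minimum, not a sample size recommendation.
fix_counts <- table(sp_proj$id)
fix_counts[fix_counts < 5]  # ids with too few fixes to estimate a range
sp_proj <- sp_proj[sp_proj$id %in% names(fix_counts[fix_counts >= 5]), ]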
#'------------------------------ 5- Calculating Home Ranges ----------------------------------------#
#---------------------------------------------------------------------------------------------------#
#' We can now create home ranges based on any of the above temporal "scales". We are going to       #
#' start off by calculating the simplest MCP and end off by calculating HRs using a dynamic         #
#' Brownian bridge movement model.                                                                  #
#---------------------------------------------------------------------------------------------------#

####---------------------------- 5.1- Minimum Convex Polygons ----------------------------------####
#' The MCP is probably the most widely used estimation method. This method calculates the smallest
#' convex polygon enclosing all the relocations of the animal. The polygon is considered to be the
#' home range of the animal. Because the home range has been defined as the area traversed by the
#' animal during its normal activities of foraging, mating and caring for young (Burt, 1943),
#' a common operation consists of first removing a small percentage of the relocations farthest from
#' the centroid of the cloud of relocations before the estimation. Indeed, the animal may sometimes
#' make occasional large moves to unusual places outside its home range, and these result in
#' outliers that cannot be considered "normal activities".
#'
#' A rather imprecise definition of a HR: "that area traversed by the animal during its normal
#' activities of food gathering, mating and caring for young" (Burt, 1943). What is an area
#' traversed? What is normal activity?
#'
#' Because the MCP is so coarse, I would not consider a temporal scale smaller than a season
#' (check using bootstrapping).
#'
#' !!NB. Despite it being widely used, I would suggest using newer methods that make fewer assumptions

#' Create an MCP for each unique ID as defined by the temporal scale above
#' percent = how many points you want to retain, usually 95
mcp <- mcp(sp_proj[, 1], percent = 95)

#' Plot the results
plot(mcp, col = rainbow(length(mcp)))
#plot(study_area_proj, add = TRUE, lwd = 3)

#' Write the results to a shapefile
writeOGR(mcp, ".", "mcp_95", driver = "ESRI Shapefile")

#' We can see how the HR area would have changed if we had used a different percent
hrs <- mcp.area(sp_proj[, 1], percent = seq(50, 100, by = 5))

#' We can use the bootstrap MCP function from the move package to determine whether there were
#' sufficient points for each MCP.
#'
#' Note: ideally you want the area to reach an asymptote in HR size.
#' With many time steps it may struggle to run due to figure margins.

#' Add the projected locations to the data frame (the coordinate columns keep their original
#' names, "lon" and "lat", even though the values are now in metres)
locs_move <- as.data.frame(sp_proj@coords)
data_tracking$x <- locs_move$lon
data_tracking$y <- locs_move$lat
data_tracking$id <- data_tracking[, colnames(data_tracking) == id]

#' Create a Move object
obj_move <- move(x = data_tracking$x, y = data_tracking$y, time = data_tracking$timestamp,
                 data = data_tracking, proj = CRS("+init=epsg:32736"),
                 animal = data_tracking$animal)

#' Bootstrap; depending on the number of time steps this can take a long time to run
#' (the coordinates are in metres, so unin = "m")
hrBootstrap(obj_move, rep = 100, unin = "m", unout = "km2")

####---------------- 5.2- The kernel estimation and the utilisation distribution ---------------####
#' Several authors have proposed replacing the MCP with a more formal model: the utilisation
#' distribution (UD, van Winkle, 1975). Under this model, we consider that the animal's use of
#' space can be described by a bivariate probability density function, the UD, which gives the
#' probability density of relocating the animal at any place according to the coordinates (x, y)
#' of this place. The study of an animal's space use then becomes the study of the properties of
#' its utilisation distribution. The issue is therefore to estimate the utilisation distribution
#' from the relocation data.
#'
#' Worton (1989) proposed the kernel method, which needs an appropriate smoothing parameter
#' (controlling the "width" of the kernel function placed over each point):
#' 1) Least Squares Cross-Validation (LSCV), the most widely used
#' 2) Reference bandwidth (href), assumes a bivariate normal distribution (can result in strong
#'    oversmoothing)
#' 3) Subjective visual choice of a smoothing parameter based on successive trials
#'
#' I would suggest LSCV to be on the safe side. UNLESS it does not converge!

#' Create a utilisation distribution for each unique ID
#' h = the smoothing parameter, try running with "href" or "LSCV"
#' same4all = TRUE sets the grid the same for all the UDs
kud <- kernelUD(sp_proj[, 1], h = "LSCV", same4all = FALSE, extent = 0.8)

#' Check the warnings; if LSCV did not converge, there may not be sufficient locations at this
#' resolution for LSCV
kud

#' Look at the results of LSCV; if they did not converge then we cannot use LSCV.
#plotLSCV(kud)
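
#' If you prefer to check convergence programmatically rather than by reading the printed
#' object, each fitted estUD stores a convergence flag when h = "LSCV" was used (a minimal
#' sketch; assumes kud was produced by the kernelUD() call above):
sapply(kud, function(u) u@h$convergence)  # FALSE = LSCV failed for that id; fall back to "href"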
#' Plot the UD for all individuals
image(kud, extent = 0.4)

#' Plot the UD for one individual
image(kud[[1]], extent = 0.4)

#' The UD gives the probability density of relocating the animal at a given place. The home range
#' is deduced from the UD as the minimum area on which the probability of relocating the animal is
#' equal to a specified value. For example, the 95% home range corresponds to the smallest area on
#' which the probability of relocating the animal is equal to 0.95.
homerange <- getverticeshr(kud, percent = 95)

#' Plot the results. NOTE how the ranges cross the boundaries of the PA
plot(homerange, col = rainbow(length(homerange)))
plot(study_area_proj, add = TRUE, lwd = 3)

#' Write the results to a shapefile
writeOGR(homerange, ".", "kud_95", driver = "ESRI Shapefile")

#' We can also calculate the home range size for several probability levels
homerange <- kernel.area(kud, percent = seq(50, 95, by = 5))
homerange

####----------------------------------------- NOTES ---------------------------------------------####
#' There are methods that take hard boundaries into account, e.g. Benhamou and Cornelis (2010)
#' extended an approach developed for kernel estimation in one dimension: it consists of
#' calculating the mirror image of the points located near the boundary, and adding these
#' "mirror points" to the actual relocations before the kernel estimation (see the sketch below).
#'
#' HOWEVER, the boundary shape needs to be fairly simple, and not too tortuous, for this to work.
#'
#' See the help files of adehabitatHR for more details.
#'
#' AND there are better methods than KDE that should overcome (minimise) this problem (dBBMM)
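
#' A minimal sketch of the mirror-point correction via kernelUD()'s 'boundary' argument,
#' which implements the Benhamou & Cornelis (2010) approach mentioned above. The barrier
#' must be a SpatialLines object in the same projection as the points, and adehabitatHR
#' will reject a boundary that is too tortuous relative to the smoothing parameter, so
#' coercing the raw study-area polygon (as done here) is an assumption: you may first
#' need to simplify it to a few straight segments.
boundary_lines <- as(study_area_proj, "SpatialLines")
kud_boundary <- kernelUD(sp_proj[, 1], h = "href", boundary = boundary_lines)
image(kud_boundary[[1]])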