library(tidyverse)
#> -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
#> v ggplot2 3.3.3 v purrr 0.3.4
#> v tibble 3.1.0 v dplyr 1.0.5
#> v tidyr 1.1.3 v stringr 1.4.0
#> v readr 1.4.0 v forcats 0.5.1
#> -- Conflicts ------------------------------------------ tidyverse_conflicts() --
#> x dplyr::filter() masks stats::filter()
#> x dplyr::lag() masks stats::lag()
library(RLA)
Potential Biological Removal (PBR; Wade 1998) is a management procedure for setting limits on anthropogenic removals. The operating model behind PBR is a deterministic Pella-Tomlinson process, which is emulated in the function pellatomlinson_pbr. The specifications below correspond to the base case scenario of Wade (1998).
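The underlying dynamics follow the generalized logistic (Pella-Tomlinson) model of Wade (1998):
\[ N_{t+1} = N_t + R_{\max} \, N_t \left[ 1 - \left( \frac{N_t}{K} \right)^{\theta} \right] - C_t \]
where \(N_t\) is the abundance in year \(t\), \(C_t\) the anthropogenic removals, and the shape parameter \(\theta\) sets where the maximum net productivity level (\(\mathrm{MNPL}\)) falls relative to \(K\) (\(\theta = 1\) gives \(\mathrm{MNPL} = 0.5\,K\)); pellatomlinson_pbr presumably derives \(\theta\) internally from the MNPL argument.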
hp <- pellatomlinson_pbr(depletion0 = 0.3,
K = 1E5,
MNPL = 0.5,
Rmax = 0.04,
catches = floor(runif(30, 5e1, 1e2)),
CV = 0.2,
seed_id = 20210306
)
#> No burn-in initial depletion level: 30 % of carrying capacity
We’ve simulated a population of \(\frac{30}{100} \times \mathrm{K} = \frac{30}{100} \times 100000 = 30000\) animals, of which between \(50\) and \(100\) are removed annually because of human activities. The maximum population growth rate is \(\mathrm{R_{max}} = 4\)%, and the \(\mathrm{MNPL}\) is \(50\)% of the carrying capacity \(\mathrm{K}\).
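The returned object exposes the fields used below: the depletion trajectory (hp$depletion), the removal series (hp$removals) and the carrying capacity (hp$K). As a quick sanity check (assuming the first element of hp$depletion is the initial, time-zero depletion level):
hp$K * hp$depletion[1]  # initial abundance: 0.3 * 1e5 = 30000 animals
range(hp$removals)      # annual removals were drawn between 50 and 100
length(hp$removals)     # 30 years of removals were supplied
We can now plot the depletion and removal-rate trajectories: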
data.frame(time = rep(0:length(hp$removals), 2),
y = c(hp$depletion, 0, hp$removals/hp$K),
response = rep(c("depletion", "removal_rate"), each = length(hp$depletion))
) %>%
ggplot(aes(x = time, y = y, color = response)) +
geom_line() +
facet_wrap(~response, scales = "free_y") +
guides(color = "none") +
theme_bw()
In this simulation, the population is starting to recover. Let’s now reduce \(\mathrm{K}\) to \(10000\) and see how this changes things:
hp <- pellatomlinson_pbr(depletion0 = 0.3,
K = 1E4,
MNPL = 0.5,
Rmax = 0.04,
catches = floor(runif(30, 5E1, 1E2)),
CV = 0.2,
seed_id = 20210306
)
#> No burn-in initial depletion level: 30 % of carrying capacity
data.frame(time = rep(0:length(hp$removals), 2),
y = c(hp$depletion, 0, hp$removals/hp$K),
response = rep(c("depletion", "removal_rate"), each = length(hp$depletion))
) %>%
ggplot(aes(x = time, y = y, color = response)) +
geom_line() +
facet_wrap(~response, scales = "free_y") +
guides(color = "none") +
theme_bw()
The removals now represent a larger fraction of the total population, and the population is no longer recovering.
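To quantify this, we can relate the removals to the current population size: with \(\mathrm{K} = 10000\) and a depletion of about \(30\)%, the \(50\) to \(100\) animals removed each year amount to roughly \(2\) to \(3\)% of the population, of the same order as the net production a population growing at \(\mathrm{R_{max}} = 4\)% can achieve at that depletion level. A quick check (the alignment of hp$depletion with the removal years is an assumption):
# Annual removals as a fraction of the population at the start of each year
# (head(hp$depletion, -1) is assumed to span the years with removals).
summary(hp$removals / (head(hp$depletion, -1) * hp$K))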
To tune PBR to a conservation objective, you need to assess many scenarios, each with several simulations. This is illustrated below. We first generate the simulations, here \(100\) of them.
n_sim <- 100
set.seed(20210306)
scenario1 <- lapply(1:n_sim,
function(i) {
pellatomlinson_pbr(depletion0 = 0.3,
K = 1E4,
MNPL = 0.5,
Rmax = 0.04,
CV = 0.2,
verbose = FALSE
)
}
)
These simulations can now be used with the function forward_pbr. Here we’ll start by evaluating the \(20\)% quantile of the best available abundance estimate (simulated as part of the function pellatomlinson_pbr):
q20 <- lapply(scenario1,
forward_pbr,
frequency = 4,
bycatch_variation_cv = 0.3,
distribution = "truncnorm",
F_r = 1.0,
q = 0.2
)
The \(\mathrm{R_{max}}\) argument is left unspecified because, by default, it is \(4\)%, the value appropriate for cetaceans (Wade 1998). The different population trajectories associated with this choice of quantile can easily be visualized:
trajectory <- do.call('rbind',
lapply(1:length(q20), function(i) { q20[[i]]$depletion })
)
ggplot() +
geom_hline(yintercept = 0.8, linetype = "dotted", color = "tomato", size = 1) +
geom_line(data = trajectory,
aes(x = time, y = depletion, group = factor(seed * quantile), color = quantile), alpha = 0.1
) +
geom_line(data = trajectory %>%
group_by(time, quantile) %>%
summarize(mean = mean(depletion),
lower = as.numeric(quantile(depletion, probs = 0.025)),
upper = as.numeric(quantile(depletion, probs = 0.975))
),
aes(x = time, y = mean, group = quantile, color = quantile), size = 1
) +
scale_y_continuous(name = "Depletion (as % of K)",
breaks = seq(0.0, 1.0, 0.1),
labels = 100 * seq(0.0, 1.0, 0.1)
) +
ggtitle("Rmax = 4%, Quantile = 20%") +
guides(color = "none") +
theme_bw()
#> `summarise()` has grouped output by 'time'. You can override using the `.groups` argument.
This figure is very similar to Figure 2D in Wade (1998), as it should be, since the same specifications are used. The point of PBR is to test several quantiles in order to tune the procedure to a management objective.
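The quantile enters the procedure through the minimum abundance estimate. Wade (1998) computes the removal limit as
\[ \mathrm{PBR} = N_{\min} \times \frac{1}{2} R_{\max} \times F_r, \qquad N_{\min} = \frac{\hat{N}}{\exp\!\left( z_q \sqrt{\ln(1 + \mathrm{CV}^2)} \right)}, \]
where \(\hat{N}\) is the (log-normally distributed) abundance estimate with coefficient of variation \(\mathrm{CV}\), \(F_r\) is the recovery factor (set to \(1.0\) here) and \(z_q\) is the standard normal deviate making \(N_{\min}\) the \(q\)-th percentile of the estimate (\(z_{0.2} \approx 0.842\)). Lower quantiles therefore give smaller \(N_{\min}\) and more conservative removal limits. Here we evaluate quantiles from \(10\)% to \(90\)%: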
quantile2evaluate <- seq(0.1, 0.9, 0.1)
tuning <- lapply(quantile2evaluate,
function(q) {
lapply(scenario1,
forward_pbr,
frequency = 4,
bycatch_variation_cv = 0.3,
distribution = "truncnorm",
F_r = 1.0,
q = q
)
}
)
trajectory <- do.call('rbind',
lapply(1:length(tuning), function(l) {
do.call('rbind',
lapply(1:length(tuning[[l]]), function(i) { tuning[[l]][[i]]$depletion })
)
})
)
ggplot() +
geom_hline(yintercept = 0.8, linetype = "dotted", color = "tomato", size = 1) +
geom_line(data = trajectory %>%
group_by(time, quantile) %>%
summarize(mean = mean(depletion),
lower = as.numeric(quantile(depletion, probs = 0.025)),
upper = as.numeric(quantile(depletion, probs = 0.975))
),
aes(x = time, y = mean, group = quantile, color = quantile), size = 1
) +
scale_y_continuous(name = "Depletion (as % of K)",
breaks = seq(0.0, 1.0, 0.1),
labels = 100 * seq(0.0, 1.0, 0.1)
) +
ggtitle("Rmax = 4%") +
theme_bw()
#> `summarise()` has grouped output by 'time'. You can override using the `.groups` argument.
For example, in this case, none of the quantiles allows the population to recover to \(80\)% of \(\mathrm{K}\) over the next \(100\) years. This is a simple scenario, which can be elaborated upon: we’ll now simulate a catastrophic event that happens once in those \(100\) years and immediately kills \(20\)% of the population.
tuning <- lapply(quantile2evaluate,
function(q) {
lapply(scenario1,
forward_pbr,
frequency = 4,
bycatch_variation_cv = 0.3,
distribution = "truncnorm",
catastrophe = 0.2,
F_r = 1.0,
q = q
)
}
)
trajectory <- do.call('rbind',
lapply(1:length(tuning), function(l) {
do.call('rbind',
lapply(1:length(tuning[[l]]), function(i) { tuning[[l]][[i]]$depletion })
)
})
)
ggplot() +
geom_hline(yintercept = 0.8, linetype = "dotted", color = "tomato", size = 1) +
geom_line(data = trajectory %>%
group_by(time, quantile) %>%
summarize(mean = mean(depletion),
lower = as.numeric(quantile(depletion, probs = 0.025)),
upper = as.numeric(quantile(depletion, probs = 0.975))
),
aes(x = time, y = mean, group = quantile, color = quantile), size = 1
) +
scale_y_continuous(name = "Depletion (as % of K)",
breaks = seq(0.0, 1.0, 0.1),
labels = 100 * seq(0.0, 1.0, 0.1)
) +
ggtitle("Rmax = 4%, Catastrophic event\nwiping out 20% of animals") +
theme_bw()
#> `summarise()` has grouped output by 'time'. You can override using the `.groups` argument.
Here the catastrophe always happens at the same moment across the different choices of quantile for a given simulation in scenario1, because we’ve left the argument random = FALSE (its default) in forward_pbr. This keeps the seed of the simulations fixed across the different choices of quantile, so the exact same simulations are replayed with only the quantile changing. This behaviour can be turned off by setting random = TRUE in forward_pbr.
tuning <- lapply(quantile2evaluate,
function(q) {
lapply(scenario1,
forward_pbr,
frequency = 4,
bycatch_variation_cv = 0.3,
distribution = "truncnorm",
catastrophe = 0.2,
F_r = 1.0,
q = q,
random = TRUE
)
}
)
trajectory <- do.call('rbind',
lapply(1:length(tuning), function(l) {
do.call('rbind',
lapply(1:length(tuning[[l]]), function(i) { tuning[[l]][[i]]$depletion })
)
})
)
ggplot() +
geom_hline(yintercept = 0.8, linetype = "dotted", color = "tomato", size = 1) +
geom_line(data = trajectory %>%
group_by(time, quantile) %>%
summarize(mean = mean(depletion),
lower = as.numeric(quantile(depletion, probs = 0.025)),
upper = as.numeric(quantile(depletion, probs = 0.975))
),
aes(x = time, y = mean, group = quantile, color = quantile), size = 1
) +
scale_y_continuous(name = "Depletion (as % of K)",
breaks = seq(0.0, 1.0, 0.1),
labels = 100 * seq(0.0, 1.0, 0.1)
) +
ggtitle("Rmax = 4%, Catastrophic event\nwiping out 20% of animals") +
theme_bw()
#> `summarise()` has grouped output by 'time'. You can override using the `.groups` argument.
The average curves do not look very different, but the individual trajectories do:
ggplot() +
geom_hline(yintercept = 0.8, linetype = "dotted", color = "tomato", size = 1) +
geom_line(data = trajectory,
aes(x = time, y = depletion, group = factor(seed * quantile), color = quantile), alpha = 0.05
) +
geom_line(data = trajectory %>%
group_by(time, quantile) %>%
summarize(mean = mean(depletion),
lower = as.numeric(quantile(depletion, probs = 0.025)),
upper = as.numeric(quantile(depletion, probs = 0.975))
),
aes(x = time, y = mean, group = quantile, color = quantile), size = 1
) +
scale_y_continuous(name = "Depletion (as % of K)",
breaks = seq(0.0, 1.0, 0.1),
labels = 100 * seq(0.0, 1.0, 0.1)
) +
ggtitle("Rmax = 4%, Catastrophic event\nwiping out 20% of animals") +
guides(color = "none") +
facet_wrap(~ quantile, ncol = 3) +
theme_bw()
#> `summarise()` has grouped output by 'time'. You can override using the `.groups` argument.
This also shows that the forward_* functions in the package systematically simulate catastrophic events at the end of a \(4\)-year cycle (hence the jagged pattern every \(4\) years; a \(4\)-year cycle was chosen to match the base case of Wade 1998). This timing was simply convenient to implement, and tweaking it would not make a big difference.
New simulations can thus easily be carried out, and tuning to new management objectives is straightforward.
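For example, a simple tuning summary can be computed directly from the trajectory data frame assembled above (columns time, depletion, quantile and seed); here, as an illustration, the proportion of simulations at or above the \(80\)%-of-\(\mathrm{K}\) objective at the end of the projection, for each quantile (the exact performance statistic to use depends on the management objective):
# Proportion of simulations meeting the 80%-of-K objective at the final time step.
trajectory %>%
  filter(time == max(time)) %>%
  group_by(quantile) %>%
  summarize(p_recovered = mean(depletion >= 0.8))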